Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Mar 2016 23:41:10 +0000 (15:41 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Mar 2016 23:41:10 +0000 (15:41 -0800)
Pull networking fixes from David Miller:

 1) Fix ordering of WEXT netlink messages so we don't see a newlink
    after a dellink, from Johannes Berg.

 2) Out of bounds access in minstrel_ht_set_best_prob_rate, from
    Konstantin Khlebnikov.

 3) Paging buffer memory leak in iwlwifi, from Matti Gottlieb.

 4) Wrong units used to set initial TCP rto from cached metrics, also
    from Konstantin Khlebnikov.

 5) Prevent stale IP options data in the SKB control block from leaking
    through layers of encapsulation, from Bernie Harris.

 6) Zero padding len miscalculated in bnxt_en, from Michael Chan.

 7) Only CHECKSUM_PARTIAL packets should be passed down through GSO, fix
    from Hannes Frederic Sowa.

 8) Fix suspend/resume with JME networking devices, from Diego Violat
    and Guo-Fu Tseng.

 9) Checksums not validated properly in bridge multicast support due to
    the placement of the SKB header pointers at the time of the check,
    fix from Álvaro Fernández Rojas.

10) Fix hang/timeout with r8169 if a stats fetch is done while the
    device is runtime suspended.  From Chun-Hao Lin.

11) The forwarding database netlink dump facilities don't track the
    state of the dump properly, resulting in skipped/missed entries.
    From Minoura Makoto.

12) Fix regression from a recent 3c59x bug fix, from Neil Horman.

13) Fix list corruption in bna driver, from Ivan Vecera.

14) Big endian machines crash on vlan add in bnx2x, fix from Michal
    Schmidt.

15) Ethtool RSS configuration not propagated properly in mlx5 driver,
    from Tariq Toukan.

16) Fix regression in PHY probing in stmmac driver, from Gabriel
    Fernandez.

17) Fix SKB tailroom calculation in igmp/mld code, from Benjamin
    Poirier.

18) A past change to skip empty routing headers in ipv6 extension header
    parsing accidentally caused fragment headers to not be matched any
    longer.  Fix from Florian Westphal.

19) eTSEC-106 erratum needs to be applied to more gianfar chips, from
    Atsushi Nemoto.

20) Fix netdev reference after free via workqueues in usb networking
    drivers, from Oliver Neukum and Bjørn Mork.

21) mdio->irq is now an array rather than a pointer to dynamic memory,
    but several drivers were still trying to free it :-/ Fixes from
    Colin Ian King.

22) act_ipt iptables action forgets to set the family field, thus LOG
    netfilter targets don't work with it.  Fix from Phil Sutter.

23) SKB leak in ibmveth when skb_linearize() fails, from Thomas Falcon.

24) pskb_may_pull() cannot be called with interrupts disabled, fix code
    that tries to do this in vmxnet3 driver, from Neil Horman.

25) be2net driver leaks iomap'd memory on removal, fix from Douglas
    Miller.

26) Forgotten RTNL mutex unlock in ppp_create_interface() error paths,
    from Guillaume Nault.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (97 commits)
  ppp: release rtnl mutex when interface creation fails
  cdc_ncm: do not call usbnet_link_change from cdc_ncm_bind
  tcp: fix tcpi_segs_in after connection establishment
  net: hns: fix the bug about loopback
  jme: Fix device PM wakeup API usage
  jme: Do not enable NIC WoL functions on S0
  udp6: fix UDP/IPv6 encap resubmit path
  be2net: Don't leak iomapped memory on removal.
  vmxnet3: avoid calling pskb_may_pull with interrupts disabled
  net: ethernet: Add missing MFD_SYSCON dependency on HAS_IOMEM
  ibmveth: check return of skb_linearize in ibmveth_start_xmit
  cdc_ncm: toggle altsetting to force reset before setup
  usbnet: cleanup after bind() in probe()
  mlxsw: pci: Correctly determine if descriptor queue is full
  mlxsw: spectrum: Always decrement bridge's ref count
  tipc: fix nullptr crash during subscription cancel
  net: eth: altera: do not free array priv->mdio->irq
  net/ethoc: do not free array priv->mdio->irq
  net: sched: fix act_ipt for LOG target
  asix: do not free array priv->mdio->irq
  ...

335 files changed:
Documentation/DocBook/media/v4l/media-types.xml
Documentation/devicetree/bindings/regulator/tps65217.txt
Documentation/watchdog/watchdog-parameters.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/irq.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/intc-compact.c
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/smp.c
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am335x-chilisom.dtsi
arch/arm/boot/dts/am335x-nano.dts
arch/arm/boot/dts/am335x-pepper.dts
arch/arm/boot/dts/am335x-shc.dts
arch/arm/boot/dts/am335x-sl50.dts
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/am57xx-cl-som-am57x.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/kirkwood-ds112.dts
arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
arch/arm/boot/dts/r8a7791-porter.dts
arch/arm/boot/dts/sama5d2-pinfunc.h
arch/arm/boot/dts/tps65217.dtsi [new file with mode: 0644]
arch/arm/include/asm/arch_gicv3.h
arch/arm/include/asm/xen/page-coherent.h
arch/arm/kernel/Makefile
arch/arm/kvm/guest.c
arch/arm/kvm/mmio.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/gpmc-onenand.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-shmobile/common.h
arch/arm/mach-shmobile/headsmp-scu.S
arch/arm/mach-shmobile/headsmp.S
arch/arm/mach-shmobile/platsmp-apmu.c
arch/arm/mach-shmobile/platsmp-scu.c
arch/arm/mach-shmobile/smp-r8a7779.c
arch/arm/mm/mmap.c
arch/arm/mm/pageattr.c
arch/arm64/include/asm/pgtable.h
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/mm/init.c
arch/arm64/mm/mmap.c
arch/mips/jz4740/gpio.c
arch/mips/kernel/r2300_fpu.S
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/traps.c
arch/mips/kvm/mips.c
arch/mips/mm/mmap.c
arch/mips/mm/sc-mips.c
arch/parisc/include/asm/floppy.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/process.c
arch/powerpc/mm/hash64_64k.c
arch/powerpc/mm/hugepage-hash64.c
arch/powerpc/mm/hugetlbpage-book3e.c
arch/powerpc/mm/mmap.c
arch/s390/include/asm/fpu/internal.h
arch/s390/kernel/compat_signal.c
arch/sparc/Makefile
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/entry.S
arch/sparc/kernel/hvcalls.S
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sparc_ksyms_64.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/syscalls.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/um/kernel/reboot.c
arch/um/kernel/signal.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/pci_x86.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/xen/pci.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kvm/emulate.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/mmap.c
arch/x86/mm/mpx.c
arch/x86/mm/pageattr.c
arch/x86/pci/common.c
arch/x86/pci/intel_mid_pci.c
arch/x86/pci/irq.c
arch/x86/pci/xen.c
arch/x86/platform/intel-quark/imr.c
arch/x86/um/os-Linux/task_size.c
block/Kconfig
block/blk-map.c
block/blk-merge.c
drivers/acpi/nfit.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/android/binder.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libata-scsi.c
drivers/ata/pata_rb532_cf.c
drivers/char/random.c
drivers/clk/ti/dpll3xxx.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/mt8173-cpufreq.c
drivers/devfreq/tegra-devfreq.c
drivers/dma/pxa_dma.c
drivers/gpio/gpio-rcar.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/i2c/busses/i2c-brcmstb.c
drivers/infiniband/core/device.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/mlx5/srq.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/md/dm.c
drivers/media/i2c/adp1653.c
drivers/media/i2c/adv7604.c
drivers/media/usb/au0828/au0828-video.c
drivers/misc/cxl/pci.c
drivers/mmc/host/omap_hsmmc.c
drivers/mtd/ubi/upd.c
drivers/nvdimm/bus.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/pci/host/Kconfig
drivers/pci/host/pci-keystone-dw.c
drivers/pci/host/pci-layerscape.c
drivers/pci/xen-pcifront.c
drivers/power/bq27xxx_battery_i2c.c
drivers/scsi/ipr.c
drivers/scsi/scsi_lib.c
drivers/sh/pm_runtime.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/usb/chipidea/ci_hdrc_pci.c
drivers/usb/chipidea/debug.c
drivers/usb/chipidea/otg.c
drivers/usb/core/hub.c
drivers/usb/dwc2/Kconfig
drivers/usb/dwc2/core.c
drivers/usb/dwc2/hcd_ddma.c
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/gadget/udc/net2280.h
drivers/usb/gadget/udc/udc-core.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/serial/Kconfig
drivers/usb/serial/Makefile
drivers/usb/serial/cp210x.c
drivers/usb/serial/mxu11x0.c [deleted file]
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/video/console/fbcon.c
drivers/virtio/virtio_pci_modern.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/sun4v_wdt.c [new file with mode: 0644]
drivers/xen/xen-pciback/pciback_ops.c
drivers/xen/xen-scsiback.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/affs/file.c
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/root-tree.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/super.h
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifssmb.c
fs/cifs/smb2pdu.c
fs/dax.c
fs/dcache.c
fs/ext2/file.c
fs/ext2/inode.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/fs-writeback.c
fs/hpfs/namei.c
fs/jffs2/README.Locking
fs/jffs2/build.c
fs/jffs2/file.c
fs/jffs2/gc.c
fs/jffs2/nodelist.h
fs/namei.c
fs/nfs/blocklayout/extent_tree.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/ocfs2/aops.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/pnode.c
fs/read_write.c
fs/super.c
fs/userfaultfd.c
fs/xattr.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_aops.h
fs/xfs/xfs_bmap_util.c
include/linux/ata.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/ceph/ceph_features.h
include/linux/dax.h
include/linux/dcache.h
include/linux/libata.h
include/linux/libnvdimm.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/power/bq27xxx_battery.h
include/linux/random.h
include/linux/trace_events.h
include/linux/writeback.h
include/sound/hdaudio.h
include/uapi/linux/media.h
include/uapi/linux/ndctl.h
kernel/events/core.c
kernel/memremap.c
kernel/sched/deadline.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_stack.c
mm/filemap.c
mm/huge_memory.c
mm/memory.c
mm/migrate.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/cache.c
net/sunrpc/xprtrdma/backchannel.c
security/selinux/hooks.c
sound/core/control_compat.c
sound/core/pcm_compat.c
sound/core/rawmidi_compat.c
sound/core/seq/oss/seq_oss.c
sound/core/seq/oss/seq_oss_device.h
sound/core/seq/oss/seq_oss_init.c
sound/core/timer_compat.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/usb/quirks.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/ftrace/test.d/instances/instance.tc
virt/kvm/arm/vgic.c
virt/kvm/async_pf.c

index 1af3842..0ee0f33 100644 (file)
            <entry><constant>MEDIA_ENT_F_CONN_COMPOSITE</constant></entry>
            <entry>Connector for a RGB composite signal.</entry>
          </row>
-         <row>
-           <entry><constant>MEDIA_ENT_F_CONN_TEST</constant></entry>
-           <entry>Connector for a test generator.</entry>
-         </row>
          <row>
            <entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry>
            <entry>Camera video sensor entity.</entry>
index d181096..4f05d20 100644 (file)
@@ -26,11 +26,7 @@ Example:
                ti,pmic-shutdown-controller;
 
                regulators {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
                        dcdc1_reg: dcdc1 {
-                               reg = <0>;
                                regulator-min-microvolt = <900000>;
                                regulator-max-microvolt = <1800000>;
                                regulator-boot-on;
@@ -38,7 +34,6 @@ Example:
                        };
 
                        dcdc2_reg: dcdc2 {
-                               reg = <1>;
                                regulator-min-microvolt = <900000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
@@ -46,7 +41,6 @@ Example:
                        };
 
                        dcdc3_reg: dcc3 {
-                               reg = <2>;
                                regulator-min-microvolt = <900000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-boot-on;
@@ -54,7 +48,6 @@ Example:
                        };
 
                        ldo1_reg: ldo1 {
-                               reg = <3>;
                                regulator-min-microvolt = <1000000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
@@ -62,7 +55,6 @@ Example:
                        };
 
                        ldo2_reg: ldo2 {
-                               reg = <4>;
                                regulator-min-microvolt = <900000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
@@ -70,7 +62,6 @@ Example:
                        };
 
                        ldo3_reg: ldo3 {
-                               reg = <5>;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
@@ -78,7 +69,6 @@ Example:
                        };
 
                        ldo4_reg: ldo4 {
-                               reg = <6>;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
index 9f9ec9f..4e4b6f1 100644 (file)
@@ -400,3 +400,7 @@ wm8350_wdt:
 nowayout: Watchdog cannot be stopped once started
        (default=kernel config parameter)
 -------------------------------------------------
+sun4v_wdt:
+timeout_ms: Watchdog timeout in milliseconds 1..180000, default=60000)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
index bcda623..4029c63 100644 (file)
@@ -920,17 +920,24 @@ M:        Emilio López <emilio@elopez.com.ar>
 S:     Maintained
 F:     drivers/clk/sunxi/
 
-ARM/Amlogic MesonX SoC support
+ARM/Amlogic Meson SoC support
 M:     Carlo Caione <carlo@caione.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-meson@googlegroups.com
+W:     http://linux-meson.com/
 S:     Maintained
-F:     drivers/media/rc/meson-ir.c
-N:     meson[x68]
+F:     arch/arm/mach-meson/
+F:     arch/arm/boot/dts/meson*
+N:     meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
+M:     Antoine Tenart <antoine.tenart@free-electrons.com>
 S:     Maintained
 F:     arch/arm/mach-alpine/
+F:     arch/arm/boot/dts/alpine*
+F:     arch/arm64/boot/dts/al/
+F:     drivers/*/*alpine*
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 M:     Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -3444,7 +3451,6 @@ F:        drivers/usb/dwc2/
 DESIGNWARE USB3 DRD IP DRIVER
 M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
-L:     linux-omap@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
 F:     drivers/usb/dwc3/
@@ -4512,6 +4518,12 @@ L:       linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/dma/fsldma.*
 
+FREESCALE GPMI NAND DRIVER
+M:     Han Xu <han.xu@nxp.com>
+L:     linux-mtd@lists.infradead.org
+S:     Maintained
+F:     drivers/mtd/nand/gpmi-nand/*
+
 FREESCALE I2C CPM DRIVER
 M:     Jochen Friedrich <jochen@scram.de>
 L:     linuxppc-dev@lists.ozlabs.org
@@ -4528,7 +4540,7 @@ F:        include/linux/platform_data/video-imxfb.h
 F:     drivers/video/fbdev/imxfb.c
 
 FREESCALE QUAD SPI DRIVER
-M:     Han Xu <han.xu@freescale.com>
+M:     Han Xu <han.xu@nxp.com>
 L:     linux-mtd@lists.infradead.org
 S:     Maintained
 F:     drivers/mtd/spi-nor/fsl-quadspi.c
@@ -7364,7 +7376,7 @@ F:        drivers/tty/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:     Felipe Balbi <balbi@kernel.org>
+M:     Bin Liu <b-liu@ti.com>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
@@ -7696,13 +7708,13 @@ S:      Maintained
 F:     arch/nios2/
 
 NOKIA N900 POWER SUPPLY DRIVERS
-M:     Pali Rohár <pali.rohar@gmail.com>
-S:     Maintained
+R:     Pali Rohár <pali.rohar@gmail.com>
 F:     include/linux/power/bq2415x_charger.h
 F:     include/linux/power/bq27xxx_battery.h
 F:     include/linux/power/isp1704_charger.h
 F:     drivers/power/bq2415x_charger.c
 F:     drivers/power/bq27xxx_battery.c
+F:     drivers/power/bq27xxx_battery_i2c.c
 F:     drivers/power/isp1704_charger.c
 F:     drivers/power/rx51_battery.c
 
@@ -7933,11 +7945,9 @@ F:       drivers/media/platform/omap3isp/
 F:     drivers/staging/media/omap4iss/
 
 OMAP USB SUPPORT
-M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-S:     Maintained
+S:     Orphan
 F:     drivers/usb/*/*omap*
 F:     arch/arm/*omap*/usb*
 
@@ -9568,6 +9578,12 @@ M:       Andreas Noever <andreas.noever@gmail.com>
 S:     Maintained
 F:     drivers/thunderbolt/
 
+TI BQ27XXX POWER SUPPLY DRIVER
+R:     Andrew F. Davis <afd@ti.com>
+F:     include/linux/power/bq27xxx_battery.h
+F:     drivers/power/bq27xxx_battery.c
+F:     drivers/power/bq27xxx_battery_i2c.c
+
 TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
 M:     John Stultz <john.stultz@linaro.org>
 M:     Thomas Gleixner <tglx@linutronix.de>
index fbe1b92..2d519d2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
index 0655495..8a188bc 100644 (file)
@@ -12,8 +12,6 @@ config ARC
        select BUILDTIME_EXTABLE_SORT
        select COMMON_CLK
        select CLONE_BACKWARDS
-       # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
-       select DEVTMPFS if !INITRAMFS_SOURCE=""
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
        select GENERIC_FIND_FIRST_BIT
@@ -275,14 +273,6 @@ config ARC_DCCM_BASE
        default "0xA0000000"
        depends on ARC_HAS_DCCM
 
-config ARC_HAS_HW_MPY
-       bool "Use Hardware Multiplier (Normal or Faster XMAC)"
-       default y
-       help
-         Influences how gcc generates code for MPY operations.
-         If enabled, MPYxx insns are generated, provided by Standard/XMAC
-         Multipler. Otherwise software multipy lib is used
-
 choice
        prompt "MMU Version"
        default ARC_MMU_V3 if ARC_CPU_770
@@ -542,14 +532,6 @@ config ARC_DBG_TLB_MISS_COUNT
          Counts number of I and D TLB Misses and exports them via Debugfs
          The counters can be cleared via Debugfs as well
 
-if SMP
-
-config ARC_IPI_DBG
-       bool "Debug Inter Core interrupts"
-       default n
-
-endif
-
 endif
 
 config ARC_UBOOT_SUPPORT
index aeb1902..c8230f3 100644 (file)
@@ -74,10 +74,6 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN)     += -EB
 # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
 ldflags-$(upto_gcc44)                  += -marclinux
 
-ifndef CONFIG_ARC_HAS_HW_MPY
-       cflags-y        += -mno-mpy
-endif
-
 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
index f1ac981..5d4e2a0 100644 (file)
@@ -39,6 +39,7 @@ CONFIG_IP_PNP_RARP=y
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -73,7 +74,6 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
@@ -91,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
index 323486d..87ee46b 100644 (file)
@@ -39,14 +39,10 @@ CONFIG_IP_PNP_RARP=y
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AXS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
@@ -78,14 +74,12 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
 # CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
@@ -97,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
index 66191cd..d80daf4 100644 (file)
@@ -40,14 +40,10 @@ CONFIG_IP_PNP_RARP=y
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AXS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
@@ -79,14 +75,12 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
 # CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
@@ -98,12 +92,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
index 138f9d8..f410953 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -26,7 +27,6 @@ CONFIG_ARC_PLAT_SIM=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -34,6 +34,7 @@ CONFIG_UNIX_DIAG=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -51,7 +52,6 @@ CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_IOMMU_SUPPORT is not set
@@ -63,4 +63,3 @@ CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 # CONFIG_DEBUG_PREEMPT is not set
-CONFIG_XZ_DEC=y
index f68838e..cfaa33c 100644 (file)
@@ -35,6 +35,7 @@ CONFIG_UNIX_DIAG=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +50,6 @@ CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_IOMMU_SUPPORT is not set
@@ -61,4 +61,3 @@ CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 # CONFIG_DEBUG_PREEMPT is not set
-CONFIG_XZ_DEC=y
index 96bd1c2..bb2a8dc 100644 (file)
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -21,13 +22,11 @@ CONFIG_MODULES=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_PLAT_SIM=y
-CONFIG_ARC_BOARD_ML509=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -35,6 +34,7 @@ CONFIG_UNIX_DIAG=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +49,6 @@ CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_IOMMU_SUPPORT is not set
@@ -60,4 +59,3 @@ CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
index 31e1d95..646182e 100644 (file)
@@ -33,6 +33,7 @@ CONFIG_UNIX_DIAG=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -58,7 +59,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
index fcae666..ceca254 100644 (file)
@@ -34,12 +34,12 @@ CONFIG_UNIX_DIAG=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 # CONFIG_BLK_DEV is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_OSCI_LAN=y
 CONFIG_INPUT_EVDEV=y
 # CONFIG_MOUSE_PS2_ALPS is not set
 # CONFIG_MOUSE_PS2_LOGIPS2PP is not set
@@ -58,7 +58,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
index b01b659..4b6da90 100644 (file)
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
@@ -18,15 +19,11 @@ CONFIG_MODULES=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_PLAT_SIM=y
-CONFIG_ARC_BOARD_ML509=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-CONFIG_ARC_HAS_LL64=y
-# CONFIG_ARC_HAS_RTSC is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=y
@@ -40,6 +37,7 @@ CONFIG_INET=y
 # CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -56,14 +54,11 @@ CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_NET_OSCI_LAN=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
 CONFIG_SERIO_ARC_PS2=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
@@ -75,9 +70,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-CONFIG_ARCPGU_RGB888=y
-CONFIG_ARCPGU_DISPTYPE=0
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
index 3b4dc9c..9b342ea 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="tb10x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -26,12 +27,10 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLOCK is not set
 CONFIG_ARC_PLAT_TB10X=y
 CONFIG_ARC_CACHE_LINE_SHIFT=5
-CONFIG_ARC_STACK_NONEXEC=y
 CONFIG_HZ=250
 CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -44,8 +43,8 @@ CONFIG_IP_MULTICAST=y
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_PROC_DEVICETREE=y
 CONFIG_NETDEVICES=y
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -55,9 +54,6 @@ CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_DEBUG_FS=y
-CONFIG_STMMAC_DA=y
-CONFIG_STMMAC_CHAINED=y
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_WLAN is not set
 # CONFIG_INPUT is not set
@@ -91,7 +87,6 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_LEDS_TRIGGER_TRANSIENT=y
 CONFIG_DMADEVICES=y
 CONFIG_DW_DMAC=y
-CONFIG_NET_DMA=y
 CONFIG_ASYNC_TX_DMA=y
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
@@ -100,17 +95,16 @@ CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_MAGIC_SYSRQ=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_FS=y
 CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
index fdc5be5..f9f4c6f 100644 (file)
@@ -10,7 +10,8 @@
 #define _ASM_ARC_ARCREGS_H
 
 /* Build Configuration Registers */
-#define ARC_REG_DCCMBASE_BCR   0x61    /* DCCM Base Addr */
+#define ARC_REG_AUX_DCCM       0x18    /* DCCM Base Addr ARCv2 */
+#define ARC_REG_DCCM_BASE_BUILD        0x61    /* DCCM Base Addr ARCompact */
 #define ARC_REG_CRC_BCR                0x62
 #define ARC_REG_VECBASE_BCR    0x68
 #define ARC_REG_PERIBASE_BCR   0x69
 #define ARC_REG_DPFP_BCR       0x6C    /* ARCompact: Dbl Precision FPU */
 #define ARC_REG_FP_V2_BCR      0xc8    /* ARCv2 FPU */
 #define ARC_REG_SLC_BCR                0xce
-#define ARC_REG_DCCM_BCR       0x74    /* DCCM Present + SZ */
+#define ARC_REG_DCCM_BUILD     0x74    /* DCCM size (common) */
 #define ARC_REG_TIMERS_BCR     0x75
 #define ARC_REG_AP_BCR         0x76
-#define ARC_REG_ICCM_BCR       0x78
+#define ARC_REG_ICCM_BUILD     0x78    /* ICCM size (common) */
 #define ARC_REG_XY_MEM_BCR     0x79
 #define ARC_REG_MAC_BCR                0x7a
 #define ARC_REG_MUL_BCR                0x7b
@@ -36,6 +37,7 @@
 #define ARC_REG_IRQ_BCR                0xF3
 #define ARC_REG_SMART_BCR      0xFF
 #define ARC_REG_CLUSTER_BCR    0xcf
+#define ARC_REG_AUX_ICCM       0x208   /* ICCM Base Addr (ARCv2) */
 
 /* status32 Bits Positions */
 #define STATUS_AE_BIT          5       /* Exception active */
@@ -246,7 +248,7 @@ struct bcr_perip {
 #endif
 };
 
-struct bcr_iccm {
+struct bcr_iccm_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int base:16, pad:5, sz:3, ver:8;
 #else
@@ -254,17 +256,15 @@ struct bcr_iccm {
 #endif
 };
 
-/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
-struct bcr_dccm_base {
+struct bcr_iccm_arcv2 {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int addr:24, ver:8;
+       unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
 #else
-       unsigned int ver:8, addr:24;
+       unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
 #endif
 };
 
-/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
-struct bcr_dccm {
+struct bcr_dccm_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int res:21, sz:3, ver:8;
 #else
@@ -272,6 +272,14 @@ struct bcr_dccm {
 #endif
 };
 
+struct bcr_dccm_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
+#else
+       unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
+#endif
+};
+
 /* ARCompact: Both SP and DP FPU BCRs have same format */
 struct bcr_fp_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -315,9 +323,9 @@ struct bcr_bpu_arcv2 {
 
 struct bcr_generic {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int pad:24, ver:8;
+       unsigned int info:24, ver:8;
 #else
-       unsigned int ver:8, pad:24;
+       unsigned int ver:8, info:24;
 #endif
 };
 
index 4fd7d62..49014f0 100644 (file)
 #ifdef CONFIG_ISA_ARCOMPACT
 #define TIMER0_IRQ      3
 #define TIMER1_IRQ      4
-#define IPI_IRQ                (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
 #else
 #define TIMER0_IRQ      16
 #define TIMER1_IRQ      17
-#define IPI_IRQ         19
 #endif
 
 #include <linux/interrupt.h>
index 1fc18ee..37c2f75 100644 (file)
@@ -22,6 +22,7 @@
 #define AUX_IRQ_CTRL           0x00E
 #define AUX_IRQ_ACT            0x043   /* Active Intr across all levels */
 #define AUX_IRQ_LVL_PEND       0x200   /* Pending Intr across all levels */
+#define AUX_IRQ_HINT           0x201   /* For generating Soft Interrupts */
 #define AUX_IRQ_PRIORITY       0x206
 #define ICAUSE                 0x40a
 #define AUX_IRQ_SELECT         0x40b
@@ -115,6 +116,16 @@ static inline int arch_irqs_disabled(void)
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
+static inline void arc_softirq_trigger(int irq)
+{
+       write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+       write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
 #else
 
 .macro IRQ_DISABLE  scratch
index b178302..c126460 100644 (file)
@@ -45,11 +45,12 @@ VECTOR      reserved                ; Reserved slots
 VECTOR handle_interrupt        ; (16) Timer0
 VECTOR handle_interrupt        ; unused (Timer1)
 VECTOR handle_interrupt        ; unused (WDT)
-VECTOR handle_interrupt        ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt        ; (23) End of fixed IRQs
+VECTOR handle_interrupt        ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt        ; (20) perf Interrupt
+VECTOR handle_interrupt        ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt        ; unused
+VECTOR handle_interrupt        ; (23) unused
+# End of fixed IRQs
 
 .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
        VECTOR  handle_interrupt
index 06bcedf..224d1c3 100644 (file)
@@ -81,9 +81,6 @@ static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
 {
        switch (irq) {
        case TIMER0_IRQ:
-#ifdef CONFIG_SMP
-       case IPI_IRQ:
-#endif
                irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
                break;
        default:
index bc771f5..c41c364 100644 (file)
 #include <linux/smp.h>
 #include <linux/irq.h>
 #include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
 #include <asm/mcip.h>
 #include <asm/setup.h>
 
+#define IPI_IRQ                19
+#define SOFTIRQ_IRQ    21
+
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
 
@@ -22,6 +26,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 static void mcip_setup_per_cpu(int cpu)
 {
        smp_ipi_irq_setup(cpu, IPI_IRQ);
+       smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -29,46 +34,44 @@ static void mcip_ipi_send(int cpu)
        unsigned long flags;
        int ipi_was_pending;
 
+       /* ARConnect can only send IPI to others */
+       if (unlikely(cpu == raw_smp_processor_id())) {
+               arc_softirq_trigger(SOFTIRQ_IRQ);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
        /*
-        * NOTE: We must spin here if the other cpu hasn't yet
-        * serviced a previous message. This can burn lots
-        * of time, but we MUST follows this protocol or
-        * ipi messages can be lost!!!
-        * Also, we must release the lock in this loop because
-        * the other side may get to this same loop and not
-        * be able to ack -- thus causing deadlock.
+        * If receiver already has a pending interrupt, elide sending this one.
+        * Linux cross core calling works well with concurrent IPIs
+        * coalesced into one
+        * see arch/arc/kernel/smp.c: ipi_send_msg_one()
         */
+       __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+       ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+       if (!ipi_was_pending)
+               __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
 
-       do {
-               raw_spin_lock_irqsave(&mcip_lock, flags);
-               __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
-               ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
-               if (ipi_was_pending == 0)
-                       break; /* break out but keep lock */
-               raw_spin_unlock_irqrestore(&mcip_lock, flags);
-       } while (1);
-
-       __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
-       if (ipi_was_pending)
-               pr_info("IPI ACK delayed from cpu %d\n", cpu);
-#endif
 }
 
 static void mcip_ipi_clear(int irq)
 {
        unsigned int cpu, c;
        unsigned long flags;
-       unsigned int __maybe_unused copy;
+
+       if (unlikely(irq == SOFTIRQ_IRQ)) {
+               arc_softirq_clear(irq);
+               return;
+       }
 
        raw_spin_lock_irqsave(&mcip_lock, flags);
 
        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
 
-       copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);       /* 1,2,4,8... */
+       cpu = read_aux_reg(ARC_REG_MCIP_READBACK);      /* 1,2,4,8... */
 
        /*
         * In rare case, multiple concurrent IPIs sent to same target can
@@ -82,12 +85,6 @@ static void mcip_ipi_clear(int irq)
        } while (cpu);
 
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
-       if (c != __ffs(copy))
-               pr_info("IPIs from %x coalesced to %x\n",
-                       copy, raw_smp_processor_id());
-#endif
 }
 
 static void mcip_probe_n_setup(void)
@@ -111,10 +108,11 @@ static void mcip_probe_n_setup(void)
        READ_BCR(ARC_REG_MCIP_BCR, mp);
 
        sprintf(smp_cpuinfo_buf,
-               "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
+               "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
+               IS_AVAIL1(mp.llm, "LLM "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));
 
index a7edceb..cdc821d 100644 (file)
@@ -42,6 +42,53 @@ struct task_struct *_current_task[NR_CPUS];  /* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
+static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
+{
+       if (is_isa_arcompact()) {
+               struct bcr_iccm_arcompact iccm;
+               struct bcr_dccm_arcompact dccm;
+
+               READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+               if (iccm.ver) {
+                       cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
+                       cpu->iccm.base_addr = iccm.base << 16;
+               }
+
+               READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+               if (dccm.ver) {
+                       unsigned long base;
+                       cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
+
+                       base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
+                       cpu->dccm.base_addr = base & ~0xF;
+               }
+       } else {
+               struct bcr_iccm_arcv2 iccm;
+               struct bcr_dccm_arcv2 dccm;
+               unsigned long region;
+
+               READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+               if (iccm.ver) {
+                       cpu->iccm.sz = 256 << iccm.sz00;        /* 512B to 16M */
+                       if (iccm.sz00 == 0xF && iccm.sz01 > 0)
+                               cpu->iccm.sz <<= iccm.sz01;
+
+                       region = read_aux_reg(ARC_REG_AUX_ICCM);
+                       cpu->iccm.base_addr = region & 0xF0000000;
+               }
+
+               READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+               if (dccm.ver) {
+                       cpu->dccm.sz = 256 << dccm.sz0;
+                       if (dccm.sz0 == 0xF && dccm.sz1 > 0)
+                               cpu->dccm.sz <<= dccm.sz1;
+
+                       region = read_aux_reg(ARC_REG_AUX_DCCM);
+                       cpu->dccm.base_addr = region & 0xF0000000;
+               }
+       }
+}
+
 static void read_arc_build_cfg_regs(void)
 {
        struct bcr_perip uncached_space;
@@ -76,36 +123,11 @@ static void read_arc_build_cfg_regs(void)
        cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;        /* 1,3 */
        cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
        cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
-
-       /* Note that we read the CCM BCRs independent of kernel config
-        * This is to catch the cases where user doesn't know that
-        * CCMs are present in hardware build
-        */
-       {
-               struct bcr_iccm iccm;
-               struct bcr_dccm dccm;
-               struct bcr_dccm_base dccm_base;
-               unsigned int bcr_32bit_val;
-
-               bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
-               if (bcr_32bit_val) {
-                       iccm = *((struct bcr_iccm *)&bcr_32bit_val);
-                       cpu->iccm.base_addr = iccm.base << 16;
-                       cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
-               }
-
-               bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
-               if (bcr_32bit_val) {
-                       dccm = *((struct bcr_dccm *)&bcr_32bit_val);
-                       cpu->dccm.sz = 0x800 << (dccm.sz);
-
-                       READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
-                       cpu->dccm.base_addr = dccm_base.addr << 8;
-               }
-       }
-
        READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
 
+       /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
+       read_decode_ccm_bcr(cpu);
+
        read_decode_mmu_bcr();
        read_decode_cache_bcr();
 
@@ -237,8 +259,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
                        n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
                }
-               n += scnprintf(buf + n, len - n, "%s",
-                              IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
        }
 
        n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
index ef6e9e1..424e937 100644 (file)
@@ -336,11 +336,8 @@ irqreturn_t do_IPI(int irq, void *dev_id)
                int rc;
 
                rc = __do_IPI(msg);
-#ifdef CONFIG_ARC_IPI_DBG
-               /* IPI received but no valid @msg */
                if (rc)
                        pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
-#endif
                pending &= ~(1U << msg);
        } while (pending);
 
index 7a6a58e..43788b1 100644 (file)
@@ -195,5 +195,7 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
        $(call cmd,shipped)
 
+AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
+
 $(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
        $(call cmd,shipped)
index f3db13d..0cc150b 100644 (file)
        };
 };
 
+
+/include/ "tps65217.dtsi"
+
 &tps {
-       compatible = "ti,tps65217";
        /*
         * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
         * mode") at poweroff.  Most BeagleBone versions do not support RTC-only
        ti,pmic-shutdown-controller;
 
        regulators {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
                dcdc1_reg: regulator@0 {
-                       reg = <0>;
                        regulator-name = "vdds_dpr";
                        regulator-always-on;
                };
 
                dcdc2_reg: regulator@1 {
-                       reg = <1>;
                        /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
                        regulator-name = "vdd_mpu";
                        regulator-min-microvolt = <925000>;
                };
 
                dcdc3_reg: regulator@2 {
-                       reg = <2>;
                        /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
                        regulator-name = "vdd_core";
                        regulator-min-microvolt = <925000>;
                };
 
                ldo1_reg: regulator@3 {
-                       reg = <3>;
                        regulator-name = "vio,vrtc,vdds";
                        regulator-always-on;
                };
 
                ldo2_reg: regulator@4 {
-                       reg = <4>;
                        regulator-name = "vdd_3v3aux";
                        regulator-always-on;
                };
 
                ldo3_reg: regulator@5 {
-                       reg = <5>;
                        regulator-name = "vdd_1v8";
                        regulator-always-on;
                };
 
                ldo4_reg: regulator@6 {
-                       reg = <6>;
                        regulator-name = "vdd_3v3a";
                        regulator-always-on;
                };
index fda457b..857d989 100644 (file)
 
 };
 
-&tps {
-       compatible = "ti,tps65217";
+/include/ "tps65217.dtsi"
 
+&tps {
        regulators {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
                dcdc1_reg: regulator@0 {
-                       reg = <0>;
                        regulator-name = "vdds_dpr";
                        regulator-always-on;
                };
 
                dcdc2_reg: regulator@1 {
-                       reg = <1>;
                        /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
                        regulator-name = "vdd_mpu";
                        regulator-min-microvolt = <925000>;
                };
 
                dcdc3_reg: regulator@2 {
-                       reg = <2>;
                        /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
                        regulator-name = "vdd_core";
                        regulator-min-microvolt = <925000>;
                };
 
                ldo1_reg: regulator@3 {
-                       reg = <3>;
                        regulator-name = "vio,vrtc,vdds";
                        regulator-boot-on;
                        regulator-always-on;
                };
 
                ldo2_reg: regulator@4 {
-                       reg = <4>;
                        regulator-name = "vdd_3v3aux";
                        regulator-boot-on;
                        regulator-always-on;
                };
 
                ldo3_reg: regulator@5 {
-                       reg = <5>;
                        regulator-name = "vdd_1v8";
                        regulator-boot-on;
                        regulator-always-on;
                };
 
                ldo4_reg: regulator@6 {
-                       reg = <6>;
                        regulator-name = "vdd_3v3d";
                        regulator-boot-on;
                        regulator-always-on;
index 77559a1..f313999 100644 (file)
        wp-gpios = <&gpio3 18 0>;
 };
 
-&tps {
-       compatible = "ti,tps65217";
+#include "tps65217.dtsi"
 
+&tps {
        regulators {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
                dcdc1_reg: regulator@0 {
-                       reg = <0>;
                        /* +1.5V voltage with ±4% tolerance */
                        regulator-min-microvolt = <1450000>;
                        regulator-max-microvolt = <1550000>;
                };
 
                dcdc2_reg: regulator@1 {
-                       reg = <1>;
                        /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */
                        regulator-name = "vdd_mpu";
                        regulator-min-microvolt = <915000>;
                };
 
                dcdc3_reg: regulator@2 {
-                       reg = <2>;
                        /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */
                        regulator-name = "vdd_core";
                        regulator-min-microvolt = <915000>;
                };
 
                ldo1_reg: regulator@3 {
-                       reg = <3>;
                        /* +1.8V voltage with ±4% tolerance */
                        regulator-min-microvolt = <1750000>;
                        regulator-max-microvolt = <1870000>;
                };
 
                ldo2_reg: regulator@4 {
-                       reg = <4>;
                        /* +3.3V voltage with ±4% tolerance */
                        regulator-min-microvolt = <3175000>;
                        regulator-max-microvolt = <3430000>;
                };
 
                ldo3_reg: regulator@5 {
-                       reg = <5>;
                        /* +1.8V voltage with ±4% tolerance */
                        regulator-min-microvolt = <1750000>;
                        regulator-max-microvolt = <1870000>;
                };
 
                ldo4_reg: regulator@6 {
-                       reg = <6>;
                        /* +3.3V voltage with ±4% tolerance */
                        regulator-min-microvolt = <3175000>;
                        regulator-max-microvolt = <3430000>;
index 471a3a7..8867aaa 100644 (file)
        vin-supply = <&vbat>;
 };
 
-&tps {
-       compatible = "ti,tps65217";
+/include/ "tps65217.dtsi"
 
+&tps {
        backlight {
                isel = <1>; /* ISET1 */
                fdim = <200>; /* TPS65217_BL_FDIM_200HZ */
        };
 
        regulators {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
                dcdc1_reg: regulator@0 {
-                       reg = <0>;
                        /* VDD_1V8 system supply */
                        regulator-always-on;
                };
 
                dcdc2_reg: regulator@1 {
-                       reg = <1>;
                        /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
                        regulator-name = "vdd_core";
                        regulator-min-microvolt = <925000>;
                };
 
                dcdc3_reg: regulator@2 {
-                       reg = <2>;
                        /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
                        regulator-name = "vdd_mpu";
                        regulator-min-microvolt = <925000>;
                };
 
                ldo1_reg: regulator@3 {
-                       reg = <3>;
                        /* VRTC 1.8V always-on supply */
                        regulator-name = "vrtc,vdds";
                        regulator-always-on;
                };
 
                ldo2_reg: regulator@4 {
-                       reg = <4>;
                        /* 3.3V rail */
                        regulator-name = "vdd_3v3aux";
                        regulator-always-on;
                };
 
                ldo3_reg: regulator@5 {
-                       reg = <5>;
                        /* VDD_3V3A 3.3V rail */
                        regulator-name = "vdd_3v3a";
                        regulator-min-microvolt = <3300000>;
                };
 
                ldo4_reg: regulator@6 {
-                       reg = <6>;
                        /* VDD_3V3B 3.3V rail */
                        regulator-name = "vdd_3v3b";
                        regulator-always-on;
index 1b5b044..865de85 100644 (file)
@@ -46,7 +46,7 @@
                        gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
                        linux,code = <KEY_BACK>;
                        debounce-interval = <1000>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                front_button {
@@ -54,7 +54,7 @@
                        gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
                        linux,code = <KEY_FRONT>;
                        debounce-interval = <1000>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index d38edfa..3303c28 100644 (file)
        pinctrl-0 = <&uart4_pins>;
 };
 
+#include "tps65217.dtsi"
+
 &tps {
-       compatible = "ti,tps65217";
        ti,pmic-shutdown-controller;
 
        interrupt-parent = <&intc>;
        interrupts = <7>;       /* NNMI */
 
        regulators {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
                dcdc1_reg: regulator@0 {
-                       reg = <0>;
                        /* VDDS_DDR */
                        regulator-min-microvolt = <1500000>;
                        regulator-max-microvolt = <1500000>;
                };
 
                dcdc2_reg: regulator@1 {
-                       reg = <1>;
                        /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
                        regulator-name = "vdd_mpu";
                        regulator-min-microvolt = <925000>;
                };
 
                dcdc3_reg: regulator@2 {
-                       reg = <2>;
                        /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
                        regulator-name = "vdd_core";
                        regulator-min-microvolt = <925000>;
                };
 
                ldo1_reg: regulator@3 {
-                       reg = <3>;
                        /* VRTC / VIO / VDDS*/
                        regulator-always-on;
                        regulator-min-microvolt = <1800000>;
                };
 
                ldo2_reg: regulator@4 {
-                       reg = <4>;
                        /* VDD_3V3AUX */
                        regulator-always-on;
                        regulator-min-microvolt = <3300000>;
                };
 
                ldo3_reg: regulator@5 {
-                       reg = <5>;
                        /* VDD_1V8 */
                        regulator-min-microvolt = <1800000>;
                        regulator-max-microvolt = <1800000>;
                };
 
                ldo4_reg: regulator@6 {
-                       reg = <6>;
                        /* VDD_3V3A */
                        regulator-min-microvolt = <3300000>;
                        regulator-max-microvolt = <3300000>;
index 36c0fa6..a0986c6 100644 (file)
 
                sound0_master: simple-audio-card,codec {
                        sound-dai = <&tlv320aic3104>;
+                       assigned-clocks = <&clkoutmux2_clk_mux>;
+                       assigned-clock-parents = <&sys_clk2_dclk_div>;
                        clocks = <&clkout2_clk>;
                };
        };
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&mcasp3_pins_default>;
        pinctrl-1 = <&mcasp3_pins_sleep>;
+       assigned-clocks = <&mcasp3_ahclkx_mux>;
+       assigned-clock-parents = <&sys_clkin2>;
        status = "okay";
 
        op-mode = <0>;  /* MCASP_IIS_MODE */
index 8d93882..1c06cb7 100644 (file)
                ti,debounce-tol = /bits/ 16 <10>;
                ti,debounce-rep = /bits/ 16 <1>;
 
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 4f6ae92..f74d3db 100644 (file)
                                #size-cells = <1>;
                                reg = <0x2100000 0x10000>;
                                ranges = <0 0x2100000 0x10000>;
-                               interrupt-parent = <&intc>;
                                clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
                                         <&clks IMX6QDL_CLK_CAAM_ACLK>,
                                         <&clks IMX6QDL_CLK_CAAM_IPG>,
index bf4143c..b84af3d 100644 (file)
@@ -14,7 +14,7 @@
 #include "kirkwood-synology.dtsi"
 
 / {
-       model = "Synology DS111";
+       model = "Synology DS112";
        compatible = "synology,ds111", "marvell,kirkwood";
 
        memory {
index 4207882..aae8a7a 100644 (file)
        };
 };
 
+&devbus_bootcs {
+       status = "okay";
+       devbus,keep-config;
+
+       flash@0 {
+               compatible = "jedec-flash";
+               reg = <0 0x40000>;
+               bank-width = <1>;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       header@0 {
+                               reg = <0 0x30000>;
+                               read-only;
+                       };
+
+                       uboot@30000 {
+                               reg = <0x30000 0xF000>;
+                               read-only;
+                       };
+
+                       uboot_env@3F000 {
+                               reg = <0x3F000 0x1000>;
+                       };
+               };
+       };
+};
+
 &mdio {
        status = "okay";
 
index 6713b1e..01d239c 100644 (file)
        pinctrl-names = "default";
 
        status = "okay";
-       renesas,enable-gpio = <&gpio5 31 GPIO_ACTIVE_HIGH>;
 };
 
 &usbphy {
index 1afe246..b0c912f 100644 (file)
@@ -90,7 +90,7 @@
 #define PIN_PA14__I2SC1_MCK            PINMUX_PIN(PIN_PA14, 4, 2)
 #define PIN_PA14__FLEXCOM3_IO2         PINMUX_PIN(PIN_PA14, 5, 1)
 #define PIN_PA14__D9                   PINMUX_PIN(PIN_PA14, 6, 2)
-#define PIN_PA15                       14
+#define PIN_PA15                       15
 #define PIN_PA15__GPIO                 PINMUX_PIN(PIN_PA15, 0, 0)
 #define PIN_PA15__SPI0_MOSI            PINMUX_PIN(PIN_PA15, 1, 1)
 #define PIN_PA15__TF1                  PINMUX_PIN(PIN_PA15, 2, 1)
diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi
new file mode 100644 (file)
index 0000000..a632724
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Integrated Power Management Chip
+ * http://www.ti.com/lit/ds/symlink/tps65217.pdf
+ */
+
+&tps {
+       compatible = "ti,tps65217";
+
+       regulators {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               dcdc1_reg: regulator@0 {
+                       reg = <0>;
+                       regulator-compatible = "dcdc1";
+               };
+
+               dcdc2_reg: regulator@1 {
+                       reg = <1>;
+                       regulator-compatible = "dcdc2";
+               };
+
+               dcdc3_reg: regulator@2 {
+                       reg = <2>;
+                       regulator-compatible = "dcdc3";
+               };
+
+               ldo1_reg: regulator@3 {
+                       reg = <3>;
+                       regulator-compatible = "ldo1";
+               };
+
+               ldo2_reg: regulator@4 {
+                       reg = <4>;
+                       regulator-compatible = "ldo2";
+               };
+
+               ldo3_reg: regulator@5 {
+                       reg = <5>;
+                       regulator-compatible = "ldo3";
+               };
+
+               ldo4_reg: regulator@6 {
+                       reg = <6>;
+                       regulator-compatible = "ldo4";
+               };
+       };
+};
index 7da5503..e08d151 100644 (file)
@@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void)
        u32 irqstat;
 
        asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+       dsb(sy);
        return irqstat;
 }
 
index 0375c8c..9408a99 100644 (file)
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-       bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+       unsigned long page_pfn = page_to_xen_pfn(page);
+       unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+       unsigned long compound_pages =
+               (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+       bool local = (page_pfn <= dev_pfn) &&
+               (dev_pfn - page_pfn < compound_pages);
+
        /*
-        * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-        * multiple Xen page, it's not possible to have a mix of local and
-        * foreign Xen page. So if the first xen_pfn == mfn the page is local
-        * otherwise it's a foreign page grant-mapped in dom0. If the page is
-        * local we can safely call the native dma_ops function, otherwise we
-        * call the xen specific function.
+        * Dom0 is mapped 1:1, while the Linux page can span across
+        * multiple Xen pages, it's not possible for it to contain a
+        * mix of local and foreign Xen pages. So if the first xen_pfn
+        * == mfn the page is local otherwise it's a foreign page
+        * grant-mapped in dom0. If the page is local we can safely
+        * call the native dma_ops function, otherwise we call the xen
+        * specific function.
         */
        if (local)
                __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
index 2c5f160..ad325a8 100644 (file)
@@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL)        += debug.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)     += hyp-stub.o
+AFLAGS_hyp-stub.o              :=-Wa,-march=armv7-a
 ifeq ($(CONFIG_ARM_PSCI),y)
 obj-$(CONFIG_SMP)              += psci_smp.o
 endif
index 5fa69d7..99361f1 100644 (file)
@@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        u64 val;
 
        val = kvm_arm_timer_get_reg(vcpu, reg->id);
-       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
 static unsigned long num_core_regs(void)
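The one-line change above (and the matching ones in the arm64 and MIPS KVM hunks further down) exists because copy_to_user()/copy_from_user() return the number of bytes left uncopied, not an error code, so returning that value straight from a KVM_GET_ONE_REG handler would hand a positive byte count back to userspace instead of -EFAULT. A minimal sketch of the convention, using a hypothetical helper rather than anything from the patch:

	/* Hypothetical helper illustrating the copy_to_user() return convention. */
	static int copy_reg_to_user(void __user *uaddr, const u64 *val, size_t size)
	{
		/*
		 * copy_to_user() returns 0 on success or the number of bytes
		 * left uncopied; an ioctl handler must translate any nonzero
		 * remainder into -EFAULT rather than returning it directly.
		 */
		if (copy_to_user(uaddr, val, size))
			return -EFAULT;
		return 0;
	}
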
index 7f33b20..0f6600f 100644 (file)
@@ -206,7 +206,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
        run->mmio.is_write      = is_write;
        run->mmio.phys_addr     = fault_ipa;
        run->mmio.len           = len;
-       memcpy(run->mmio.data, data_buf, len);
+       if (is_write)
+               memcpy(run->mmio.data, data_buf, len);
 
        if (!ret) {
                /* We handled the access successfully in the kernel. */
index 8098272..bab814d 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <asm/setup.h>
 #include <asm/mach/arch.h>
+#include <asm/system_info.h>
 
 #include "common.h"
 
@@ -77,12 +78,31 @@ static const char *const n900_boards_compat[] __initconst = {
        NULL,
 };
 
+/* Set system_rev from atags */
+static void __init rx51_set_system_rev(const struct tag *tags)
+{
+       const struct tag *tag;
+
+       if (tags->hdr.tag != ATAG_CORE)
+               return;
+
+       for_each_tag(tag, tags) {
+               if (tag->hdr.tag == ATAG_REVISION) {
+                       system_rev = tag->u.revision.rev;
+                       break;
+               }
+       }
+}
+
 /* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
  * save them while the data is still not overwritten
  */
 static void __init rx51_reserve(void)
 {
-       save_atags((const struct tag *)(PAGE_OFFSET + 0x100));
+       const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);
+
+       save_atags(tags);
+       rx51_set_system_rev(tags);
        omap_reserve();
 }
 
index 7b76ce0..8633c70 100644 (file)
@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
 
 static void set_onenand_cfg(void __iomem *onenand_base)
 {
-       u32 reg;
+       u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
 
-       reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
-       reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
        reg |=  (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
                ONENAND_SYS_CFG1_BL_16;
        if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
                reg |= ONENAND_SYS_CFG1_VHF;
        else
                reg &= ~ONENAND_SYS_CFG1_VHF;
+
        writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
 }
 
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
                }
        }
 
+       onenand_async.sync_write = true;
        omap2_onenand_calc_async_timings(&t);
 
        ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
index 0437537..f7ff3b9 100644 (file)
@@ -191,12 +191,22 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_device *od;
+       int err;
 
        switch (event) {
        case BUS_NOTIFY_DEL_DEVICE:
                if (pdev->archdata.od)
                        omap_device_delete(pdev->archdata.od);
                break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               od = to_omap_device(pdev);
+               if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) {
+                       dev_info(dev, "enabled after unload, idling\n");
+                       err = omap_device_idle(pdev);
+                       if (err)
+                               dev_err(dev, "failed to idle\n");
+               }
+               break;
        case BUS_NOTIFY_ADD_DEVICE:
                if (pdev->dev.of_node)
                        omap_device_build_from_dt(pdev);
@@ -602,8 +612,10 @@ static int _od_runtime_resume(struct device *dev)
        int ret;
 
        ret = omap_device_enable(pdev);
-       if (ret)
+       if (ret) {
+               dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n");
                return ret;
+       }
 
        return pm_generic_runtime_resume(dev);
 }
index 9cb1121..b3a4ed5 100644 (file)
@@ -4,7 +4,6 @@
 extern void shmobile_init_delay(void);
 extern void shmobile_boot_vector(void);
 extern unsigned long shmobile_boot_fn;
-extern unsigned long shmobile_boot_arg;
 extern unsigned long shmobile_boot_size;
 extern void shmobile_smp_boot(void);
 extern void shmobile_smp_sleep(void);
index fa5248c..5e503d9 100644 (file)
@@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu)
 
        b       secondary_startup
 ENDPROC(shmobile_boot_scu)
-
-       .text
-       .align  2
-       .globl  shmobile_scu_base
-shmobile_scu_base:
-       .space  4
index 330c1fc..32e0bf6 100644 (file)
@@ -24,7 +24,6 @@
        .arm
        .align  12
 ENTRY(shmobile_boot_vector)
-       ldr     r0, 2f
        ldr     r1, 1f
        bx      r1
 
@@ -34,9 +33,6 @@ ENDPROC(shmobile_boot_vector)
        .globl  shmobile_boot_fn
 shmobile_boot_fn:
 1:     .space  4
-       .globl  shmobile_boot_arg
-shmobile_boot_arg:
-2:     .space  4
        .globl  shmobile_boot_size
 shmobile_boot_size:
        .long   . - shmobile_boot_vector
@@ -46,13 +42,15 @@ shmobile_boot_size:
  */
 
 ENTRY(shmobile_smp_boot)
-                                               @ r0 = MPIDR_HWID_BITMASK
        mrc     p15, 0, r1, c0, c0, 5           @ r1 = MPIDR
-       and     r0, r1, r0                      @ r0 = cpu_logical_map() value
+       and     r0, r1, #0xffffff               @ MPIDR_HWID_BITMASK
+                                               @ r0 = cpu_logical_map() value
        mov     r1, #0                          @ r1 = CPU index
-       adr     r5, 1f                          @ array of per-cpu mpidr values
-       adr     r6, 2f                          @ array of per-cpu functions
-       adr     r7, 3f                          @ array of per-cpu arguments
+       adr     r2, 1f
+       ldmia   r2, {r5, r6, r7}
+       add     r5, r5, r2                      @ array of per-cpu mpidr values
+       add     r6, r6, r2                      @ array of per-cpu functions
+       add     r7, r7, r2                      @ array of per-cpu arguments
 
 shmobile_smp_boot_find_mpidr:
        ldr     r8, [r5, r1, lsl #2]
@@ -80,12 +78,18 @@ ENTRY(shmobile_smp_sleep)
        b       shmobile_smp_boot
 ENDPROC(shmobile_smp_sleep)
 
+       .align  2
+1:     .long   shmobile_smp_mpidr - .
+       .long   shmobile_smp_fn - 1b
+       .long   shmobile_smp_arg - 1b
+
+       .bss
        .globl  shmobile_smp_mpidr
 shmobile_smp_mpidr:
-1:     .space  NR_CPUS * 4
+       .space  NR_CPUS * 4
        .globl  shmobile_smp_fn
 shmobile_smp_fn:
-2:     .space  NR_CPUS * 4
+       .space  NR_CPUS * 4
        .globl  shmobile_smp_arg
 shmobile_smp_arg:
-3:     .space  NR_CPUS * 4
+       .space  NR_CPUS * 4
index 911884f..aba75c8 100644 (file)
@@ -123,7 +123,6 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
 {
        /* install boot code shared by all CPUs */
        shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
-       shmobile_boot_arg = MPIDR_HWID_BITMASK;
 
        /* perform per-cpu setup */
        apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
index 6466311..081a097 100644 (file)
@@ -17,6 +17,9 @@
 #include <asm/smp_scu.h>
 #include "common.h"
 
+
+void __iomem *shmobile_scu_base;
+
 static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
 {
@@ -41,7 +44,6 @@ void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
 {
        /* install boot code shared by all CPUs */
        shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
-       shmobile_boot_arg = MPIDR_HWID_BITMASK;
 
        /* enable SCU and cache coherency on booting CPU */
        scu_enable(shmobile_scu_base);
index b854fe2..0b024a9 100644 (file)
@@ -92,8 +92,6 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
 {
        /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
        __raw_writel(__pa(shmobile_boot_vector), AVECR);
-       shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-       shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
        /* setup r8a7779 specific SCU bits */
        shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
index 4b4058d..66353ca 100644 (file)
@@ -173,7 +173,7 @@ unsigned long arch_mmap_rnd(void)
 {
        unsigned long rnd;
 
-       rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+       rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 
        return rnd << PAGE_SHIFT;
 }
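The same get_random_int() to get_random_long() conversion recurs in the arm64, MIPS, powerpc, sparc and x86 hunks further down. get_random_int() yields only 32 bits of entropy and the old mask was built with a plain int shift, so on 64-bit kernels with large mmap_rnd_bits the upper randomization bits were effectively never set; get_random_long() together with a 1UL-based mask uses the full word. A small sketch, illustrative only:

	/*
	 * Sketch only: mmap_rnd_bits can exceed 31 on 64-bit kernels,
	 * so the mask must be built with 1UL and the entropy source
	 * must be word-sized.
	 */
	static unsigned long sketch_mmap_rnd(unsigned int mmap_rnd_bits)
	{
		return get_random_long() & ((1UL << mmap_rnd_bits) - 1);
	}
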
index cf30daf..d19b1ad 100644 (file)
@@ -49,6 +49,9 @@ static int change_memory_common(unsigned long addr, int numpages,
                WARN_ON_ONCE(1);
        }
 
+       if (!numpages)
+               return 0;
+
        if (start < MODULES_VADDR || start >= MODULES_END)
                return -EINVAL;
 
index bf464de..f506086 100644 (file)
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
  *
- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
  *     (rounded up to PUD_SIZE).
  * VMALLOC_START: beginning of the kernel VA space
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *     fixed mappings and modules
  */
-#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START          (VA_START)
@@ -51,7 +51,8 @@
 
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START          (VMALLOC_END + SZ_64K)
+#define vmemmap                        ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS     0UL
 
index fcb7788..9e54ad7 100644 (file)
@@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        u64 val;
 
        val = kvm_arm_timer_get_reg(vcpu, reg->id);
-       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
 /**
index 9142e08..5dd2a26 100644 (file)
@@ -147,16 +147,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
        max_lr_idx = vtr_to_max_lr_idx(val);
        nr_pri_bits = vtr_to_nr_pri_bits(val);
 
-       switch (nr_pri_bits) {
-       case 7:
-                write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
-                write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
-       case 6:
-                write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
-       default:
-                write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
-       }                                          
-                                                  
        switch (nr_pri_bits) {
        case 7:
                 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
@@ -167,6 +157,16 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
                 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
        }
 
+       switch (nr_pri_bits) {
+       case 7:
+                write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+                write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+       case 6:
+                write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+       default:
+                write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+       }
+
        switch (max_lr_idx) {
        case 15:
                write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
index f3b061e..7802f21 100644 (file)
@@ -319,8 +319,8 @@ void __init mem_init(void)
 #endif
                  MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-                 MLG((unsigned long)vmemmap,
-                     (unsigned long)vmemmap + VMEMMAP_SIZE),
+                 MLG(VMEMMAP_START,
+                     VMEMMAP_START + VMEMMAP_SIZE),
                  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
                      (unsigned long)virt_to_page(high_memory)),
 #endif
index 4c893b5..232f787 100644 (file)
@@ -53,10 +53,10 @@ unsigned long arch_mmap_rnd(void)
 
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT))
-               rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1);
+               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
        else
 #endif
-               rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
        return rnd << PAGE_SHIFT;
 }
 
index 8c6d76c..d9907e5 100644 (file)
@@ -270,7 +270,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
 }
 EXPORT_SYMBOL(jz_gpio_port_get_value);
 
-#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
+#define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f)
 
 static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
 {
index 5ce3b74..b4ac637 100644 (file)
@@ -125,7 +125,7 @@ LEAF(_restore_fp_context)
        END(_restore_fp_context)
        .set    reorder
 
-       .type   fault@function
+       .type   fault@function
        .ent    fault
 fault: li      v0, -EFAULT
        jr      ra
index f09546e..17732f8 100644 (file)
@@ -358,7 +358,7 @@ LEAF(_restore_msa_all_upper)
 
        .set    reorder
 
-       .type   fault@function
+       .type   fault@function
        .ent    fault
 fault: li      v0, -EFAULT                             # failure
        jr      ra
index ae790c5..bf14da9 100644 (file)
@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 asmlinkage void do_ov(struct pt_regs *regs)
 {
        enum ctx_state prev_state;
-       siginfo_t info;
+       siginfo_t info = {
+               .si_signo = SIGFPE,
+               .si_code = FPE_INTOVF,
+               .si_addr = (void __user *)regs->cp0_epc,
+       };
 
        prev_state = exception_enter();
        die_if_kernel("Integer overflow", regs);
 
-       info.si_code = FPE_INTOVF;
-       info.si_signo = SIGFPE;
-       info.si_errno = 0;
-       info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
        exception_exit(prev_state);
 }
@@ -874,7 +874,7 @@ out:
 void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        const char *str)
 {
-       siginfo_t info;
+       siginfo_t info = { 0 };
        char b[40];
 
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
-               info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
index 8bc3977..3110447 100644 (file)
@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                void __user *uaddr = (void __user *)(long)reg->addr;
 
-               return copy_to_user(uaddr, vs, 16);
+               return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
        } else {
                return -EINVAL;
        }
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                void __user *uaddr = (void __user *)(long)reg->addr;
 
-               return copy_from_user(vs, uaddr, 16);
+               return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
        } else {
                return -EINVAL;
        }
index 5c81fdd..3530376 100644 (file)
@@ -146,7 +146,7 @@ unsigned long arch_mmap_rnd(void)
 {
        unsigned long rnd;
 
-       rnd = (unsigned long)get_random_int();
+       rnd = get_random_long();
        rnd <<= PAGE_SHIFT;
        if (TASK_IS_32BIT_ADDR)
                rnd &= 0xfffffful;
@@ -174,7 +174,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 static inline unsigned long brk_rnd(void)
 {
-       unsigned long rnd = get_random_int();
+       unsigned long rnd = get_random_long();
 
        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
index 2496475..91dec32 100644 (file)
@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
 
        sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
        sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
-       c->scache.sets = 64 << sets;
+       if (sets)
+               c->scache.sets = 64 << sets;
 
        line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
        line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
-       c->scache.linesz = 2 << line_sz;
+       if (line_sz)
+               c->scache.linesz = 2 << line_sz;
 
        assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
        assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
        c->scache.waysize = c->scache.sets * c->scache.linesz;
        c->scache.waybit = __ffs(c->scache.waysize);
 
-       c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+       if (c->scache.linesz) {
+               c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+               return 1;
+       }
 
-       return 1;
+       return 0;
 }
 
 static inline int __init mips_sc_probe(void)
index f84ff12..6d8276c 100644 (file)
@@ -33,7 +33,7 @@
  * floppy accesses go through the track buffer.
  */
 #define _CROSS_64KB(a,s,vdma) \
-(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
 
 #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
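The extra parentheses matter because the macro is invoked with the expression use_virtual_dma & 1: after expansion, !vdma becomes !use_virtual_dma & 1, which groups as (!use_virtual_dma) & 1 since ! binds tighter than &, not the intended !(use_virtual_dma & 1). A stand-alone illustration with a hypothetical value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int use_virtual_dma = 2;	/* bit 0 clear, bit 1 set (hypothetical) */

		int unparenthesized = !use_virtual_dma & 1;	/* (!x) & 1  -> 0 */
		int intended        = !(use_virtual_dma & 1);	/* !(x & 1)  -> 1 */

		printf("unparenthesized=%d intended=%d\n", unparenthesized, intended);
		return 0;
	}
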
 
index 35bdccb..b75039f 100644 (file)
 #define __NR_membarrier                (__NR_Linux + 343)
 #define __NR_userfaultfd       (__NR_Linux + 344)
 #define __NR_mlock2            (__NR_Linux + 345)
+#define __NR_copy_file_range   (__NR_Linux + 346)
 
-#define __NR_Linux_syscalls    (__NR_mlock2 + 1)
+#define __NR_Linux_syscalls    (__NR_copy_file_range + 1)
 
 
 #define __IGNORE_select                /* newselect */
index 9585c81..ce0b2b4 100644 (file)
@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-       long ret = 0;
-
        /* Do the secure computing check first. */
        secure_computing_strict(regs->gr[20]);
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-           tracehook_report_syscall_entry(regs))
-               ret = -1L;
+           tracehook_report_syscall_entry(regs)) {
+               /*
+                * Tracing decided this syscall should not happen or the
+                * debugger stored an invalid system call number. Skip
+                * the system call and the system call restart handling.
+                */
+               regs->gr[20] = -1UL;
+               goto out;
+       }
 
 #ifdef CONFIG_64BIT
        if (!is_compat_task())
@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
                        regs->gr[24] & 0xffffffff,
                        regs->gr[23] & 0xffffffff);
 
-       return ret ? : regs->gr[20];
+out:
+       return regs->gr[20];
 }
 
 void do_syscall_trace_exit(struct pt_regs *regs)
index 3fbd725..fbafa0d 100644 (file)
@@ -343,7 +343,7 @@ tracesys_next:
 #endif
 
        comiclr,>>=     __NR_Linux_syscalls, %r20, %r0
-       b,n     .Lsyscall_nosys
+       b,n     .Ltracesys_nosys
 
        LDREGX  %r20(%r19), %r19
 
@@ -359,6 +359,9 @@ tracesys_next:
        be      0(%sr7,%r19)
        ldo     R%tracesys_exit(%r2),%r2
 
+.Ltracesys_nosys:
+       ldo     -ENOSYS(%r0),%r28               /* set errno */
+
        /* Do *not* call this function on the gateway page, because it
        makes a direct call to syscall_trace. */
        
index d4ffcfb..585d50f 100644 (file)
        ENTRY_SAME(membarrier)
        ENTRY_SAME(userfaultfd)
        ENTRY_SAME(mlock2)              /* 345 */
+       ENTRY_SAME(copy_file_range)
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 301be31..650cfb3 100644 (file)
@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
                eeh_pcid_put(dev);
                if (driver->err_handler &&
                    driver->err_handler->error_detected &&
-                   driver->err_handler->slot_reset &&
-                   driver->err_handler->resume)
+                   driver->err_handler->slot_reset)
                        return NULL;
        }
 
index 05e804c..aec9a1b 100644 (file)
@@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
         * If the breakpoint is unregistered between a hw_breakpoint_handler()
         * and the single_step_dabr_instruction(), then cleanup the breakpoint
         * restoration variables to prevent dangling pointers.
+        * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
         */
-       if (bp->ctx && bp->ctx->task)
+       if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
                bp->ctx->task->thread.last_hit_ubp = NULL;
 }
 
index dccc87e..3c5736e 100644 (file)
@@ -1768,9 +1768,9 @@ static inline unsigned long brk_rnd(void)
 
        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
-               rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+               rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
        else
-               rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+               rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
 
        return rnd << PAGE_SHIFT;
 }
index 0762c1e..edb0991 100644 (file)
@@ -111,7 +111,13 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
         */
        if (!(old_pte & _PAGE_COMBO)) {
                flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
-               old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND;
+               /*
+                * clear the old slot details from the old and new pte.
+                * On hash insert failure we use the old pte value and we don't
+                * want slot information there if we have an insert failure.
+                */
+               old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
+               new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
                goto htab_insert_hpte;
        }
        /*
index 49b152b..eb2accd 100644 (file)
@@ -78,9 +78,19 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                 * base page size. This is because demote_segment won't flush
                 * hash page table entries.
                 */
-               if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
+               if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) {
                        flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
                                            ssize, flags);
+                       /*
+                        * With THP, we also clear the slot information with
+                        * respect to all the 64K hash pte mapping the 16MB
+                        * page. They are all invalid now. This make sure we
+                        * page. They are all invalid now. This makes sure we
+                        * base page size.
+                        *
+                        */
+                       memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
+               }
        }
 
        valid = hpte_valid(hpte_slot_array, index);
index 7e6d088..83a8be7 100644 (file)
@@ -8,6 +8,8 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
+#include <asm/mmu.h>
+
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #ifdef CONFIG_PPC64
 static inline int tlb1_next(void)
@@ -60,6 +62,14 @@ static inline void book3e_tlb_lock(void)
        unsigned long tmp;
        int token = smp_processor_id() + 1;
 
+       /*
+        * Besides being unnecessary in the absence of SMT, this
+        * check prevents trying to do lbarx/stbcx. on e5500 which
+        * doesn't implement either feature.
+        */
+       if (!cpu_has_feature(CPU_FTR_SMT))
+               return;
+
        asm volatile("1: lbarx %0, 0, %1;"
                     "cmpwi %0, 0;"
                     "bne 2f;"
@@ -80,6 +90,9 @@ static inline void book3e_tlb_unlock(void)
 {
        struct paca_struct *paca = get_paca();
 
+       if (!cpu_has_feature(CPU_FTR_SMT))
+               return;
+
        isync();
        paca->tcd_ptr->lock = 0;
 }
index 0f0502e..4087705 100644 (file)
@@ -59,9 +59,9 @@ unsigned long arch_mmap_rnd(void)
 
        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
-               rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+               rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
        else
-               rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
+               rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
 
        return rnd << PAGE_SHIFT;
 }
index ea91ddf..629c908 100644 (file)
@@ -40,6 +40,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
 static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
        fpregs->pad = 0;
+       fpregs->fpc = fpu->fpc;
        if (MACHINE_HAS_VX)
                convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
        else
@@ -49,6 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 
 static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
+       fpu->fpc = fpregs->fpc;
        if (MACHINE_HAS_VX)
                convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
        else
index 66c9441..4af6037 100644 (file)
@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
 
        /* Restore high gprs from signal stack */
        if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
-                            sizeof(&sregs_ext->gprs_high)))
+                            sizeof(sregs_ext->gprs_high)))
                return -EFAULT;
        for (i = 0; i < NUM_GPRS; i++)
                *(__u32 *)&regs->gprs[i] = gprs_high[i];
index eaee146..8496a07 100644 (file)
@@ -24,7 +24,13 @@ LDFLAGS        := -m elf32_sparc
 export BITS    := 32
 UTS_MACHINE    := sparc
 
+# We are adding -Wa,-Av8 to KBUILD_CFLAGS to deal with a specs bug in some
+# versions of gcc.  Some gcc versions won't pass -Av8 to binutils when you
+# give -mcpu=v8.  This silently worked with older binutils versions but
+# does not any more.
 KBUILD_CFLAGS  += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
+KBUILD_CFLAGS  += -Wa,-Av8
+
 KBUILD_AFLAGS  += -m32 -Wa,-Av8
 
 else
index 1c26d44..b6de8b1 100644 (file)
 #define __NR_listen            354
 #define __NR_setsockopt                355
 #define __NR_mlock2            356
+#define __NR_copy_file_range   357
 
-#define NR_syscalls            357
+#define NR_syscalls            358
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 33c02b1..a83707c 100644 (file)
@@ -948,7 +948,24 @@ linux_syscall_trace:
        cmp     %o0, 0
        bne     3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
+       sethi   %hi(sys_call_table), %l7
+       ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
+       or      %l7, %lo(sys_call_table), %l7
+       ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
+       ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
+       ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
+       ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
+       ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
+       cmp     %g1, NR_syscalls
+       bgeu    3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        mov     %i0, %o0
+       ld      [%l7 + %l4], %l7
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
index afbaba5..d127130 100644 (file)
@@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
        mov     %o1, %o4
        mov     HV_FAST_MACH_SET_WATCHDOG, %o5
        ta      HV_FAST_TRAP
+       brnz,a,pn %o4, 0f
        stx     %o1, [%o4]
-       retl
+0:     retl
         nop
 ENDPROC(sun4v_mach_set_watchdog)
 
index d88beff..39aaec1 100644 (file)
@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
        unsigned char fenab;
        int err;
 
-       flush_user_windows();
+       synchronize_user_stack();
        if (get_thread_wsaved()                                 ||
            (((unsigned long)ucp) & (sizeof(unsigned long)-1))  ||
            (!__access_ok(ucp, sizeof(*ucp))))
index a92d5d2..9e034f2 100644 (file)
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
 EXPORT_SYMBOL(sun4v_niagara_setperf);
 EXPORT_SYMBOL(sun4v_niagara2_getperf);
 EXPORT_SYMBOL(sun4v_niagara2_setperf);
+EXPORT_SYMBOL(sun4v_mach_set_watchdog);
 
 /* from hweight.S */
 EXPORT_SYMBOL(__arch_hweight8);
index c690c8e..b489e97 100644 (file)
@@ -264,7 +264,7 @@ static unsigned long mmap_rnd(void)
        unsigned long rnd = 0UL;
 
        if (current->flags & PF_RANDOMIZE) {
-               unsigned long val = get_random_int();
+               unsigned long val = get_random_long();
                if (test_thread_flag(TIF_32BIT))
                        rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
                else
index bb00089..c4a1b5c 100644 (file)
@@ -158,7 +158,25 @@ linux_syscall_trace32:
         add    %sp, PTREGS_OFF, %o0
        brnz,pn %o0, 3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
+       sethi   %hi(sys_call_table32), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
+       or      %l7, %lo(sys_call_table32), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1
+       ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
+       ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
+       ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
+       ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+       cmp     %g1, NR_syscalls
+       bgeu,pn %xcc, 3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        srl     %i0, 0, %o0
+       lduw    [%l7 + %l4], %l7
        srl     %i4, 0, %o4
        srl     %i1, 0, %o1
        srl     %i2, 0, %o2
@@ -170,7 +188,25 @@ linux_syscall_trace:
         add    %sp, PTREGS_OFF, %o0
        brnz,pn %o0, 3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
+       sethi   %hi(sys_call_table64), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
+       or      %l7, %lo(sys_call_table64), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1
+       ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
+       ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
+       ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
+       ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+       cmp     %g1, NR_syscalls
+       bgeu,pn %xcc, 3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        mov     %i0, %o0
+       lduw    [%l7 + %l4], %l7
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
index e663b6c..6c3dd6c 100644 (file)
@@ -88,4 +88,4 @@ sys_call_table:
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/        .long sys_setsockopt, sys_mlock2
+/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range
index 1557121..12b524c 100644 (file)
@@ -89,7 +89,7 @@ sys_call_table32:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word compat_sys_setsockopt, sys_mlock2
+       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@ sys_call_table:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word sys_setsockopt, sys_mlock2
+       .word sys_setsockopt, sys_mlock2, sys_copy_file_range
index 9bdf67a..b60a9f8 100644 (file)
@@ -12,6 +12,7 @@
 #include <skas.h>
 
 void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
 
 static void kill_off_processes(void)
 {
index fc8be0e..57acbd6 100644 (file)
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
        struct ksignal ksig;
        int handled_sig = 0;
 
-       if (get_signal(&ksig)) {
+       while (get_signal(&ksig)) {
                handled_sig = 1;
                /* Whee!  Actually deliver the signal.  */
                handle_signal(&ksig, regs);
index 77d8c51..bb3e376 100644 (file)
@@ -294,6 +294,7 @@ sysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
        pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
        pushfl                          /* pt_regs->flags (except IF = 0) */
+       ASM_CLAC                        /* Clear AC after saving FLAGS */
        orl     $X86_EFLAGS_IF, (%esp)  /* Fix IF */
        pushl   $__USER_CS              /* pt_regs->cs */
        pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
index ff1c6d6..3c990ee 100644 (file)
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
         * Interrupts are off on entry.
         */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
+       ASM_CLAC                        /* Do this early to minimize exposure */
        SWAPGS
 
        /*
index 46873fb..d08eacd 100644 (file)
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
+extern bool mp_should_keep_irq(struct device *dev);
+
 struct pci_raw_ops {
        int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 *val);
index f5dcb52..3fe0eac 100644 (file)
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
                switch (n) {
                case 1:
+                       __uaccess_begin();
                        __put_user_size(*(u8 *)from, (u8 __user *)to,
                                        1, ret, 1);
+                       __uaccess_end();
                        return ret;
                case 2:
+                       __uaccess_begin();
                        __put_user_size(*(u16 *)from, (u16 __user *)to,
                                        2, ret, 2);
+                       __uaccess_end();
                        return ret;
                case 4:
+                       __uaccess_begin();
                        __put_user_size(*(u32 *)from, (u32 __user *)to,
                                        4, ret, 4);
+                       __uaccess_end();
                        return ret;
                case 8:
+                       __uaccess_begin();
                        __put_user_size(*(u64 *)from, (u64 __user *)to,
                                        8, ret, 8);
+                       __uaccess_end();
                        return ret;
                }
        }
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
                switch (n) {
                case 1:
+                       __uaccess_begin();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
                        return ret;
                case 2:
+                       __uaccess_begin();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
                        return ret;
                case 4:
+                       __uaccess_begin();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
                        return ret;
                }
        }
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 
                switch (n) {
                case 1:
+                       __uaccess_begin();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
                        return ret;
                case 2:
+                       __uaccess_begin();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
                        return ret;
                case 4:
+                       __uaccess_begin();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
                        return ret;
                }
        }
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
                switch (n) {
                case 1:
+                       __uaccess_begin();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
                        return ret;
                case 2:
+                       __uaccess_begin();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
                        return ret;
                case 4:
+                       __uaccess_begin();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
                        return ret;
                }
        }
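The __uaccess_begin()/__uaccess_end() pairs added throughout this header wrap the raw __get_user_size()/__put_user_size() accessors, which bypass the normal get_user()/put_user() macros; assuming the usual x86 semantics, the pair expands to STAC/CLAC on SMAP-capable CPUs so the user-access window is opened only around the access itself. A minimal sketch of the pattern (illustrative, not from the patch):

	/*
	 * Illustrative wrapper, assuming __uaccess_begin()/__uaccess_end()
	 * bracket raw user accesses (STAC/CLAC under SMAP).
	 */
	static __always_inline unsigned long
	sketch_get_user_u32(u32 *to, const u32 __user *from)
	{
		unsigned long ret = 0;

		__uaccess_begin();
		__get_user_size(*to, from, 4, ret, 4);
		__uaccess_end();
		return ret;
	}
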
index 968d57d..f320ee3 100644 (file)
@@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
 {
        if (xen_pci_frontend && xen_pci_frontend->enable_msi)
                return xen_pci_frontend->enable_msi(dev, vectors);
-       return -ENODEV;
+       return -ENOSYS;
 }
 static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
 {
@@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
 {
        if (xen_pci_frontend && xen_pci_frontend->enable_msix)
                return xen_pci_frontend->enable_msix(dev, vectors, nvec);
-       return -ENODEV;
+       return -ENOSYS;
 }
 static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
 {
index d1daead..adb3eaf 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
 
+#include <linux/ftrace.h>
 #include "../../realmode/rm/wakeup.h"
 #include "sleep.h"
 
@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
        saved_magic = 0x123456789abcdef0L;
 #endif /* CONFIG_64BIT */
 
+       /*
+        * Pause/unpause graph tracing around do_suspend_lowlevel as it has
+        * inconsistent call/return info after it jumps to the wakeup vector.
+        */
+       pause_graph_tracing();
        do_suspend_lowlevel();
+       unpause_graph_tracing();
        return 0;
 }
 
index 1505587..b9b09fe 100644 (file)
@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
        u16 sel;
 
        la = seg_base(ctxt, addr.seg) + addr.ea;
-       *linear = la;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
+               *linear = la;
                if (is_noncanonical_address(la))
                        goto bad;
 
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                        goto bad;
                break;
        default:
+               *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                        if (size > *max_size)
                                goto bad;
                }
-               la &= (u32)-1;
                break;
        }
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
index 6c9fed9..2ce4f05 100644 (file)
@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                        return ret;
 
                kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
-               walker->ptes[level] = pte;
+               walker->ptes[level - 1] = pte;
        }
        return 0;
 }
index e2951b6..0ff4537 100644 (file)
@@ -596,6 +596,8 @@ struct vcpu_vmx {
        /* Support for PML */
 #define PML_ENTITY_NUM         512
        struct page *pml_pg;
+
+       u64 current_tsc_ratio;
 };
 
 enum segment_cache_field {
@@ -2127,14 +2129,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
 
-               /* Setup TSC multiplier */
-               if (cpu_has_vmx_tsc_scaling())
-                       vmcs_write64(TSC_MULTIPLIER,
-                                    vcpu->arch.tsc_scaling_ratio);
-
                vmx->loaded_vmcs->cpu = cpu;
        }
 
+       /* Setup TSC multiplier */
+       if (kvm_has_tsc_control &&
+           vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
+               vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+               vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+       }
+
        vmx_vcpu_pi_load(vcpu, cpu);
 }
 
index 4244c2b..eaf6ee8 100644 (file)
@@ -6618,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * KVM_DEBUGREG_WONT_EXIT again.
         */
        if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
-               int i;
-
                WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
                kvm_x86_ops->sync_dirty_debug_regs(vcpu);
-               for (i = 0; i < KVM_NR_DB_REGS; i++)
-                       vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+               kvm_update_dr0123(vcpu);
+               kvm_update_dr6(vcpu);
+               kvm_update_dr7(vcpu);
+               vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
        /*
index 96bd1e2..72bb52f 100644 (file)
@@ -71,12 +71,12 @@ unsigned long arch_mmap_rnd(void)
 
        if (mmap_is_ia32())
 #ifdef CONFIG_COMPAT
-               rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1);
+               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
 #else
-               rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 #endif
        else
-               rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 
        return rnd << PAGE_SHIFT;
 }
index b2fd67d..ef05755 100644 (file)
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
                break;
        }
 
-       if (regno > nr_registers) {
+       if (regno >= nr_registers) {
                WARN_ONCE(1, "decoded an instruction with an invalid register");
                return -EINVAL;
        }
index 2440814..9cf96d8 100644 (file)
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
        unsigned long virt_addr = (unsigned long)__virt_addr;
-       unsigned long phys_addr, offset;
+       phys_addr_t phys_addr;
+       unsigned long offset;
        enum pg_level level;
        pte_t *pte;
 
        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);
 
+       /*
+        * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+        * before being left-shifted PAGE_SHIFT bits -- this trick is to
+        * make 32-PAE kernel work correctly.
+        */
        switch (level) {
        case PG_LEVEL_1G:
-               phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+               phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
                offset = virt_addr & ~PUD_PAGE_MASK;
                break;
        case PG_LEVEL_2M:
-               phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+               phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
                offset = virt_addr & ~PMD_PAGE_MASK;
                break;
        default:
-               phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+               phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
                offset = virt_addr & ~PAGE_MASK;
        }
 
index 2879efc..d34b511 100644 (file)
@@ -711,28 +711,22 @@ int pcibios_add_device(struct pci_dev *dev)
        return 0;
 }
 
-int pcibios_alloc_irq(struct pci_dev *dev)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
 {
-       /*
-        * If the PCI device was already claimed by core code and has
-        * MSI enabled, probing of the pcibios IRQ will overwrite
-        * dev->irq.  So bail out if MSI is already enabled.
-        */
-       if (pci_dev_msi_enabled(dev))
-               return -EBUSY;
+       int err;
 
-       return pcibios_enable_irq(dev);
-}
+       if ((err = pci_enable_resources(dev, mask)) < 0)
+               return err;
 
-void pcibios_free_irq(struct pci_dev *dev)
-{
-       if (pcibios_disable_irq)
-               pcibios_disable_irq(dev);
+       if (!pci_dev_msi_enabled(dev))
+               return pcibios_enable_irq(dev);
+       return 0;
 }
 
-int pcibios_enable_device(struct pci_dev *dev, int mask)
+void pcibios_disable_device (struct pci_dev *dev)
 {
-       return pci_enable_resources(dev, mask);
+       if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+               pcibios_disable_irq(dev);
 }
 
 int pci_ext_cfg_avail(void)
index 0d24e7c..8b93e63 100644 (file)
@@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
        int polarity;
        int ret;
 
-       if (pci_has_managed_irq(dev))
+       if (dev->irq_managed && dev->irq > 0)
                return 0;
 
        switch (intel_mid_identify_cpu()) {
@@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-       if (pci_has_managed_irq(dev)) {
+       if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+           dev->irq > 0) {
                mp_unmap_irq(dev->irq);
                dev->irq_managed = 0;
-               /*
-                * Don't reset dev->irq here, otherwise
-                * intel_mid_pci_irq_enable() will fail on next call.
-                */
        }
 }
 
index 32e7034..9bd1154 100644 (file)
@@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
                        struct pci_dev *temp_dev;
                        int irq;
 
-                       if (pci_has_managed_irq(dev))
+                       if (dev->irq_managed && dev->irq > 0)
                                return 0;
 
                        irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
                        }
                        dev = temp_dev;
                        if (irq >= 0) {
-                               pci_set_managed_irq(dev, irq);
+                               dev->irq_managed = 1;
+                               dev->irq = irq;
                                dev_info(&dev->dev, "PCI->APIC IRQ transform: "
                                         "INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
                                return 0;
@@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
        return 0;
 }
 
+bool mp_should_keep_irq(struct device *dev)
+{
+       if (dev->power.is_prepared)
+               return true;
+#ifdef CONFIG_PM
+       if (dev->power.runtime_status == RPM_SUSPENDING)
+               return true;
+#endif
+
+       return false;
+}
+
 static void pirq_disable_irq(struct pci_dev *dev)
 {
-       if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) {
+       if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+           dev->irq_managed && dev->irq) {
                mp_unmap_irq(dev->irq);
-               pci_reset_managed_irq(dev);
+               dev->irq = 0;
+               dev->irq_managed = 0;
        }
 }
index ff31ab4..beac4df 100644 (file)
@@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        return 0;
 
 error:
-       dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+       if (ret == -ENOSYS)
+               dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+       else if (ret)
+               dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
 free:
        kfree(v);
        return ret;
index c61b6c3..bfadcd0 100644 (file)
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
        end = (unsigned long)__end_rodata - 1;
 
        /*
-        * Setup a locked IMR around the physical extent of the kernel
+        * Setup an unlocked IMR around the physical extent of the kernel
         * from the beginning of the .text secton to the end of the
         * .rodata section as one physically contiguous block.
         *
         * We don't round up @size since it is already PAGE_SIZE aligned.
         * See vmlinux.lds.S for details.
         */
-       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
        if (ret < 0) {
                pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
                        size / 1024, start, end);
index 8502ad3..5adb6a2 100644 (file)
@@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
                exit(1);
        }
 
-       printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT);
+       printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
        printf("Locating the top of the address space ... ");
        fflush(stdout);
 
@@ -134,7 +134,7 @@ out:
                exit(1);
        }
        top <<= UM_KERN_PAGE_SHIFT;
-       printf("0x%x\n", top);
+       printf("0x%lx\n", top);
 
        return top;
 }
index 161491d..0363cd7 100644 (file)
@@ -88,6 +88,19 @@ config BLK_DEV_INTEGRITY
        T10/SCSI Data Integrity Field or the T13/ATA External Path
        Protection.  If in doubt, say N.
 
+config BLK_DEV_DAX
+       bool "Block device DAX support"
+       depends on FS_DAX
+       depends on BROKEN
+       help
+         When DAX support is available (CONFIG_FS_DAX) raw block
+         devices can also support direct userspace access to the
+         storage capacity via MMAP(2) similar to a file on a
+         DAX-enabled filesystem.  However, the DAX I/O-path disables
+         some standard I/O-statistics, and the MMAP(2) path has some
+         operational differences due to bypassing the page
+         cache.  If in doubt, say N.
+
 config BLK_DEV_THROTTLING
        bool "Block layer bio throttling support"
        depends on BLK_CGROUP=y
index f565e11..a54f054 100644 (file)
@@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
        return ret;
 }
 
+static int __blk_rq_map_user_iov(struct request *rq,
+               struct rq_map_data *map_data, struct iov_iter *iter,
+               gfp_t gfp_mask, bool copy)
+{
+       struct request_queue *q = rq->q;
+       struct bio *bio, *orig_bio;
+       int ret;
+
+       if (copy)
+               bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
+       else
+               bio = bio_map_user_iov(q, iter, gfp_mask);
+
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       if (map_data && map_data->null_mapped)
+               bio_set_flag(bio, BIO_NULL_MAPPED);
+
+       iov_iter_advance(iter, bio->bi_iter.bi_size);
+       if (map_data)
+               map_data->offset += bio->bi_iter.bi_size;
+
+       orig_bio = bio;
+       blk_queue_bounce(q, &bio);
+
+       /*
+        * We link the bounce buffer in and could have to traverse it
+        * later so we have to get a ref to prevent it from being freed
+        */
+       bio_get(bio);
+
+       ret = blk_rq_append_bio(q, rq, bio);
+       if (ret) {
+               bio_endio(bio);
+               __blk_rq_unmap_user(orig_bio);
+               bio_put(bio);
+               return ret;
+       }
+
+       return 0;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:         request queue where request should be inserted
@@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
 {
-       struct bio *bio;
-       int unaligned = 0;
-       struct iov_iter i;
        struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
+       bool copy = (q->dma_pad_mask & iter->count) || map_data;
+       struct bio *bio = NULL;
+       struct iov_iter i;
+       int ret;
 
        if (!iter || !iter->count)
                return -EINVAL;
@@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                 */
                if ((uaddr & queue_dma_alignment(q)) ||
                    iovec_gap_to_prv(q, &prv, &iov))
-                       unaligned = 1;
+                       copy = true;
 
                prv.iov_base = iov.iov_base;
                prv.iov_len = iov.iov_len;
        }
 
-       if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
-               bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
-       else
-               bio = bio_map_user_iov(q, iter, gfp_mask);
-
-       if (IS_ERR(bio))
-               return PTR_ERR(bio);
-
-       if (map_data && map_data->null_mapped)
-               bio_set_flag(bio, BIO_NULL_MAPPED);
-
-       if (bio->bi_iter.bi_size != iter->count) {
-               /*
-                * Grab an extra reference to this bio, as bio_unmap_user()
-                * expects to be able to drop it twice as it happens on the
-                * normal IO completion path
-                */
-               bio_get(bio);
-               bio_endio(bio);
-               __blk_rq_unmap_user(bio);
-               return -EINVAL;
-       }
+       i = *iter;
+       do {
+               ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+               if (ret)
+                       goto unmap_rq;
+               if (!bio)
+                       bio = rq->bio;
+       } while (iov_iter_count(&i));
 
        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;
-
-       blk_queue_bounce(q, &bio);
-       bio_get(bio);
-       blk_rq_bio_prep(q, rq, bio);
        return 0;
+
+unmap_rq:
+       __blk_rq_unmap_user(bio);
+       rq->bio = NULL;
+       return -EINVAL;
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
index 888a7fe..2613531 100644 (file)
@@ -304,7 +304,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
        struct bio_vec end_bv = { NULL }, nxt_bv;
-       struct bvec_iter iter;
 
        if (!blk_queue_cluster(q))
                return 0;
@@ -316,11 +315,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!bio_has_data(bio))
                return 1;
 
-       bio_for_each_segment(end_bv, bio, iter)
-               if (end_bv.bv_len == iter.bi_size)
-                       break;
-
-       nxt_bv = bio_iovec(nxt);
+       bio_get_last_bvec(bio, &end_bv);
+       bio_get_first_bvec(nxt, &nxt_bv);
 
        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;
index ad6d8c6..35947ac 100644 (file)
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
        nfit_mem->bdw = NULL;
 }
 
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
 {
        u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
        struct nfit_memdev *nfit_memdev;
        struct nfit_flush *nfit_flush;
-       struct nfit_dcr *nfit_dcr;
        struct nfit_bdw *nfit_bdw;
        struct nfit_idt *nfit_idt;
        u16 idt_idx, range_index;
 
-       list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
-               if (nfit_dcr->dcr->region_index != dcr)
-                       continue;
-               nfit_mem->dcr = nfit_dcr->dcr;
-               break;
-       }
-
-       if (!nfit_mem->dcr) {
-               dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
-                               spa->range_index, __to_nfit_memdev(nfit_mem)
-                               ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
-               return -ENODEV;
-       }
-
-       /*
-        * We've found enough to create an nvdimm, optionally
-        * find an associated BDW
-        */
-       list_add(&nfit_mem->list, &acpi_desc->dimms);
-
        list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
                if (nfit_bdw->bdw->region_index != dcr)
                        continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
        }
 
        if (!nfit_mem->bdw)
-               return 0;
+               return;
 
        nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
 
        if (!nfit_mem->spa_bdw)
-               return 0;
+               return;
 
        range_index = nfit_mem->spa_bdw->range_index;
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
                }
                break;
        }
-
-       return 0;
 }
 
 static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
        struct nfit_mem *nfit_mem, *found;
        struct nfit_memdev *nfit_memdev;
        int type = nfit_spa_type(spa);
-       u16 dcr;
 
        switch (type) {
        case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
        }
 
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
-               int rc;
+               struct nfit_dcr *nfit_dcr;
+               u32 device_handle;
+               u16 dcr;
 
                if (nfit_memdev->memdev->range_index != spa->range_index)
                        continue;
                found = NULL;
                dcr = nfit_memdev->memdev->region_index;
+               device_handle = nfit_memdev->memdev->device_handle;
                list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
-                       if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+                       if (__to_nfit_memdev(nfit_mem)->device_handle
+                                       == device_handle) {
                                found = nfit_mem;
                                break;
                        }
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
                        if (!nfit_mem)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nfit_mem->list);
+                       list_add(&nfit_mem->list, &acpi_desc->dimms);
+               }
+
+               list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+                       if (nfit_dcr->dcr->region_index != dcr)
+                               continue;
+                       /*
+                        * Record the control region for the dimm.  For
+                        * the ACPI 6.1 case, where there are separate
+                        * control regions for the pmem vs blk
+                        * interfaces, be sure to record the extended
+                        * blk details.
+                        */
+                       if (!nfit_mem->dcr)
+                               nfit_mem->dcr = nfit_dcr->dcr;
+                       else if (nfit_mem->dcr->windows == 0
+                                       && nfit_dcr->dcr->windows)
+                               nfit_mem->dcr = nfit_dcr->dcr;
+                       break;
+               }
+
+               if (dcr && !nfit_mem->dcr) {
+                       dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+                                       spa->range_index, dcr);
+                       return -ENODEV;
                }
 
                if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
                                nfit_mem->idt_dcr = nfit_idt->idt;
                                break;
                        }
+                       nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
                } else {
                        /*
                         * A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
                         */
                        nfit_mem->memdev_pmem = nfit_memdev->memdev;
                }
-
-               if (found)
-                       continue;
-
-               rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
-               if (rc)
-                       return rc;
        }
 
        return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
                case 1:
                        /* ARS unsupported, but we should never get here */
                        return 0;
-               case 2:
-                       return -EINVAL;
-               case 3:
+               case 6:
                        /* ARS is in progress */
                        msleep(1000);
                        break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
 }
 
 static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
-               struct nd_cmd_ars_status *cmd)
+               struct nd_cmd_ars_status *cmd, u32 size)
 {
        int rc;
 
        while (1) {
                rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
-                       sizeof(*cmd));
+                       size);
                if (rc || cmd->status & 0xffff)
                        return -ENXIO;
 
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
                case 2:
                        /* No ARS performed for the current boot */
                        return 0;
+               case 3:
+                       /* TODO: error list overflow support */
                default:
                        return -ENXIO;
                }
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
        struct nd_cmd_ars_start *ars_start = NULL;
        struct nd_cmd_ars_cap *ars_cap = NULL;
        u64 start, len, cur, remaining;
+       u32 ars_status_size;
        int rc;
 
        ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
        start = ndr_desc->res->start;
        len = ndr_desc->res->end - ndr_desc->res->start + 1;
 
+       /*
+        * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
+        * Scrub' flag in extended status is not set, skip this but continue
+        * initialization
+        */
        rc = ars_get_cap(nd_desc, ars_cap, start, len);
+       if (rc == -ENOTTY) {
+               dev_dbg(acpi_desc->dev,
+                       "Address Range Scrub is not implemented, won't create an error list\n");
+               rc = 0;
+               goto out;
+       }
        if (rc)
                goto out;
 
-       /*
-        * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
-        * extended status is not set, skip this but continue initialization
-        */
        if ((ars_cap->status & 0xffff) ||
                !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
                dev_warn(acpi_desc->dev,
@@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
         * Check if a full-range ARS has been run. If so, use those results
         * without having to start a new ARS.
         */
-       ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
-                       GFP_KERNEL);
+       ars_status_size = ars_cap->max_ars_out;
+       ars_status = kzalloc(ars_status_size, GFP_KERNEL);
        if (!ars_status) {
                rc = -ENOMEM;
                goto out;
        }
 
-       rc = ars_get_status(nd_desc, ars_status);
+       rc = ars_get_status(nd_desc, ars_status, ars_status_size);
        if (rc)
                goto out;
 
@@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
                if (rc)
                        goto out;
 
-               rc = ars_get_status(nd_desc, ars_status);
+               rc = ars_get_status(nd_desc, ars_status, ars_status_size);
                if (rc)
                        goto out;
 
index d30184c..c8e169e 100644 (file)
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
                return 0;
        }
 
-       if (pci_has_managed_irq(dev))
+       if (dev->irq_managed && dev->irq > 0)
                return 0;
 
        entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
                kfree(entry);
                return rc;
        }
-       pci_set_managed_irq(dev, rc);
+       dev->irq = rc;
+       dev->irq_managed = 1;
 
        if (link)
                snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        u8 pin;
 
        pin = dev->pin;
-       if (!pin || !pci_has_managed_irq(dev))
+       if (!pin || !dev->irq_managed || dev->irq <= 0)
                return;
 
+       /* Keep IOAPIC pin configuration when suspending */
+       if (dev->dev.power.is_prepared)
+               return;
+#ifdef CONFIG_PM
+       if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+               return;
+#endif
+
        entry = acpi_pci_irq_lookup(dev, pin);
        if (!entry)
                return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
        if (gsi >= 0) {
                acpi_unregister_gsi(gsi);
-               pci_reset_managed_irq(dev);
+               dev->irq_managed = 0;
        }
 }
index fa28635..ededa90 100644 (file)
@@ -4,7 +4,6 @@
  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  *  Copyright (C) 2002       Dominik Brodowski <devel@brodo.de>
- *  Copyright (c) 2015, The Linux Foundation. All rights reserved.
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
  * enabled system.
  */
 
+#define ACPI_MAX_IRQS          256
 #define ACPI_MAX_ISA_IRQ       16
 
 #define PIRQ_PENALTY_PCI_AVAILABLE     (0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
 #define PIRQ_PENALTY_ISA_USED          (16*16*16*16*16)
 #define PIRQ_PENALTY_ISA_ALWAYS                (16*16*16*16*16*16)
 
-static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
+static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
        PIRQ_PENALTY_ISA_ALWAYS,        /* IRQ0 timer */
        PIRQ_PENALTY_ISA_ALWAYS,        /* IRQ1 keyboard */
        PIRQ_PENALTY_ISA_ALWAYS,        /* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
        PIRQ_PENALTY_ISA_USED,          /* IRQ13 fpe, sometimes */
        PIRQ_PENALTY_ISA_USED,          /* IRQ14 ide0 */
        PIRQ_PENALTY_ISA_USED,          /* IRQ15 ide1 */
+       /* >IRQ15 */
 };
 
-struct irq_penalty_info {
-       int irq;
-       int penalty;
-       struct list_head node;
-};
-
-static LIST_HEAD(acpi_irq_penalty_list);
-
-static int acpi_irq_get_penalty(int irq)
-{
-       struct irq_penalty_info *irq_info;
-
-       if (irq < ACPI_MAX_ISA_IRQ)
-               return acpi_irq_isa_penalty[irq];
-
-       list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
-               if (irq_info->irq == irq)
-                       return irq_info->penalty;
-       }
-
-       return 0;
-}
-
-static int acpi_irq_set_penalty(int irq, int new_penalty)
-{
-       struct irq_penalty_info *irq_info;
-
-       /* see if this is a ISA IRQ */
-       if (irq < ACPI_MAX_ISA_IRQ) {
-               acpi_irq_isa_penalty[irq] = new_penalty;
-               return 0;
-       }
-
-       /* next, try to locate from the dynamic list */
-       list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
-               if (irq_info->irq == irq) {
-                       irq_info->penalty  = new_penalty;
-                       return 0;
-               }
-       }
-
-       /* nope, let's allocate a slot for this IRQ */
-       irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
-       if (!irq_info)
-               return -ENOMEM;
-
-       irq_info->irq = irq;
-       irq_info->penalty = new_penalty;
-       list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
-
-       return 0;
-}
-
-static void acpi_irq_add_penalty(int irq, int penalty)
-{
-       int curpen = acpi_irq_get_penalty(irq);
-
-       acpi_irq_set_penalty(irq, curpen + penalty);
-}
-
 int __init acpi_irq_penalty_init(void)
 {
        struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
                            link->irq.possible_count;
 
                        for (i = 0; i < link->irq.possible_count; i++) {
-                               if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) {
-                                       int irqpos = link->irq.possible[i];
-
-                                       acpi_irq_add_penalty(irqpos, penalty);
-                               }
+                               if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
+                                       acpi_irq_penalty[link->irq.
+                                                        possible[i]] +=
+                                           penalty;
                        }
 
                } else if (link->irq.active) {
-                       acpi_irq_add_penalty(link->irq.active,
-                                            PIRQ_PENALTY_PCI_POSSIBLE);
+                       acpi_irq_penalty[link->irq.active] +=
+                           PIRQ_PENALTY_PCI_POSSIBLE;
                }
        }
 
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                 * the use of IRQs 9, 10, 11, and >15.
                 */
                for (i = (link->irq.possible_count - 1); i >= 0; i--) {
-                       if (acpi_irq_get_penalty(irq) >
-                           acpi_irq_get_penalty(link->irq.possible[i]))
+                       if (acpi_irq_penalty[irq] >
+                           acpi_irq_penalty[link->irq.possible[i]])
                                irq = link->irq.possible[i];
                }
        }
-       if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
+       if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
                printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
                            "Try pci=noacpi or acpi=off\n",
                            acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                            acpi_device_bid(link->device));
                return -ENODEV;
        } else {
-               acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING);
-
+               acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
                printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
                       acpi_device_name(link->device),
                       acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
 }
 
 /*
- * modify penalty from cmdline
+ * modify acpi_irq_penalty[] from cmdline
  */
 static int __init acpi_irq_penalty_update(char *str, int used)
 {
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
                if (irq < 0)
                        continue;
 
+               if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+                       continue;
+
                if (used)
-                       acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED);
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
                else
-                       acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE);
+                       acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
 
                if (retval != 2)        /* no next number */
                        break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
  */
 void acpi_penalize_isa_irq(int irq, int active)
 {
-       if (irq >= 0)
-               acpi_irq_add_penalty(irq, active ?
-                       PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
+       if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+               if (active)
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+               else
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+       }
 }
 
 bool acpi_isa_irq_available(int irq)
 {
-       return irq >= 0 &&
-               (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
+       return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+                           acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
 }
 
 /*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
  */
 void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
 {
-       int penalty;
-
-       if (irq < 0)
-               return;
-
-       if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
-           polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
-               penalty = PIRQ_PENALTY_ISA_ALWAYS;
-       else
-               penalty = PIRQ_PENALTY_PCI_USING;
-
-       acpi_irq_add_penalty(irq, penalty);
+       if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+               if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+                   polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+               else
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+       }
 }
 
 /*
index a39e85f..7d00b7a 100644 (file)
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
 
-                       ptr += sizeof(void *);
+                       ptr += sizeof(cookie);
                        list_for_each_entry(w, &proc->delivered_death, entry) {
                                struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
 
index 546a369..146dc0b 100644 (file)
@@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
        { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
        { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
        { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1325,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+#ifdef CONFIG_ARM64
+/*
+ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
+ * Workaround is to make sure all pending IRQs are served before leaving
+ * handler.
+ */
+static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
+{
+       struct ata_host *host = dev_instance;
+       struct ahci_host_priv *hpriv;
+       unsigned int rc = 0;
+       void __iomem *mmio;
+       u32 irq_stat, irq_masked;
+       unsigned int handled = 1;
+
+       VPRINTK("ENTER\n");
+       hpriv = host->private_data;
+       mmio = hpriv->mmio;
+       irq_stat = readl(mmio + HOST_IRQ_STAT);
+       if (!irq_stat)
+               return IRQ_NONE;
+
+       do {
+               irq_masked = irq_stat & hpriv->port_map;
+               spin_lock(&host->lock);
+               rc = ahci_handle_port_intr(host, irq_masked);
+               if (!rc)
+                       handled = 0;
+               writel(irq_stat, mmio + HOST_IRQ_STAT);
+               irq_stat = readl(mmio + HOST_IRQ_STAT);
+               spin_unlock(&host->lock);
+       } while (irq_stat);
+       VPRINTK("EXIT\n");
+
+       return IRQ_RETVAL(handled);
+}
+#endif
+
 /*
  * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
  * to single msi.
@@ -1560,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ahci_broken_devslp(pdev))
                hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
 
+#ifdef CONFIG_ARM64
+       if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+               hpriv->irq_handler = ahci_thunderx_irq_handler;
+#endif
+
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
index a44c75d..167ba7e 100644 (file)
@@ -240,8 +240,7 @@ enum {
                                                        error-handling stage) */
        AHCI_HFLAG_NO_DEVSLP            = (1 << 17), /* no device sleep */
        AHCI_HFLAG_NO_FBS               = (1 << 18), /* no FBS */
-       AHCI_HFLAG_EDGE_IRQ             = (1 << 19), /* HOST_IRQ_STAT behaves as
-                                                       Edge Triggered */
+
 #ifdef CONFIG_PCI_MSI
        AHCI_HFLAG_MULTI_MSI            = (1 << 20), /* multiple PCI MSIs */
        AHCI_HFLAG_MULTI_MSIX           = (1 << 21), /* per-port MSI-X */
@@ -361,6 +360,7 @@ struct ahci_host_priv {
         * be overridden anytime before the host is activated.
         */
        void                    (*start_engine)(struct ata_port *ap);
+       irqreturn_t             (*irq_handler)(int irq, void *dev_instance);
 };
 
 #ifdef CONFIG_PCI_MSI
@@ -424,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
 void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
index e2c6d9e..8e3f7fa 100644 (file)
@@ -548,6 +548,88 @@ softreset_retry:
        return rc;
 }
 
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @ata_host: Host that recieved the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * For hardware with broken edge trigger latch
+ * the HOST_IRQ_STAT register misses the edge interrupt
+ * when clearing of HOST_IRQ_STAT register and hardware
+ * reporting the PORT_IRQ_STAT register at the
+ * same clock cycle.
+ * As such, the algorithm below outlines the workaround.
+ *
+ * 1. Read HOST_IRQ_STAT register and save the state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If HOST_IRQ_STAT register equals to zero, then
+ *    traverse the rest of port's PORT_IRQ_STAT register
+ *    to check if an interrupt is triggered at that point else
+ *    go to step 6.
+ * 5. If PORT_IRQ_STAT register of rest ports is not equal to zero
+ *    then update the state of HOST_IRQ_STAT saved in step 1.
+ * 6. Handle port interrupts.
+ * 7. Exit
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+                                            u32 irq_masked)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *port_mmio;
+       int i;
+
+       if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
+               for (i = 0; i < host->n_ports; i++) {
+                       if (irq_masked & (1 << i))
+                               continue;
+
+                       port_mmio = ahci_port_base(host->ports[i]);
+                       if (readl(port_mmio + PORT_IRQ_STAT))
+                               irq_masked |= (1 << i);
+               }
+       }
+
+       return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+       struct ata_host *host = dev_instance;
+       struct ahci_host_priv *hpriv;
+       unsigned int rc = 0;
+       void __iomem *mmio;
+       u32 irq_stat, irq_masked;
+
+       VPRINTK("ENTER\n");
+
+       hpriv = host->private_data;
+       mmio = hpriv->mmio;
+
+       /* sigh.  0xffffffff is a valid return from h/w */
+       irq_stat = readl(mmio + HOST_IRQ_STAT);
+       if (!irq_stat)
+               return IRQ_NONE;
+
+       irq_masked = irq_stat & hpriv->port_map;
+
+       spin_lock(&host->lock);
+
+       /*
+        * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+        * it should be cleared before all the port events are cleared.
+        */
+       writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+       rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+       spin_unlock(&host->lock);
+
+       VPRINTK("EXIT\n");
+
+       return IRQ_RETVAL(rc);
+}
+
 static struct ata_port_operations xgene_ahci_v1_ops = {
        .inherits = &ahci_ops,
        .host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
                hpriv->flags = AHCI_HFLAG_NO_NCQ;
                break;
        case XGENE_AHCI_V2:
-               hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+               hpriv->flags |= AHCI_HFLAG_YES_FBS;
+               hpriv->irq_handler = xgene_ahci_irq_intr;
                break;
        default:
                break;
index 4029679..85ea514 100644 (file)
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
                                    const char *buf, size_t size);
 static ssize_t ahci_show_em_supported(struct device *dev,
                                      struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
 
 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 
        if (!hpriv->start_engine)
                hpriv->start_engine = ahci_start_engine;
+
+       if (!hpriv->irq_handler)
+               hpriv->irq_handler = ahci_single_level_irq_intr;
 }
 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
 
@@ -1164,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
 
        /* mark esata ports */
        tmp = readl(port_mmio + PORT_CMD);
-       if ((tmp & PORT_CMD_HPCP) ||
-           ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+       if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
                ap->pflags |= ATA_PFLAG_EXTERNAL;
 }
 
@@ -1846,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
 {
        unsigned int i, handled = 0;
 
@@ -1872,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
 
        return handled;
 }
-
-static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
-{
-       struct ata_host *host = dev_instance;
-       struct ahci_host_priv *hpriv;
-       unsigned int rc = 0;
-       void __iomem *mmio;
-       u32 irq_stat, irq_masked;
-
-       VPRINTK("ENTER\n");
-
-       hpriv = host->private_data;
-       mmio = hpriv->mmio;
-
-       /* sigh.  0xffffffff is a valid return from h/w */
-       irq_stat = readl(mmio + HOST_IRQ_STAT);
-       if (!irq_stat)
-               return IRQ_NONE;
-
-       irq_masked = irq_stat & hpriv->port_map;
-
-       spin_lock(&host->lock);
-
-       /*
-        * HOST_IRQ_STAT behaves as edge triggered latch meaning that
-        * it should be cleared before all the port events are cleared.
-        */
-       writel(irq_stat, mmio + HOST_IRQ_STAT);
-
-       rc = ahci_handle_port_intr(host, irq_masked);
-
-       spin_unlock(&host->lock);
-
-       VPRINTK("EXIT\n");
-
-       return IRQ_RETVAL(rc);
-}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
 
 static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
 {
@@ -2535,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
        int irq = hpriv->irq;
        int rc;
 
-       if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX))
+       if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+               if (hpriv->irq_handler)
+                       dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
+                                and custom irq handler implemented\n");
+
                rc = ahci_host_activate_multi_irqs(host, sht);
-       else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ)
-               rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr,
-                                      IRQF_SHARED, sht);
-       else
-               rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
+       } else {
+               rc = ata_host_activate(host, irq, hpriv->irq_handler,
                                       IRQF_SHARED, sht);
+       }
+
+
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_host_activate);
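A hedged sketch of how the new hook is meant to be consumed (hypothetical platform driver; the handler and sht names are assumptions, the two library calls are the ones shown in the hunks above): a driver with quirky HOST_IRQ_STAT behaviour installs its own handler before the host is activated, and ahci_save_initial_config() only falls back to ahci_single_level_irq_intr() when nothing was set.

	/* hypothetical probe fragment, for illustration only */
	hpriv->irq_handler = my_soc_ahci_irq_intr;    /* assumed custom handler */
	ahci_save_initial_config(&pdev->dev, hpriv);  /* keeps the override in place */
	rc = ahci_host_activate(host, &my_ahci_sht);  /* single-IRQ path uses it */
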
index 7e959f9..e417e1a 100644 (file)
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
                     int cmd, void __user *arg)
 {
-       int val = -EINVAL, rc = -EINVAL;
+       unsigned long val;
+       int rc = -EINVAL;
        unsigned long flags;
 
        switch (cmd) {
-       case ATA_IOC_GET_IO32:
+       case HDIO_GET_32BIT:
                spin_lock_irqsave(ap->lock, flags);
                val = ata_ioc32(ap);
                spin_unlock_irqrestore(ap->lock, flags);
-               if (copy_to_user(arg, &val, 1))
-                       return -EFAULT;
-               return 0;
+               return put_user(val, (unsigned long __user *)arg);
 
-       case ATA_IOC_SET_IO32:
+       case HDIO_SET_32BIT:
                val = (unsigned long) arg;
                rc = 0;
                spin_lock_irqsave(ap->lock, flags);
index 12fe0f3..c8b6a78 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/libata.h>
 #include <scsi/scsi_host.h>
 
+#include <asm/mach-rc32434/rb.h>
+
 #define DRV_NAME       "pata-rb532-cf"
 #define DRV_VERSION    "0.1.0"
 #define DRV_DESC       "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
        int gpio;
        struct resource *res;
        struct ata_host *ah;
+       struct cf_device *pdata;
        struct rb532_cf_info *info;
        int ret;
 
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       gpio = irq_to_gpio(irq);
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               dev_err(&pdev->dev, "no platform data specified\n");
+               return -EINVAL;
+       }
+
+       gpio = pdata->gpio_pin;
        if (gpio < 0) {
                dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
                return -ENOENT;
index d0da5d8..b583e53 100644 (file)
@@ -1818,6 +1818,28 @@ unsigned int get_random_int(void)
 }
 EXPORT_SYMBOL(get_random_int);
 
+/*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+       __u32 *hash;
+       unsigned long ret;
+
+       if (arch_get_random_long(&ret))
+               return ret;
+
+       hash = get_cpu_var(get_random_int_hash);
+
+       hash[0] += current->pid + jiffies + random_get_entropy();
+       md5_transform(hash, random_int_secret);
+       ret = *(unsigned long *)hash;
+       put_cpu_var(get_random_int_hash);
+
+       return ret;
+}
+EXPORT_SYMBOL(get_random_long);
+
 /*
  * randomize_range() returns a start address such that
  *
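A hedged usage sketch (the caller below is hypothetical; only get_random_long() itself comes from this hunk): the point of the helper is that 64-bit consumers, e.g. address-space randomization, no longer have to truncate get_random_int() down to 32 bits of entropy.

	/* hypothetical in-kernel caller, for illustration only */
	#include <linux/random.h>

	static unsigned long pick_random_offset(unsigned long range)
	{
		/* assumes range != 0; modulo bias is tolerated here */
		return get_random_long() % range;
	}
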
index 1c30038..cc73929 100644 (file)
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
 
        parent = clk_hw_get_parent(hw);
 
-       if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
+       if (clk_hw_get_rate(hw) ==
+           clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
                WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
                r = _omap3_noncore_dpll_bypass(clk);
        } else {
index 659879a..f935110 100644 (file)
@@ -296,6 +296,7 @@ endif
 config QORIQ_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
        depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+       depends on !CPU_THERMAL || THERMAL
        select CLK_QORIQ
        help
          This adds the CPUFreq driver support for Freescale QorIQ SoCs
index 0031069..14b1f93 100644 (file)
@@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
          SoCs.
 
 config ARM_MT8173_CPUFREQ
-       bool "Mediatek MT8173 CPUFreq support"
+       tristate "Mediatek MT8173 CPUFreq support"
        depends on ARCH_MEDIATEK && REGULATOR
        depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
-       depends on !CPU_THERMAL || THERMAL=y
+       depends on !CPU_THERMAL || THERMAL
        select PM_OPP
        help
          This adds the CPUFreq driver support for Mediatek MT8173 SoC.
index 1efba34..2058e6d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
index 848b93e..fe9dce0 100644 (file)
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
        clk_set_min_rate(tegra->emc_clock, rate);
        clk_set_rate(tegra->emc_clock, 0);
 
+       *freq = rate;
+
        return 0;
 }
 
index f2a0310..debca82 100644 (file)
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
                (PXA_DCMD_LENGTH & sizeof(u32));
        if (flags & DMA_PREP_INTERRUPT)
                updater->dcmd |= PXA_DCMD_ENDIRQEN;
+       if (sw_desc->cyclic)
+               sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
 }
 
 static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
                dev_dbg(&chan->vc.chan.dev->device,
                        "%s(): checking txd %p[%x]: completed=%d\n",
                        __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+               if (to_pxad_sw_desc(vd)->cyclic) {
+                       vchan_cyclic_callback(vd);
+                       break;
+               }
                if (is_desc_completed(vd)) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
                return NULL;
 
        pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
-       dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+       dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
                __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
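A minimal standalone sketch of the one-character fix above, with an assumed mask value for illustration: PXA_DCMD_LENGTH is the bit mask of the DCMD length field, so the period length must be ANDed with it; ORing it in sets every length bit and corrupts the descriptor.

	#include <stdint.h>
	#include <stdio.h>

	#define PXA_DCMD_LENGTH 0x01fff   /* assumed mask value, for illustration */

	int main(void)
	{
		uint32_t period_len = 512;
		uint32_t wrong = PXA_DCMD_LENGTH | period_len;  /* 0x1fff: bogus length */
		uint32_t right = PXA_DCMD_LENGTH & period_len;  /* 512: as intended */

		printf("wrong=%#x right=%#x\n", wrong, right);
		return 0;
	}
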
index cf41440..d9ab0cd 100644 (file)
@@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
+static void gpio_rcar_irq_bus_lock(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+       pm_runtime_get_sync(&p->pdev->dev);
+}
+
+static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+       pm_runtime_put(&p->pdev->dev);
+}
+
+
+static int gpio_rcar_irq_request_resources(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+       int error;
+
+       error = pm_runtime_get_sync(&p->pdev->dev);
+       if (error < 0)
+               return error;
+
+       return 0;
+}
+
+static void gpio_rcar_irq_release_resources(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+       pm_runtime_put(&p->pdev->dev);
+}
+
 static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 {
        struct gpio_rcar_priv *p = dev_id;
@@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        irq_chip->irq_unmask = gpio_rcar_irq_enable;
        irq_chip->irq_set_type = gpio_rcar_irq_set_type;
        irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
+       irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
+       irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
+       irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
+       irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
        irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
 
        ret = gpiochip_add_data(gpio_chip, p);
index 89c3dd6..119cdc2 100644 (file)
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
                        } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
                                /* Don't try to start link training before we
                                 * have the dpcd */
-                               if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+                               if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
                                        return;
 
                                /* set it to OFF so that drm_helper_connector_dpms()
index acd066d..8297bc3 100644 (file)
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 
        struct drm_crtc *crtc = &amdgpuCrtc->base;
        unsigned long flags;
-       unsigned i;
-       int vpos, hpos, stat, min_udelay;
+       unsigned i, repcnt = 4;
+       int vpos, hpos, stat, min_udelay = 0;
        struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
        amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
         * In practice this won't execute very often unless on very fast
         * machines because the time window for this to happen is very small.
         */
-       for (;;) {
+       while (amdgpuCrtc->enabled && repcnt--) {
                /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
                 * start in hpos, and to the "fudged earlier" vblank start in
                 * vpos.
@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
                /* Sleep at least until estimated real start of hw vblank */
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+               if (min_udelay > vblank->framedur_ns / 2000) {
+                       /* Don't wait ridiculously long - something is wrong */
+                       repcnt = 0;
+                       break;
+               }
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
        };
 
+       if (!repcnt)
+               DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+                                "framedur %d, linedur %d, stat %d, vpos %d, "
+                                "hpos %d\n", work->crtc_id, min_udelay,
+                                vblank->framedur_ns / 1000,
+                                vblank->linedur_ns / 1000, stat, vpos, hpos);
+
        /* do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
        /* set the flip status */
index 7380f78..d20c2a8 100644 (file)
@@ -596,7 +596,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                break;
        }
        ttm_eu_backoff_reservation(&ticket, &list);
-       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
+       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
+           !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 
        drm_gem_object_unreference_unlocked(gobj);
index 7d8d84e..95a4a25 100644 (file)
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
 
+       if  ((adev->flags & AMD_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        if (adev->pp_enabled) {
                enum amd_dpm_forced_level level;
 
@@ -140,6 +144,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
        enum amdgpu_dpm_forced_level level;
        int ret = 0;
 
+       /* Can't force performance level when the card is off */
+       if  ((adev->flags & AMD_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMDGPU_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        count = -EINVAL;
+                       mutex_unlock(&adev->pm.mutex);
                        goto fail;
                }
                ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                mutex_unlock(&adev->pm.mutex);
        }
 fail:
-       mutex_unlock(&adev->pm.mutex);
-
        return count;
 }
 
@@ -182,8 +190,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      char *buf)
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
+       struct drm_device *ddev = adev->ddev;
        int temp;
 
+       /* Can't get temperature when the card is off */
+       if  ((adev->flags & AMD_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
                temp = 0;
        else
@@ -634,11 +648,6 @@ force:
 
        /* update display watermarks based on new power state */
        amdgpu_display_bandwidth_update(adev);
-       /* update displays */
-       amdgpu_dpm_display_configuration_changed(adev);
-
-       adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-       adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 
        /* wait for the rings to drain */
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -655,6 +664,12 @@ force:
 
        amdgpu_dpm_post_set_power_state(adev);
 
+       /* update displays */
+       amdgpu_dpm_display_configuration_changed(adev);
+
+       adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+       adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
        if (adev->pm.funcs->force_performance_level) {
                if (adev->pm.dpm.thermal_active) {
                        enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
+       struct drm_device *ddev = adev->ddev;
 
        if (!adev->pm.dpm_enabled) {
                seq_printf(m, "dpm not enabled\n");
                return 0;
        }
-       if (adev->pp_enabled) {
+       if  ((adev->flags & AMD_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               seq_printf(m, "PX asic powered off\n");
+       } else if (adev->pp_enabled) {
                amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
        } else {
                mutex_lock(&adev->pm.mutex);
index b9d0d55..3cb6d6c 100644 (file)
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
                                        adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-       if (adev->pp_enabled)
+       if (adev->pp_enabled) {
                amdgpu_pm_sysfs_init(adev);
+               amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+       }
 #endif
        return ret;
 }
index 9056355..e7ef226 100644 (file)
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
                                                            AMD_PG_STATE_GATE);
 
                                cz_enable_vce_dpm(adev, false);
-                               /* TODO: to figure out why vce can't be poweroff. */
-                               /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+                               cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
                                pi->vce_power_gated = true;
                        } else {
                                cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
                }
        } else { /*pi->caps_vce_pg*/
                cz_update_vce_dpm(adev);
-               cz_enable_vce_dpm(adev, true);
+               cz_enable_vce_dpm(adev, !gate);
        }
-
-       return;
 }
 
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
index 7732059..06602df 100644 (file)
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vm_id, uint64_t pd_addr)
 {
        int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+                                WAIT_REG_MEM_FUNCTION(3) | /* equal */
+                                WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, 0xffffffff);
+       amdgpu_ring_write(ring, 4); /* poll interval */
+
        if (usepfp) {
                /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
index 8f8ec37..7086ac1 100644 (file)
@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-                WAIT_REG_MEM_FUNCTION(3))); /* equal */
+                                WAIT_REG_MEM_FUNCTION(3) | /* equal */
+                                WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq);
@@ -4995,7 +4996,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
        case AMDGPU_IRQ_STATE_ENABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-                                           PRIV_REG_INT_ENABLE, 0);
+                                           PRIV_REG_INT_ENABLE, 1);
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        default:
index aa67244..589599f 100644 (file)
@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
 
                data.requested_ui_label = power_state_convert(ps);
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+               break;
        }
-       break;
+       case AMD_PP_EVENT_COMPLETE_INIT:
+               ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+               break;
        default:
                break;
        }
index 83be3cf..6b52c78 100644 (file)
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
 };
 
 static const pem_event_action *complete_init_event[] = {
+       unblock_adjust_power_state_tasks,
        adjust_power_state_tasks,
        enable_gfx_clock_gating_tasks,
        enable_gfx_voltage_island_power_gating_tasks,
index 52a3efc..46410e3 100644 (file)
@@ -31,7 +31,7 @@
 static int pem_init(struct pp_eventmgr *eventmgr)
 {
        int result = 0;
-       struct pem_event_data event_data;
+       struct pem_event_data event_data = { {0} };
 
        /* Initialize PowerPlay feature info */
        pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
 
 static void pem_fini(struct pp_eventmgr *eventmgr)
 {
-       struct pem_event_data event_data;
+       struct pem_event_data event_data = { {0} };
 
        pem_uninit_featureInfo(eventmgr);
        pem_unregister_interrupts(eventmgr);
index ad77008..ff08ce4 100644 (file)
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                }
        } else {
                cz_dpm_update_vce_dpm(hwmgr);
-               cz_enable_disable_vce_dpm(hwmgr, true);
+               cz_enable_disable_vce_dpm(hwmgr, !bgate);
                return 0;
        }
 
index 9759009..b1480ac 100644 (file)
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
        } while (ast_read32(ast, 0x10000) != 0x01);
        data = ast_read32(ast, 0x10004);
 
-       if (data & 0x400)
+       if (data & 0x40)
                ast->dram_bus_width = 16;
        else
                ast->dram_bus_width = 32;
index 0fc38bb..cf39ed3 100644 (file)
@@ -825,8 +825,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                }
 
                for_each_pipe(dev_priv, pipe) {
-                       if (!intel_display_power_is_enabled(dev_priv,
-                                               POWER_DOMAIN_PIPE(pipe))) {
+                       enum intel_display_power_domain power_domain;
+
+                       power_domain = POWER_DOMAIN_PIPE(pipe);
+                       if (!intel_display_power_get_if_enabled(dev_priv,
+                                                               power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
@@ -840,6 +843,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                        seq_printf(m, "Pipe %c IER:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+                       intel_display_power_put(dev_priv, power_domain);
                }
 
                seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
                                                                        pipe));
+       enum intel_display_power_domain power_domain;
        u32 val = 0; /* shut up gcc */
        int ret;
 
@@ -3995,7 +4001,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
        if (pipe_crc->source && source)
                return -EINVAL;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
                return -EIO;
        }
@@ -4012,7 +4019,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
        if (ret != 0)
-               return ret;
+               goto out;
 
        /* none -> real source transition */
        if (source) {
@@ -4024,8 +4031,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
                                  sizeof(pipe_crc->entries[0]),
                                  GFP_KERNEL);
-               if (!entries)
-                       return -ENOMEM;
+               if (!entries) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
                /*
                 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                hsw_enable_ips(crtc);
        }
 
-       return 0;
+       ret = 0;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 /*
index e7cd311..b0847b9 100644 (file)
@@ -751,6 +751,7 @@ struct intel_csr {
        uint32_t mmio_count;
        i915_reg_t mmioaddr[8];
        uint32_t mmiodata[8];
+       uint32_t dc_state;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
index 9c89df1..a7b4a52 100644 (file)
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        enum intel_display_power_domain power_domain;
        u32 tmp;
+       bool ret;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = false;
+
        tmp = I915_READ(crt->adpa_reg);
 
        if (!(tmp & ADPA_DAC_ENABLE))
-               return false;
+               goto out;
 
        if (HAS_PCH_CPT(dev))
                *pipe = PORT_TO_PIPE_CPT(tmp);
        else
                *pipe = PORT_TO_PIPE(tmp);
 
-       return true;
+       ret = true;
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
index 9bb63a8..647d85e 100644 (file)
@@ -240,6 +240,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
                I915_WRITE(dev_priv->csr.mmioaddr[i],
                           dev_priv->csr.mmiodata[i]);
        }
+
+       dev_priv->csr.dc_state = 0;
 }
 
 static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
index 54a165b..0f3df2c 100644 (file)
@@ -1969,13 +1969,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
        enum transcoder cpu_transcoder;
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
+       bool ret;
 
        power_domain = intel_display_port_power_domain(intel_encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
-               return false;
+       if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+               ret = false;
+               goto out;
+       }
 
        if (port == PORT_A)
                cpu_transcoder = TRANSCODER_EDP;
@@ -1987,23 +1990,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
        switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
        case TRANS_DDI_MODE_SELECT_HDMI:
        case TRANS_DDI_MODE_SELECT_DVI:
-               return (type == DRM_MODE_CONNECTOR_HDMIA);
+               ret = type == DRM_MODE_CONNECTOR_HDMIA;
+               break;
 
        case TRANS_DDI_MODE_SELECT_DP_SST:
-               if (type == DRM_MODE_CONNECTOR_eDP)
-                       return true;
-               return (type == DRM_MODE_CONNECTOR_DisplayPort);
+               ret = type == DRM_MODE_CONNECTOR_eDP ||
+                     type == DRM_MODE_CONNECTOR_DisplayPort;
+               break;
+
        case TRANS_DDI_MODE_SELECT_DP_MST:
                /* if the transcoder is in MST state then
                 * connector isn't connected */
-               return false;
+               ret = false;
+               break;
 
        case TRANS_DDI_MODE_SELECT_FDI:
-               return (type == DRM_MODE_CONNECTOR_VGA);
+               ret = type == DRM_MODE_CONNECTOR_VGA;
+               break;
 
        default:
-               return false;
+               ret = false;
+               break;
        }
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2015,15 +2028,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        enum intel_display_power_domain power_domain;
        u32 tmp;
        int i;
+       bool ret;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = false;
+
        tmp = I915_READ(DDI_BUF_CTL(port));
 
        if (!(tmp & DDI_BUF_CTL_ENABLE))
-               return false;
+               goto out;
 
        if (port == PORT_A) {
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2041,25 +2057,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                        break;
                }
 
-               return true;
-       } else {
-               for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
-                       tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+               ret = true;
 
-                       if ((tmp & TRANS_DDI_PORT_MASK)
-                           == TRANS_DDI_SELECT_PORT(port)) {
-                               if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
-                                       return false;
+               goto out;
+       }
 
-                               *pipe = i;
-                               return true;
-                       }
+       for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+               tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+               if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
+                       if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+                           TRANS_DDI_MODE_SELECT_DP_MST)
+                               goto out;
+
+                       *pipe = i;
+                       ret = true;
+
+                       goto out;
                }
        }
 
        DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
 
-       return false;
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2508,12 +2531,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(WRPLL_CTL(pll->id));
        hw_state->wrpll = val;
 
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
        return val & WRPLL_PLL_ENABLE;
 }
 
@@ -2523,12 +2548,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(SPLL_CTL);
        hw_state->spll = val;
 
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
        return val & SPLL_PLL_ENABLE;
 }
 
@@ -2645,16 +2672,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
        uint32_t val;
        unsigned int dpll;
        const struct skl_dpll_regs *regs = skl_dpll_regs;
+       bool ret;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
+       ret = false;
+
        /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
        dpll = pll->id + 1;
 
        val = I915_READ(regs[pll->id].ctl);
        if (!(val & LCPLL_PLL_ENABLE))
-               return false;
+               goto out;
 
        val = I915_READ(DPLL_CTRL1);
        hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2664,8 +2694,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
                hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
        }
+       ret = true;
 
-       return true;
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+       return ret;
 }
 
 static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2932,13 +2966,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        enum port port = (enum port)pll->id;    /* 1:1 port->PLL mapping */
        uint32_t val;
+       bool ret;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
+       ret = false;
+
        val = I915_READ(BXT_PORT_PLL_ENABLE(port));
        if (!(val & PORT_PLL_ENABLE))
-               return false;
+               goto out;
 
        hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
        hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2985,7 +3022,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
        hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
-       return true;
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+       return ret;
 }
 
 static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3120,11 +3162,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 {
        u32 temp;
 
-       if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+       if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
                temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+
+               intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
                if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
                        return true;
        }
+
        return false;
 }
 
index 5feb657..46947ff 100644 (file)
@@ -1351,18 +1351,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
+       enum intel_display_power_domain power_domain;
 
        /* if we need the pipe quirk it must be always on */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;
 
-       if (!intel_display_power_is_enabled(dev_priv,
-                               POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
-               cur_state = false;
-       } else {
+       power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+       if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);
+
+               intel_display_power_put(dev_priv, power_domain);
+       } else {
+               cur_state = false;
        }
 
        I915_STATE_WARN(cur_state != state,
@@ -8171,18 +8174,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
        uint32_t tmp;
+       bool ret;
 
-       if (!intel_display_power_is_enabled(dev_priv,
-                                           POWER_DOMAIN_PIPE(crtc->pipe)))
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
+       ret = false;
+
        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
-               return false;
+               goto out;
 
        if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;
 
-       return true;
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_display_power_domain power_domain;
        uint32_t tmp;
+       bool ret;
 
-       if (!intel_display_power_is_enabled(dev_priv,
-                                           POWER_DOMAIN_PIPE(crtc->pipe)))
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
+       ret = false;
        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
-               return false;
+               goto out;
 
        switch (tmp & PIPECONF_BPC_MASK) {
        case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
        ironlake_get_pfit_config(crtc, pipe_config);
 
-       return true;
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum intel_display_power_domain pfit_domain;
+       enum intel_display_power_domain power_domain;
+       unsigned long power_domain_mask;
        uint32_t tmp;
+       bool ret;
 
-       if (!intel_display_power_is_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
+       power_domain_mask = BIT(power_domain);
+
+       ret = false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
        }
 
-       if (!intel_display_power_is_enabled(dev_priv,
-                       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
-               return false;
+       power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               goto out;
+       power_domain_mask |= BIT(power_domain);
 
        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
        if (!(tmp & PIPECONF_ENABLE))
-               return false;
+               goto out;
 
        haswell_get_ddi_port_state(crtc, pipe_config);
 
@@ -9998,14 +10024,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                skl_init_scalers(dev, crtc, pipe_config);
        }
 
-       pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-
        if (INTEL_INFO(dev)->gen >= 9) {
                pipe_config->scaler_state.scaler_id = -1;
                pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
        }
 
-       if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+       power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+       if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+               power_domain_mask |= BIT(power_domain);
                if (INTEL_INFO(dev)->gen >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
@@ -10023,7 +10049,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                pipe_config->pixel_multiplier = 1;
        }
 
-       return true;
+       ret = true;
+
+out:
+       for_each_power_domain(power_domain, power_domain_mask)
+               intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -13630,7 +13662,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(PCH_DPLL(pll->id));
@@ -13638,6 +13670,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
        hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
        hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
 
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
        return val & DPLL_VCO_ENABLE;
 }
 
@@ -15568,10 +15602,12 @@ void i915_redisable_vga(struct drm_device *dev)
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
+       if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;
 
        i915_redisable_vga_power_on(dev);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 }
 
 static bool primary_get_hw_state(struct intel_plane *plane)
index 1bbd67b..1d8de43 100644 (file)
@@ -2362,15 +2362,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;
+       bool ret;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = false;
+
        tmp = I915_READ(intel_dp->output_reg);
 
        if (!(tmp & DP_PORT_EN))
-               return false;
+               goto out;
 
        if (IS_GEN7(dev) && port == PORT_A) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                        u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
                        if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
                                *pipe = p;
-                               return true;
+                               ret = true;
+
+                               goto out;
                        }
                }
 
@@ -2393,7 +2398,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                *pipe = PORT_TO_PIPE(tmp);
        }
 
-       return true;
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static void intel_dp_get_config(struct intel_encoder *encoder,
index ea54158..df7f3cb 100644 (file)
@@ -1428,6 +1428,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+                                       enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
 
@@ -1514,6 +1516,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
        enable_rpm_wakeref_asserts(dev_priv)
 
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 
index 44742fa..0193c62 100644 (file)
@@ -664,13 +664,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        enum intel_display_power_domain power_domain;
        enum port port;
+       bool ret;
 
        DRM_DEBUG_KMS("\n");
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = false;
+
        /* XXX: this only works for one DSI output */
        for_each_dsi_port(port, intel_dsi->ports) {
                i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
                        if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
                                *pipe = port == PORT_A ? PIPE_A : PIPE_B;
-                               return true;
+                               ret = true;
+
+                               goto out;
                        }
                }
        }
+out:
+       intel_display_power_put(dev_priv, power_domain);
 
-       return false;
+       return ret;
 }
 
 static void intel_dsi_get_config(struct intel_encoder *encoder,
index 4a77639..cb5d1b1 100644 (file)
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        enum intel_display_power_domain power_domain;
        u32 tmp;
+       bool ret;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = false;
+
        tmp = I915_READ(intel_hdmi->hdmi_reg);
 
        if (!(tmp & SDVO_ENABLE))
-               return false;
+               goto out;
 
        if (HAS_PCH_CPT(dev))
                *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
        else
                *pipe = PORT_TO_PIPE(tmp);
 
-       return true;
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static void intel_hdmi_get_config(struct intel_encoder *encoder,
index 0da0240..bc04d8d 100644 (file)
@@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        enum intel_display_power_domain power_domain;
        u32 tmp;
+       bool ret;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_is_enabled(dev_priv, power_domain))
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = false;
+
        tmp = I915_READ(lvds_encoder->reg);
 
        if (!(tmp & LVDS_PORT_EN))
-               return false;
+               goto out;
 
        if (HAS_PCH_CPT(dev))
                *pipe = PORT_TO_PIPE_CPT(tmp);
        else
                *pipe = PORT_TO_PIPE(tmp);
 
-       return true;
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 static void intel_lvds_get_config(struct intel_encoder *encoder,
index a234687..b28c29f 100644 (file)
@@ -2829,7 +2829,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
        memset(ddb, 0, sizeof(*ddb));
 
        for_each_pipe(dev_priv, pipe) {
-               if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+               enum intel_display_power_domain power_domain;
+
+               power_domain = POWER_DOMAIN_PIPE(pipe);
+               if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                        continue;
 
                for_each_plane(dev_priv, pipe, plane) {
@@ -2841,6 +2844,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                val = I915_READ(CUR_BUF_CFG(pipe));
                skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
                                           val);
+
+               intel_display_power_put(dev_priv, power_domain);
        }
 }
 
index ddbdbff..4f43d9b 100644 (file)
@@ -470,6 +470,43 @@ static void gen9_set_dc_state_debugmask_memory_up(
        }
 }
 
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+                               u32 state)
+{
+       int rewrites = 0;
+       int rereads = 0;
+       u32 v;
+
+       I915_WRITE(DC_STATE_EN, state);
+
+       /* It has been observed that disabling the dc6 state sometimes
+        * doesn't stick and dmc keeps returning old value. Make sure
+        * the write really sticks enough times and also force rewrite until
+        * we are confident that state is exactly what we want.
+        */
+       do {
+               v = I915_READ(DC_STATE_EN);
+
+               if (v != state) {
+                       I915_WRITE(DC_STATE_EN, state);
+                       rewrites++;
+                       rereads = 0;
+               } else if (rereads++ > 5) {
+                       break;
+               }
+
+       } while (rewrites < 100);
+
+       if (v != state)
+               DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+                         state, v);
+
+       /* Most of the time we need just one retry, avoid spam */
+       if (rewrites > 1)
+               DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+                             state, rewrites);
+}
+
 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
 {
        uint32_t val;
@@ -494,10 +531,18 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
        val = I915_READ(DC_STATE_EN);
        DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
                      val & mask, state);
+
+       /* Check if DMC is ignoring our DC state requests */
+       if ((val & mask) != dev_priv->csr.dc_state)
+               DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+                         dev_priv->csr.dc_state, val & mask);
+
        val &= ~mask;
        val |= state;
-       I915_WRITE(DC_STATE_EN, val);
-       POSTING_READ(DC_STATE_EN);
+
+       gen9_write_dc_state(dev_priv, val);
+
+       dev_priv->csr.dc_state = val & mask;
 }
 
 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
        chv_set_pipe_power_well(dev_priv, power_well, false);
 }
 
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+                                enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       for_each_power_well(i, power_well, BIT(domain), power_domains) {
+               if (!power_well->count++)
+                       intel_power_well_enable(dev_priv, power_well);
+       }
+
+       power_domains->domain_use_count[domain]++;
+}
+
 /**
  * intel_display_power_get - grab a power domain reference
  * @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
 {
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       int i;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
        intel_runtime_pm_get(dev_priv);
 
-       power_domains = &dev_priv->power_domains;
+       mutex_lock(&power_domains->lock);
+
+       __intel_display_power_get_domain(dev_priv, domain);
+
+       mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain if, and only if,
+ * the power domain is currently enabled, and returns true in that case. If
+ * the domain is not enabled, no reference is taken and false is returned.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+                                       enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       bool is_enabled;
+
+       if (!intel_runtime_pm_get_if_in_use(dev_priv))
+               return false;
 
        mutex_lock(&power_domains->lock);
 
-       for_each_power_well(i, power_well, BIT(domain), power_domains) {
-               if (!power_well->count++)
-                       intel_power_well_enable(dev_priv, power_well);
+       if (__intel_display_power_is_enabled(dev_priv, domain)) {
+               __intel_display_power_get_domain(dev_priv, domain);
+               is_enabled = true;
+       } else {
+               is_enabled = false;
        }
 
-       power_domains->domain_use_count[domain]++;
-
        mutex_unlock(&power_domains->lock);
+
+       if (!is_enabled)
+               intel_runtime_pm_put(dev_priv);
+
+       return is_enabled;
 }
 
 /**
@@ -2213,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
  */
 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 {
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-               skl_display_core_uninit(dev_priv);
-
        /*
         * Even if power well support was disabled we still want to disable
         * power wells while we are system suspended.
         */
        if (!i915.disable_power_well)
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               skl_display_core_uninit(dev_priv);
 }
 
 /**
@@ -2245,6 +2335,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
        assert_rpm_wakelock_held(dev_priv);
 }
 
+/**
+ * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference if the device is
+ * already in use and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (IS_ENABLED(CONFIG_PM)) {
+               int ret = pm_runtime_get_if_in_use(device);
+
+               /*
+                * In cases where runtime PM is disabled by the RPM core and we
+                * get an -EINVAL return value, we are not supposed to call this
+                * function, since the power state is undefined. At the moment
+                * this applies to the late/early system suspend/resume handlers.
+                */
+               WARN_ON_ONCE(ret < 0);
+               if (ret <= 0)
+                       return false;
+       }
+
+       atomic_inc(&dev_priv->pm.wakeref_count);
+       assert_rpm_wakelock_held(dev_priv);
+
+       return true;
+}
+
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
  * @dev_priv: i915 device instance
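The two helpers added above, intel_display_power_get_if_enabled() and intel_runtime_pm_get_if_in_use(), only take a reference when the hardware is already powered, and every successful call must be balanced by a put. A minimal sketch of the caller pattern the i915 get_hw_state hunks in this series convert to (EXAMPLE_REG and EXAMPLE_ENABLE are placeholders, not registers from the patch):

	static bool example_get_hw_state(struct drm_i915_private *dev_priv,
					 enum intel_display_power_domain domain)
	{
		bool ret = false;
		u32 tmp;

		/* take a reference only if the domain is already powered up */
		if (!intel_display_power_get_if_enabled(dev_priv, domain))
			return false;

		tmp = I915_READ(EXAMPLE_REG);
		if (!(tmp & EXAMPLE_ENABLE))
			goto out;

		ret = true;
	out:
		/* every successful get must be balanced by a put */
		intel_display_power_put(dev_priv, domain);
		return ret;
	}

Where several domains are needed, as in haswell_get_pipe_config(), the acquired domains are collected in a bitmask and released together with for_each_power_domain() on the single exit path.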
index 8a70cec..2dfe58a 100644 (file)
@@ -24,7 +24,7 @@
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
        const struct nvkm_device_tegra_func *func;
-       struct nvkm_device *device;
+       struct nvkm_device *device = NULL;
        struct drm_device *drm;
        int ret;
 
index 7f8a427..e7e581d 100644 (file)
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 
        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;
-       *pdevice = &tdev->device;
+
        tdev->func = func;
        tdev->pdev = pdev;
        tdev->irq = -1;
 
        tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
-       if (IS_ERR(tdev->vdd))
-               return PTR_ERR(tdev->vdd);
+       if (IS_ERR(tdev->vdd)) {
+               ret = PTR_ERR(tdev->vdd);
+               goto free;
+       }
 
        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
-       if (IS_ERR(tdev->rst))
-               return PTR_ERR(tdev->rst);
+       if (IS_ERR(tdev->rst)) {
+               ret = PTR_ERR(tdev->rst);
+               goto free;
+       }
 
        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
-       if (IS_ERR(tdev->clk))
-               return PTR_ERR(tdev->clk);
+       if (IS_ERR(tdev->clk)) {
+               ret = PTR_ERR(tdev->clk);
+               goto free;
+       }
 
        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
-       if (IS_ERR(tdev->clk_pwr))
-               return PTR_ERR(tdev->clk_pwr);
+       if (IS_ERR(tdev->clk_pwr)) {
+               ret = PTR_ERR(tdev->clk_pwr);
+               goto free;
+       }
 
        nvkm_device_tegra_probe_iommu(tdev);
 
        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
-               return ret;
+               goto remove;
 
        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
-               return ret;
+               goto powerdown;
+
+       *pdevice = &tdev->device;
 
        return 0;
+
+powerdown:
+       nvkm_device_tegra_power_down(tdev);
+remove:
+       nvkm_device_tegra_remove_iommu(tdev);
+free:
+       kfree(tdev);
+       return ret;
 }
 #else
 int
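The Tegra constructor above is reworked into the usual goto-unwind shape: *pdevice is published only once every step has succeeded, and each failure jumps to a label that undoes exactly the steps completed so far, in reverse order. A minimal sketch of that idiom (step_a/step_b and their undo helpers are placeholders, not nouveau code):

	int example_new(struct example **pobj)
	{
		struct example *obj;
		int ret;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return -ENOMEM;

		ret = step_a(obj);
		if (ret)
			goto free;

		ret = step_b(obj);
		if (ret)
			goto undo_a;		/* unwind in reverse order of setup */

		*pobj = obj;			/* publish only on full success */
		return 0;

	undo_a:
		undo_step_a(obj);
	free:
		kfree(obj);
		return ret;
	}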
index 74e2f7c..9688970 100644 (file)
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
                .outp = outp,
        }, *dp = &_dp;
        u32 datarate = 0;
+       u8  pwr;
        int ret;
 
        if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
        /* disable link interrupt handling during link training */
        nvkm_notify_put(&outp->irq);
 
+       /* ensure sink is not in a low-power state */
+       if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+               if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+                       pwr &= ~DPCD_SC00_SET_POWER;
+                       pwr |=  DPCD_SC00_SET_POWER_D0;
+                       nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+               }
+       }
+
        /* enable down-spreading and execute pre-train script from vbios */
        dp_link_train_init(dp, outp->dpcd[3] & 0x01);
 
index 9596290..6e10c5e 100644 (file)
 #define DPCD_LS0C_LANE1_POST_CURSOR2                                       0x0c
 #define DPCD_LS0C_LANE0_POST_CURSOR2                                       0x03
 
+/* DPCD Sink Control */
+#define DPCD_SC00                                                       0x00600
+#define DPCD_SC00_SET_POWER                                                0x03
+#define DPCD_SC00_SET_POWER_D0                                             0x01
+#define DPCD_SC00_SET_POWER_D3                                             0x03
+
 void nvkm_dp_train(struct work_struct *);
 #endif
index 902b59c..4197ca1 100644 (file)
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
-       drm_helper_hpd_irq_event(dev);
 
        /* set the power state here in case we are a PX system or headless */
        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
index 298ea1c..2b9ba03 100644 (file)
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct drm_crtc *crtc = &radeon_crtc->base;
        unsigned long flags;
        int r;
-       int vpos, hpos, stat, min_udelay;
+       int vpos, hpos, stat, min_udelay = 0;
+       unsigned repcnt = 4;
        struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
         down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
         * In practice this won't execute very often unless on very fast
         * machines because the time window for this to happen is very small.
         */
-       for (;;) {
+       while (radeon_crtc->enabled && repcnt--) {
                /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
                 * start in hpos, and to the "fudged earlier" vblank start in
                 * vpos.
@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
                /* Sleep at least until estimated real start of hw vblank */
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+               if (min_udelay > vblank->framedur_ns / 2000) {
+                       /* Don't wait ridiculously long - something is wrong */
+                       repcnt = 0;
+                       break;
+               }
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
        };
 
+       if (!repcnt)
+               DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+                                "framedur %d, linedur %d, stat %d, vpos %d, "
+                                "hpos %d\n", work->crtc_id, min_udelay,
+                                vblank->framedur_ns / 1000,
+                                vblank->linedur_ns / 1000, stat, vpos, hpos);
+
        /* do the flip (mmio) */
        radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
 
index 248c5a9..0f14d89 100644 (file)
@@ -1079,12 +1079,6 @@ force:
 
        /* update display watermarks based on new power state */
        radeon_bandwidth_update(rdev);
-       /* update displays */
-       radeon_dpm_display_configuration_changed(rdev);
-
-       rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
-       rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
-       rdev->pm.dpm.single_display = single_display;
 
        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1101,6 +1095,13 @@ force:
 
        radeon_dpm_post_set_power_state(rdev);
 
+       /* update displays */
+       radeon_dpm_display_configuration_changed(rdev);
+
+       rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+       rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+       rdev->pm.dpm.single_display = single_display;
+
        if (rdev->asic->dpm.force_performance_level) {
                if (rdev->pm.dpm.thermal_active) {
                        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
index da462af..dd2dbb9 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/host1x.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 
 #include "bus.h"
 #include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
        device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
        device->dev.dma_mask = &device->dev.coherent_dma_mask;
        dev_set_name(&device->dev, "%s", driver->driver.name);
+       of_dma_configure(&device->dev, host1x->dev->of_node);
        device->dev.release = host1x_device_release;
        device->dev.bus = &host1x_bus_type;
        device->dev.parent = host1x->dev;
index 314bf37..ff34869 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
        .nb_bases       = 8,
        .init           = host1x01_init,
        .sync_offset    = 0x3000,
+       .dma_mask       = DMA_BIT_MASK(32),
 };
 
 static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
        .nb_bases = 12,
        .init = host1x02_init,
        .sync_offset = 0x3000,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
        .nb_bases = 64,
        .init = host1x04_init,
        .sync_offset = 0x2100,
+       .dma_mask = DMA_BIT_MASK(34),
 };
 
 static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
        .nb_bases = 64,
        .init = host1x05_init,
        .sync_offset = 0x2100,
+       .dma_mask = DMA_BIT_MASK(34),
 };
 
 static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
        if (IS_ERR(host->regs))
                return PTR_ERR(host->regs);
 
+       dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+
        if (host->info->init) {
                err = host->info->init(host);
                if (err)
index 0b6e8e9..dace124 100644 (file)
@@ -96,6 +96,7 @@ struct host1x_info {
        int     nb_mlocks;              /* host1x: number of mlocks */
        int     (*init)(struct host1x *); /* initialize per SoC ops */
        int     sync_offset;
+       u64     dma_mask;               /* mask of addressable memory */
 };
 
 struct host1x {
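The host1x changes above move the addressable-memory mask into the per-SoC host1x_info (32 bits for host1x01/02, 34 bits for host1x04/05), apply it with dma_set_mask_and_coherent() at probe time, and let child devices inherit their DMA configuration via of_dma_configure(). dma_set_mask_and_coherent() can fail on platforms that cannot satisfy the mask; a hedged sketch of the more defensive call a driver would normally make (the error handling is illustrative, the hunk itself ignores the return value):

	err = dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
	if (err < 0)
		dev_warn(host->dev, "failed to set DMA mask: %d\n", err);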
index 3711df1..4a45408 100644 (file)
@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
        if (!dev)
                return -ENOMEM;
 
-       dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
-                                      GFP_KERNEL);
+       dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
        if (!dev->bsc_regmap)
                return -ENOMEM;
 
index 00da80e..94b80a5 100644 (file)
@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                printk(KERN_WARNING "Couldn't query the device attributes\n");
+               ib_cache_cleanup_one(device);
                goto out;
        }
 
index f334090..1e37f35 100644 (file)
@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
                }
        }
 
-       if (rec->hop_limit > 1 || use_roce) {
+       if (rec->hop_limit > 0 || use_roce) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = rec->dgid;
 
index 6ffc9c4..6c6fbff 100644 (file)
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                   resp_size);
        INIT_UDATA(&uhw, buf + sizeof(cmd),
                   (unsigned long)cmd.response + resp_size,
-                  in_len - sizeof(cmd), out_len - resp_size);
+                  in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - resp_size);
 
        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 
        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
-                  in_len - sizeof cmd, out_len - sizeof resp);
+                  in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - sizeof resp);
 
        ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
        if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 
        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
-                  in_len - sizeof cmd, out_len - sizeof resp);
+                  in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - sizeof resp);
 
        ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
        if (ret)
index 4659256..3b2ddd6 100644 (file)
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
 
 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                           struct mlx5_create_srq_mbox_in **in,
-                          struct ib_udata *udata, int buf_size, int *inlen)
+                          struct ib_udata *udata, int buf_size, int *inlen,
+                          int is_xrc)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        int ncont;
        u32 offset;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;
-       int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
 
-       if (drv_data < 0)
-               return -EINVAL;
-
-       ucmdlen = (drv_data < sizeof(ucmd)) ?
-                 drv_data : sizeof(ucmd);
+       ucmdlen = min(udata->inlen, sizeof(ucmd));
 
        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;
 
-       if (drv_data > sizeof(ucmd) &&
+       if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
-                                drv_data - sizeof(ucmd)))
+                                udata->inlen - sizeof(ucmd)))
                return -EINVAL;
 
-       err = get_srq_user_index(to_mucontext(pd->uobject->context),
-                                &ucmd, udata->inlen, &uidx);
-       if (err)
-               return err;
+       if (is_xrc) {
+               err = get_srq_user_index(to_mucontext(pd->uobject->context),
+                                        &ucmd, udata->inlen, &uidx);
+               if (err)
+                       return err;
+       }
 
        srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
 
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
 
-       if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+       if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+            is_xrc) {
                xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
                                     xrc_srq_context_entry);
                MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
 
 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                             struct mlx5_create_srq_mbox_in **in, int buf_size,
-                            int *inlen)
+                            int *inlen, int is_xrc)
 {
        int err;
        int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 
        (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
 
-       if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+       if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+            is_xrc){
                xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
                                     xrc_srq_context_entry);
                /* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
                    srq->msrq.max_avail_gather);
 
+       is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+
        if (pd->uobject)
-               err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+               err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
+                                     is_xrc);
        else
-               err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+               err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
+                                       is_xrc);
 
        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                goto err_srq;
        }
 
-       is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
        in->ctx.state_log_sz = ilog2(srq->msrq.max);
        flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
        xrcdn = 0;
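
create_srq_user() above now follows the usual uverbs compatibility pattern: copy at most the structure size this driver version knows about, and accept a longer user buffer only when the unknown tail is zeroed; the user-index lookup is done only for XRC SRQs, matching the is_xrc gating of the context write further down. A userspace sketch of the truncate-and-check-tail part, with plain memcpy() standing in for ib_copy_from_udata() and ib_is_udata_cleared(), and a made-up two-field command:

    /* Illustrative only: ucmd_v1 is a made-up command layout. */
    #include <stdio.h>
    #include <string.h>

    struct ucmd_v1 { unsigned int flags, reserved; };

    static int tail_is_cleared(const unsigned char *buf, size_t len)
    {
        while (len--)
            if (*buf++)
                return 0;
        return 1;
    }

    static int parse_ucmd(const void *udata, size_t inlen, struct ucmd_v1 *out)
    {
        size_t ucmdlen = inlen < sizeof(*out) ? inlen : sizeof(*out);

        memset(out, 0, sizeof(*out));
        memcpy(out, udata, ucmdlen);

        /* A newer userspace may pass a longer struct; accept it only if
         * the fields we do not understand are all zero. */
        if (inlen > sizeof(*out) &&
            !tail_is_cleared((const unsigned char *)udata + sizeof(*out),
                             inlen - sizeof(*out)))
            return -1;
        return 0;
    }

    int main(void)
    {
        unsigned char blob[16] = { 1 };    /* flags = 1 on little-endian, tail zeroed */
        struct ucmd_v1 cmd;
        int ret = parse_ucmd(blob, sizeof(blob), &cmd);

        printf("parse: %d, flags: %u\n", ret, cmd.flags);
        return 0;
    }
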
index e5e2239..374c129 100644 (file)
@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
 static int protection_domain_init(struct protection_domain *domain);
+static void detach_device(struct device *dev);
 
 /*
  * For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
        if (!dev_data)
                return;
 
+       if (dev_data->domain)
+               detach_device(dev);
+
        iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
                            dev);
 
index 013bdff..bf4959f 100644 (file)
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
 static void init_device_table_dma(void);
 
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+                                   u8 bank, u8 cntr, u8 fxn,
+                                   u64 *value, bool is_write);
+
 static inline void update_last_devid(u16 devid)
 {
        if (devid > amd_iommu_last_bdf)
@@ -1015,6 +1019,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
 }
 
+/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ *     BIOS should enable ATS write permission check by setting
+ *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+       u32 value;
+
+       if ((boot_cpu_data.x86 != 0x15) ||
+           (boot_cpu_data.x86_model < 0x30) ||
+           (boot_cpu_data.x86_model > 0x3f))
+               return;
+
+       /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+       value = iommu_read_l2(iommu, 0x47);
+
+       if (value & BIT(0))
+               return;
+
+       /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+       iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+       pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
+               dev_name(&iommu->dev->dev));
+}
+
 /*
  * This function clues the initialization function for one IOMMU
  * together and also allocates the command buffer and programs the
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
        amd_iommu_pc_present = true;
 
        /* Check if the performance counters can be written to */
-       if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
-           (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+       if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
+           (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
            (val != val2)) {
                pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
                amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        }
 
        amd_iommu_erratum_746_workaround(iommu);
+       amd_iommu_ats_write_check_workaround(iommu);
 
        iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
                                               amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
 
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+                                   u8 bank, u8 cntr, u8 fxn,
                                    u64 *value, bool is_write)
 {
-       struct amd_iommu *iommu;
        u32 offset;
        u32 max_offset_lim;
 
-       /* Make sure the IOMMU PC resource is available */
-       if (!amd_iommu_pc_present)
-               return -ENODEV;
-
-       /* Locate the iommu associated with the device ID */
-       iommu = amd_iommu_rlookup_table[devid];
-
        /* Check for valid iommu and pc register indexing */
-       if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+       if (WARN_ON((fxn > 0x28) || (fxn & 7)))
                return -ENODEV;
 
        offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
        return 0;
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+                                   u64 *value, bool is_write)
+{
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+       /* Make sure the IOMMU PC resource is available */
+       if (!amd_iommu_pc_present || iommu == NULL)
+               return -ENODEV;
+
+       return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
+                                       value, is_write);
+}
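
The ATS write-check workaround added above is a conditional read-modify-write of one bit in an indirect L2 register, applied only when firmware left it clear; the perf-counter change in the same file moves the devid lookup and the amd_iommu_pc_present check into the exported wrapper so the internal helper is always called with a known-good iommu. A trivial sketch of the bit-setting part:

    /* A plain variable stands in for the indirect L2 register access done
     * with iommu_read_l2()/iommu_write_l2() in the driver. */
    #include <stdio.h>

    #define BIT(n)  (1u << (n))

    static unsigned int l2_debug_3;    /* pretend D0F2xF4_x47 */

    int main(void)
    {
        unsigned int value = l2_debug_3;          /* read */

        if (!(value & BIT(0)))                    /* only act if BIOS left it clear */
            l2_debug_3 = value | BIT(0);          /* set AtsIgnoreIWDis */

        printf("L2_DEBUG_3[0] = %u\n", l2_debug_3 & BIT(0));
        return 0;
    }
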
index fb092f3..8ffd756 100644 (file)
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
-       if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+       if (action != BUS_NOTIFY_ADD_DEVICE &&
+           action != BUS_NOTIFY_REMOVED_DEVICE)
                return NOTIFY_DONE;
 
        info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
-       else if (action == BUS_NOTIFY_DEL_DEVICE)
+       else if (action == BUS_NOTIFY_REMOVED_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);
 
index 986a53e..a2e1b7f 100644 (file)
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
                                rmrru->devices_cnt);
                        if(ret < 0)
                                return ret;
-               } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+               } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
                        dmar_remove_dev_scope(info, rmrr->segment,
                                rmrru->devices, rmrru->devices_cnt);
                }
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
                                break;
                        else if(ret < 0)
                                return ret;
-               } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+               } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
                        if (dmar_remove_dev_scope(info, atsr->segment,
                                        atsru->devices, atsru->devices_cnt))
                                break;
index 0a73632..43dfd15 100644 (file)
@@ -78,6 +78,9 @@ struct its_node {
 
 #define ITS_ITT_ALIGN          SZ_256
 
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o)  (PAGE_SIZE << (o))
+
 struct event_lpi_map {
        unsigned long           *lpi_map;
        u16                     *col_map;
@@ -600,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
        lpi_set_config(d, true);
 }
 
-static void its_eoi_irq(struct irq_data *d)
-{
-       gic_write_eoir(d->hwirq);
-}
-
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
@@ -641,7 +639,7 @@ static struct irq_chip its_irq_chip = {
        .name                   = "ITS",
        .irq_mask               = its_mask_irq,
        .irq_unmask             = its_unmask_irq,
-       .irq_eoi                = its_eoi_irq,
+       .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = its_set_affinity,
        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
 };
@@ -846,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                u64 type = GITS_BASER_TYPE(val);
                u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
                int order = get_order(psz);
-               int alloc_size;
                int alloc_pages;
                u64 tmp;
                void *base;
@@ -878,9 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                        }
                }
 
-               alloc_size = (1 << order) * PAGE_SIZE;
 retry_alloc_baser:
-               alloc_pages = (alloc_size / psz);
+               alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
                if (alloc_pages > GITS_BASER_PAGES_MAX) {
                        alloc_pages = GITS_BASER_PAGES_MAX;
                        order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -933,7 +929,7 @@ retry_baser:
                        shr = tmp & GITS_BASER_SHAREABILITY_MASK;
                        if (!shr) {
                                cache = GITS_BASER_nC;
-                               __flush_dcache_area(base, alloc_size);
+                               __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
                        }
                        goto retry_baser;
                }
@@ -966,7 +962,7 @@ retry_baser:
                }
 
                pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
-                       (int)(alloc_size / entry_size),
+                       (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
                        its_base_type_string[type],
                        (unsigned long)virt_to_phys(base),
                        psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
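
PAGE_ORDER_TO_SIZE() introduced above replaces the local alloc_size variable wherever the byte size of an order-N page allocation is needed, so the size stays consistent after the order is clamped in the retry path. A quick demonstration of the arithmetic:

    /* Assumes 4 KiB CPU pages; the 16 KiB ITS table page size is only an
     * example value for psz. */
    #include <stdio.h>

    #define PAGE_SIZE               4096UL
    #define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))

    int main(void)
    {
        unsigned long psz = 16 * 1024;

        for (int order = 2; order <= 5; order++)
            printf("order %d -> %lu bytes -> %lu ITS pages of %luK\n",
                   order, PAGE_ORDER_TO_SIZE(order),
                   PAGE_ORDER_TO_SIZE(order) / psz, psz / 1024);
        return 0;
    }
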
index 5df4048..dd83492 100644 (file)
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
 
        if (clone)
                free_rq_clone(clone);
+       else if (!tio->md->queue->mq_ops)
+               free_rq_tio(tio);
 }
 
 /*
index 7e9cbf7..fb7ed73 100644 (file)
@@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client,
                if (!client->dev.platform_data) {
                        dev_err(&client->dev,
                                "Neither DT not platform data provided\n");
-                       return EINVAL;
+                       return -EINVAL;
                }
                flash->platform_data = client->dev.platform_data;
        }
index f8dd750..e1719ff 100644 (file)
@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
        }
 
        /* tx 5v detect */
-       tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+       tx_5v = irq_reg_0x70 & info->cable_det_mask;
        if (tx_5v) {
                v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
-               io_write(sd, 0x71, tx_5v);
                adv76xx_s_detect_tx_5v_ctrl(sd);
                if (handled)
                        *handled = true;
index 8c54fd2..a136257 100644 (file)
@@ -1843,8 +1843,7 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
                        ent->function = MEDIA_ENT_F_CONN_RF;
                        break;
                default: /* AU0828_VMUX_DEBUG */
-                       ent->function = MEDIA_ENT_F_CONN_TEST;
-                       break;
+                       continue;
                }
 
                ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
index 4c1903f..0c6c17a 100644 (file)
@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
                delta = mftb() - psl_tb;
                if (delta < 0)
                        delta = -delta;
-       } while (cputime_to_usecs(delta) > 16);
+       } while (tb_to_ns(delta) > 16000);
 
        return 0;
 }
index b6639ea..f6e4d97 100644 (file)
@@ -2232,6 +2232,7 @@ err_irq:
                dma_release_channel(host->tx_chan);
        if (host->rx_chan)
                dma_release_channel(host->rx_chan);
+       pm_runtime_dont_use_autosuspend(host->dev);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
        dma_release_channel(host->tx_chan);
        dma_release_channel(host->rx_chan);
 
+       pm_runtime_dont_use_autosuspend(host->dev);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        device_init_wakeup(&pdev->dev, false);
index 2a1b6e0..0134ba3 100644 (file)
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
        vol->changing_leb = 1;
        vol->ch_lnum = req->lnum;
 
-       vol->upd_buf = vmalloc(req->bytes);
+       vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
        if (!vol->upd_buf)
                return -ENOMEM;
 
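
The vmalloc() size above is rounded up to the volume's minimal I/O unit so the final, partial write of an LEB change still has a whole min_io_size chunk of buffer behind it. The round-up is the usual power-of-two ALIGN():

    /* min_io_size and the request size are example values. */
    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))   /* 'a' must be a power of two */

    int main(void)
    {
        unsigned int min_io_size = 2048;   /* e.g. a NAND page */
        unsigned int req_bytes   = 5000;   /* requested LEB change size */

        printf("upd_buf size: %u -> %u\n", req_bytes, ALIGN(req_bytes, min_io_size));
        return 0;
    }
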
index 7e2c43f..5d28e94 100644 (file)
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
        [ND_CMD_ARS_CAP] = {
                .in_num = 2,
                .in_sizes = { 8, 8, },
-               .out_num = 2,
-               .out_sizes = { 4, 4, },
+               .out_num = 4,
+               .out_sizes = { 4, 4, 4, 4, },
        },
        [ND_CMD_ARS_START] = {
-               .in_num = 4,
-               .in_sizes = { 8, 8, 2, 6, },
-               .out_num = 1,
-               .out_sizes = { 4, },
+               .in_num = 5,
+               .in_sizes = { 8, 8, 2, 1, 5, },
+               .out_num = 2,
+               .out_sizes = { 4, 4, },
        },
        [ND_CMD_ARS_STATUS] = {
-               .out_num = 2,
-               .out_sizes = { 4, UINT_MAX, },
+               .out_num = 3,
+               .out_sizes = { 4, 4, UINT_MAX, },
        },
 };
 
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
                return in_field[1];
        else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
                return out_field[1];
-       else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1)
-               return ND_CMD_ARS_STATUS_MAX;
+       else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
+               return out_field[1] - 8;
 
        return UINT_MAX;
 }
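
The descriptor changes above update the ARS capability/start/status payload layouts and make the last ND_CMD_ARS_STATUS field variable length: its size is taken from the second 4-byte output field minus the 8 bytes of fixed fields, rather than from a hard-coded maximum. A sketch of that size derivation with example numbers:

    /* Example numbers; field meanings as read from the hunk above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int out_field[2] = { 0, 72 };   /* [0] status, [1] total output size */
        unsigned int fixed   = 4 + 4;            /* the two fixed 4-byte output fields */
        unsigned int var_len = out_field[1] - fixed;

        printf("variable record payload: %u bytes\n", var_len);
        return 0;
    }
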
index 7edf316..8d0b546 100644 (file)
@@ -41,7 +41,7 @@ struct pmem_device {
        phys_addr_t             phys_addr;
        /* when non-zero this device is hosting a 'pfn' instance */
        phys_addr_t             data_offset;
-       unsigned long           pfn_flags;
+       u64                     pfn_flags;
        void __pmem             *virt_addr;
        size_t                  size;
        struct badblocks        bb;
index 3cd921e..03c4641 100644 (file)
@@ -55,8 +55,9 @@ static void nvme_free_ns(struct kref *kref)
        ns->disk->private_data = NULL;
        spin_unlock(&dev_list_lock);
 
-       nvme_put_ctrl(ns->ctrl);
        put_disk(ns->disk);
+       ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
+       nvme_put_ctrl(ns->ctrl);
        kfree(ns);
 }
 
@@ -183,7 +184,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out_unmap;
                }
 
-               if (meta_buffer) {
+               if (meta_buffer && meta_len) {
                        struct bio_integrity_payload *bip;
 
                        meta = kmalloc(meta_len, GFP_KERNEL);
@@ -373,6 +374,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 
        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
+       if (io.flags)
+               return -EINVAL;
 
        switch (io.opcode) {
        case nvme_cmd_write:
@@ -424,6 +427,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
+       if (cmd.flags)
+               return -EINVAL;
 
        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
@@ -556,6 +561,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        u16 old_ms;
        unsigned short bs;
 
+       if (test_bit(NVME_NS_DEAD, &ns->flags)) {
+               set_capacity(disk, 0);
+               return -ENODEV;
+       }
        if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
                dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
                                __func__, ns->ctrl->instance, ns->ns_id);
@@ -831,6 +840,23 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
        return ret;
 }
 
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+               struct request_queue *q)
+{
+       if (ctrl->max_hw_sectors) {
+               u32 max_segments =
+                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+
+               blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+               blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+       }
+       if (ctrl->stripe_size)
+               blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+       if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+               blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+       blk_queue_virt_boundary(q, ctrl->page_size - 1);
+}
+
 /*
  * Initialize the cached copies of the Identify data and various controller
  * register in our nvme_ctrl structure.  This should be called as soon as
@@ -888,6 +914,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                }
        }
 
+       nvme_set_queue_limits(ctrl, ctrl->admin_q);
+
        kfree(id);
        return 0;
 }
@@ -1118,9 +1146,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (!ns)
                return;
 
+       ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
+       if (ns->instance < 0)
+               goto out_free_ns;
+
        ns->queue = blk_mq_init_queue(ctrl->tagset);
        if (IS_ERR(ns->queue))
-               goto out_free_ns;
+               goto out_release_instance;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
        ns->queue->queuedata = ns;
        ns->ctrl = ctrl;
@@ -1134,17 +1166,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        ns->disk = disk;
        ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
 
+
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       if (ctrl->max_hw_sectors) {
-               blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
-               blk_queue_max_segments(ns->queue,
-                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
-       }
-       if (ctrl->stripe_size)
-               blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
-       if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-               blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
-       blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+       nvme_set_queue_limits(ctrl, ns->queue);
 
        disk->major = nvme_major;
        disk->first_minor = 0;
@@ -1153,7 +1177,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        disk->queue = ns->queue;
        disk->driverfs_dev = ctrl->device;
        disk->flags = GENHD_FL_EXT_DEVT;
-       sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);
+       sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
 
        if (nvme_revalidate_disk(ns->disk))
                goto out_free_disk;
@@ -1173,40 +1197,29 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        kfree(disk);
  out_free_queue:
        blk_cleanup_queue(ns->queue);
+ out_release_instance:
+       ida_simple_remove(&ctrl->ns_ida, ns->instance);
  out_free_ns:
        kfree(ns);
 }
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       bool kill = nvme_io_incapable(ns->ctrl) &&
-                       !blk_queue_dying(ns->queue);
-
-       lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
-       if (kill) {
-               blk_set_queue_dying(ns->queue);
+       if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
+               return;
 
-               /*
-                * The controller was shutdown first if we got here through
-                * device removal. The shutdown may requeue outstanding
-                * requests. These need to be aborted immediately so
-                * del_gendisk doesn't block indefinitely for their completion.
-                */
-               blk_mq_abort_requeue_list(ns->queue);
-       }
        if (ns->disk->flags & GENHD_FL_UP) {
                if (blk_get_integrity(ns->disk))
                        blk_integrity_unregister(ns->disk);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_attr_group);
                del_gendisk(ns->disk);
-       }
-       if (kill || !blk_queue_dying(ns->queue)) {
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
+       mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
+       mutex_unlock(&ns->ctrl->namespaces_mutex);
        nvme_put_ns(ns);
 }
 
@@ -1300,10 +1313,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns, *next;
 
-       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
-       mutex_unlock(&ctrl->namespaces_mutex);
 }
 
 static DEFINE_IDA(nvme_instance_ida);
@@ -1350,6 +1361,7 @@ static void nvme_free_ctrl(struct kref *kref)
 
        put_device(ctrl->device);
        nvme_release_instance(ctrl);
+       ida_destroy(&ctrl->ns_ida);
 
        ctrl->ops->free_ctrl(ctrl);
 }
@@ -1390,6 +1402,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        }
        get_device(ctrl->device);
        dev_set_drvdata(ctrl->device, ctrl);
+       ida_init(&ctrl->ns_ida);
 
        spin_lock(&dev_list_lock);
        list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1402,6 +1415,38 @@ out:
        return ret;
 }
 
+/**
+ * nvme_kill_queues(): Ends all namespace queues
+ * @ctrl: the dead controller that needs to end
+ *
+ * Call this function when the driver determines it is unable to get the
+ * controller in a state capable of servicing IO.
+ */
+void nvme_kill_queues(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
+               if (!kref_get_unless_zero(&ns->kref))
+                       continue;
+
+               /*
+                * Revalidating a dead namespace sets capacity to 0. This will
+                * end buffered writers dirtying pages that can't be synced.
+                */
+               if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+                       revalidate_disk(ns->disk);
+
+               blk_set_queue_dying(ns->queue);
+               blk_mq_abort_requeue_list(ns->queue);
+               blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+               nvme_put_ns(ns);
+       }
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+
 void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
index 9664d07..fb15ba5 100644 (file)
@@ -72,6 +72,7 @@ struct nvme_ctrl {
        struct mutex namespaces_mutex;
        struct device *device;  /* char device */
        struct list_head node;
+       struct ida ns_ida;
 
        char name[12];
        char serial[20];
@@ -102,6 +103,7 @@ struct nvme_ns {
        struct request_queue *queue;
        struct gendisk *disk;
        struct kref kref;
+       int instance;
 
        u8 eui[8];
        u8 uuid[16];
@@ -112,6 +114,11 @@ struct nvme_ns {
        bool ext;
        u8 pi_type;
        int type;
+       unsigned long flags;
+
+#define NVME_NS_REMOVING 0
+#define NVME_NS_DEAD     1
+
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
 };
@@ -240,6 +247,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
+void nvme_kill_queues(struct nvme_ctrl *ctrl);
 
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags);
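
With the core.c and nvme.h changes above, the nvme%dn%d disk name is derived from a per-controller instance handed out by ida_simple_get() rather than from the raw namespace ID, which avoids reusing a name that may still be held by a namespace not yet fully torn down. A userspace stand-in for the lowest-free-ID behaviour:

    /* A trivial bitmap allocator stands in for the kernel IDA. */
    #include <stdio.h>

    #define MAX_NS 64

    static unsigned char used[MAX_NS];

    static int instance_get(void)          /* ida_simple_get(&ctrl->ns_ida, 1, 0, ...) */
    {
        for (int i = 1; i < MAX_NS; i++)   /* instances start at 1, as in the driver */
            if (!used[i]) {
                used[i] = 1;
                return i;
            }
        return -1;
    }

    static void instance_put(int i)        /* ida_simple_remove(&ctrl->ns_ida, i) */
    {
        used[i] = 0;
    }

    int main(void)
    {
        int a = instance_get(), b = instance_get();
        char name[32];

        instance_put(a);                   /* first namespace torn down */
        snprintf(name, sizeof(name), "nvme%dn%d", 0, instance_get());
        printf("%s (other namespace kept instance %d)\n", name, b);
        return 0;
    }
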
index a128672..680f578 100644 (file)
@@ -86,7 +86,6 @@ struct nvme_queue;
 
 static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 
 /*
@@ -120,6 +119,7 @@ struct nvme_dev {
        unsigned long flags;
 
 #define NVME_CTRL_RESETTING    0
+#define NVME_CTRL_REMOVING     1
 
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
@@ -286,6 +286,17 @@ static int nvme_init_request(void *data, struct request *req,
        return 0;
 }
 
+static void nvme_queue_scan(struct nvme_dev *dev)
+{
+       /*
+        * Do not queue new scan work when a controller is reset during
+        * removal.
+        */
+       if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+               return;
+       queue_work(nvme_workq, &dev->scan_work);
+}
+
 static void nvme_complete_async_event(struct nvme_dev *dev,
                struct nvme_completion *cqe)
 {
@@ -300,7 +311,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
                dev_info(dev->dev, "rescanning\n");
-               queue_work(nvme_workq, &dev->scan_work);
+               nvme_queue_scan(dev);
        default:
                dev_warn(dev->dev, "async event result %08x\n", result);
        }
@@ -679,7 +690,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        spin_lock_irq(&nvmeq->q_lock);
        if (unlikely(nvmeq->cq_vector < 0)) {
-               ret = BLK_MQ_RQ_QUEUE_BUSY;
+               if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
+                       ret = BLK_MQ_RQ_QUEUE_BUSY;
+               else
+                       ret = BLK_MQ_RQ_QUEUE_ERROR;
                spin_unlock_irq(&nvmeq->q_lock);
                goto out;
        }
@@ -1250,6 +1264,12 @@ static struct blk_mq_ops nvme_mq_ops = {
 static void nvme_dev_remove_admin(struct nvme_dev *dev)
 {
        if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
+               /*
+                * If the controller was reset during removal, it's possible
+                * user requests may be waiting on a stopped queue. Start the
+                * queue to flush these to completion.
+                */
+               blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
                blk_cleanup_queue(dev->ctrl.admin_q);
                blk_mq_free_tag_set(&dev->admin_tagset);
        }
@@ -1690,14 +1710,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
                        return 0;
                dev->ctrl.tagset = &dev->tagset;
        }
-       queue_work(nvme_workq, &dev->scan_work);
+       nvme_queue_scan(dev);
        return 0;
 }
 
-static int nvme_dev_map(struct nvme_dev *dev)
+static int nvme_pci_enable(struct nvme_dev *dev)
 {
        u64 cap;
-       int bars, result = -ENOMEM;
+       int result = -ENOMEM;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        if (pci_enable_device_mem(pdev))
@@ -1705,24 +1725,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
        dev->entry[0].vector = pdev->irq;
        pci_set_master(pdev);
-       bars = pci_select_bars(pdev, IORESOURCE_MEM);
-       if (!bars)
-               goto disable_pci;
-
-       if (pci_request_selected_regions(pdev, bars, "nvme"))
-               goto disable_pci;
 
        if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
                goto disable;
 
-       dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
-       if (!dev->bar)
-               goto disable;
-
        if (readl(dev->bar + NVME_REG_CSTS) == -1) {
                result = -ENODEV;
-               goto unmap;
+               goto disable;
        }
 
        /*
@@ -1732,7 +1742,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (!pdev->irq) {
                result = pci_enable_msix(pdev, dev->entry, 1);
                if (result < 0)
-                       goto unmap;
+                       goto disable;
        }
 
        cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1759,17 +1769,19 @@ static int nvme_dev_map(struct nvme_dev *dev)
        pci_save_state(pdev);
        return 0;
 
- unmap:
-       iounmap(dev->bar);
-       dev->bar = NULL;
  disable:
-       pci_release_regions(pdev);
- disable_pci:
        pci_disable_device(pdev);
        return result;
 }
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+       if (dev->bar)
+               iounmap(dev->bar);
+       pci_release_regions(to_pci_dev(dev->dev));
+}
+
+static void nvme_pci_disable(struct nvme_dev *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
@@ -1778,12 +1790,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
        else if (pdev->msix_enabled)
                pci_disable_msix(pdev);
 
-       if (dev->bar) {
-               iounmap(dev->bar);
-               dev->bar = NULL;
-               pci_release_regions(pdev);
-       }
-
        if (pci_is_enabled(pdev)) {
                pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
@@ -1842,7 +1848,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        nvme_dev_list_remove(dev);
 
        mutex_lock(&dev->shutdown_lock);
-       if (dev->bar) {
+       if (pci_is_enabled(to_pci_dev(dev->dev))) {
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
@@ -1855,7 +1861,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                nvme_disable_io_queues(dev);
                nvme_disable_admin_queue(dev, shutdown);
        }
-       nvme_dev_unmap(dev);
+       nvme_pci_disable(dev);
 
        for (i = dev->queue_count - 1; i >= 0; i--)
                nvme_clear_queue(dev->queues[i]);
@@ -1899,10 +1905,20 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
        kfree(dev);
 }
 
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
+{
+       dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
+
+       kref_get(&dev->ctrl.kref);
+       nvme_dev_disable(dev, false);
+       if (!schedule_work(&dev->remove_work))
+               nvme_put_ctrl(&dev->ctrl);
+}
+
 static void nvme_reset_work(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
-       int result;
+       int result = -ENODEV;
 
        if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
                goto out;
@@ -1911,37 +1927,37 @@ static void nvme_reset_work(struct work_struct *work)
         * If we're called to reset a live controller first shut it down before
         * moving on.
         */
-       if (dev->bar)
+       if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
        set_bit(NVME_CTRL_RESETTING, &dev->flags);
 
-       result = nvme_dev_map(dev);
+       result = nvme_pci_enable(dev);
        if (result)
                goto out;
 
        result = nvme_configure_admin_queue(dev);
        if (result)
-               goto unmap;
+               goto out;
 
        nvme_init_queue(dev->queues[0], 0);
        result = nvme_alloc_admin_tags(dev);
        if (result)
-               goto disable;
+               goto out;
 
        result = nvme_init_identify(&dev->ctrl);
        if (result)
-               goto free_tags;
+               goto out;
 
        result = nvme_setup_io_queues(dev);
        if (result)
-               goto free_tags;
+               goto out;
 
        dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
 
        result = nvme_dev_list_add(dev);
        if (result)
-               goto remove;
+               goto out;
 
        /*
         * Keep the controller around but remove all namespaces if we don't have
@@ -1958,19 +1974,8 @@ static void nvme_reset_work(struct work_struct *work)
        clear_bit(NVME_CTRL_RESETTING, &dev->flags);
        return;
 
- remove:
-       nvme_dev_list_remove(dev);
- free_tags:
-       nvme_dev_remove_admin(dev);
-       blk_put_queue(dev->ctrl.admin_q);
-       dev->ctrl.admin_q = NULL;
-       dev->queues[0]->tags = NULL;
- disable:
-       nvme_disable_admin_queue(dev, false);
- unmap:
-       nvme_dev_unmap(dev);
  out:
-       nvme_remove_dead_ctrl(dev);
+       nvme_remove_dead_ctrl(dev, result);
 }
 
 static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -1978,19 +1983,12 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
+       nvme_kill_queues(&dev->ctrl);
        if (pci_get_drvdata(pdev))
                pci_stop_and_remove_bus_device_locked(pdev);
        nvme_put_ctrl(&dev->ctrl);
 }
 
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
-{
-       dev_warn(dev->dev, "Removing after probe failure\n");
-       kref_get(&dev->ctrl.kref);
-       if (!schedule_work(&dev->remove_work))
-               nvme_put_ctrl(&dev->ctrl);
-}
-
 static int nvme_reset(struct nvme_dev *dev)
 {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@ -2042,6 +2040,27 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .free_ctrl              = nvme_pci_free_ctrl,
 };
 
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+       int bars;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       if (!bars)
+               return -ENODEV;
+       if (pci_request_selected_regions(pdev, bars, "nvme"))
+               return -ENODEV;
+
+       dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+       if (!dev->bar)
+               goto release;
+
+       return 0;
+  release:
+       pci_release_regions(pdev);
+       return -ENODEV;
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
@@ -2066,6 +2085,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
 
+       result = nvme_dev_map(dev);
+       if (result)
+               goto free;
+
        INIT_LIST_HEAD(&dev->node);
        INIT_WORK(&dev->scan_work, nvme_dev_scan);
        INIT_WORK(&dev->reset_work, nvme_reset_work);
@@ -2089,6 +2112,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        nvme_release_prp_pools(dev);
  put_pci:
        put_device(dev->dev);
+       nvme_dev_unmap(dev);
  free:
        kfree(dev->queues);
        kfree(dev->entry);
@@ -2112,10 +2136,16 @@ static void nvme_shutdown(struct pci_dev *pdev)
        nvme_dev_disable(dev, true);
 }
 
+/*
+ * The driver's remove may be called on a device in a partially initialized
+ * state. This function must not have any dependencies on the device state in
+ * order to proceed.
+ */
 static void nvme_remove(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
 
+       set_bit(NVME_CTRL_REMOVING, &dev->flags);
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->scan_work);
        nvme_remove_namespaces(&dev->ctrl);
@@ -2126,6 +2156,7 @@ static void nvme_remove(struct pci_dev *pdev)
        nvme_free_queues(dev, 0);
        nvme_release_cmb(dev);
        nvme_release_prp_pools(dev);
+       nvme_dev_unmap(dev);
        nvme_put_ctrl(&dev->ctrl);
 }
 
index 75a6054..d1cdd9c 100644 (file)
@@ -14,6 +14,7 @@ config PCI_DRA7XX
 config PCI_MVEBU
        bool "Marvell EBU PCIe controller"
        depends on ARCH_MVEBU || ARCH_DOVE
+       depends on ARM
        depends on OF
 
 config PCIE_DW
index ed34c95..6153853 100644 (file)
 
 #define to_keystone_pcie(x)    container_of(x, struct keystone_pcie, pp)
 
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
-       return sys->private_data;
-}
-
 static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
                                             u32 *bit_pos)
 {
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
        struct pcie_port *pp;
 
        msi = irq_data_get_msi_desc(d);
-       pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+       pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
        update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
        u32 offset;
 
        msi = irq_data_get_msi_desc(d);
-       pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+       pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
        u32 offset;
 
        msi = irq_data_get_msi_desc(d);
-       pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+       pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
index 3923bed..f39961b 100644 (file)
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
        iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
 }
 
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+       u32 val;
+
+       val = ioread32(pcie->dbi + PCIE_STRFMR1);
+       val &= 0xDFFFFFFF;
+       iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+}
+
 static int ls1021_pcie_link_up(struct pcie_port *pp)
 {
        u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
 static void ls1021_pcie_host_init(struct pcie_port *pp)
 {
        struct ls_pcie *pcie = to_ls_pcie(pp);
-       u32 val, index[2];
+       u32 index[2];
 
        pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
                                                     "fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
 
        dw_pcie_setup_rc(pp);
 
-       /*
-        * LS1021A Workaround for internal TKT228622
-        * to fix the INTx hang issue
-        */
-       val = ioread32(pcie->dbi + PCIE_STRFMR1);
-       val &= 0xffff;
-       iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+       ls_pcie_drop_msg_tlp(pcie);
 }
 
 static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
        iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
        ls_pcie_fix_class(pcie);
        ls_pcie_clear_multifunction(pcie);
+       ls_pcie_drop_msg_tlp(pcie);
        iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
 }
 
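
The new ls_pcie_drop_msg_tlp() masks only bit 29 of PCIE_STRFMR1, dropping all message TLPs except vendor messages, and it is now applied in the common ls_pcie_host_init() path as well; the old LS1021A-only workaround masked with 0xffff and therefore also cleared the upper half of the register. A bit-level comparison:

    /* The register value is made up; only the masks matter. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x2F012345;    /* hypothetical PCIE_STRFMR1 content */

        printf("original: %08x\n", val);
        printf("new mask: %08x (only bit 29 cleared)\n", val & 0xDFFFFFFF);
        printf("old mask: %08x (bits 16-31 cleared too)\n", val & 0xffff);
        return 0;
    }
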
index c777b97..5f70fee 100644 (file)
@@ -53,7 +53,7 @@ struct pcifront_device {
 };
 
 struct pcifront_sd {
-       int domain;
+       struct pci_sysdata sd;
        struct pcifront_device *pdev;
 };
 
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
                                    unsigned int domain, unsigned int bus,
                                    struct pcifront_device *pdev)
 {
-       sd->domain = domain;
+       /* Because we do not expose that information via XenBus. */
+       sd->sd.node = first_online_node;
+       sd->sd.domain = domain;
        sd->pdev = pdev;
 }
 
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
        dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
                 domain, bus);
 
-       bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-       sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+       bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+       sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!bus_entry || !sd) {
                err = -ENOMEM;
                goto err_out;
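
struct pcifront_sd above now starts with a real struct pci_sysdata (and is zero-allocated), because generic x86 PCI code dereferences bus->sysdata as a struct pci_sysdata, for example to read the NUMA node; with only a private domain field there, that code was reading past the frontend's structure. A minimal illustration of the first-member embedding that makes the same pointer valid for both users:

    /* Simplified stand-in layouts, not the real structures. */
    #include <stdio.h>

    struct pci_sysdata_sketch { int domain; int node; };

    struct pcifront_sd_sketch {
        struct pci_sysdata_sketch sd;   /* must stay the first member */
        void *pdev;                     /* frontend-private state */
    };

    /* Generic code only knows about the common header. */
    static int generic_reads_node(void *sysdata)
    {
        return ((struct pci_sysdata_sketch *)sysdata)->node;
    }

    int main(void)
    {
        struct pcifront_sd_sketch sd = { .sd = { .domain = 0, .node = 0 }, .pdev = NULL };

        printf("node seen by generic code: %d\n", generic_reads_node(&sd));
        return 0;
    }
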
index 9429e66..8eafc6f 100644 (file)
@@ -21,6 +21,9 @@
 
 #include <linux/power/bq27xxx_battery.h>
 
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
 static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
 {
        struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
 {
        struct bq27xxx_device_info *di;
        int ret;
+       char *name;
+       int num;
+
+       /* Get new ID for the new battery device */
+       mutex_lock(&battery_mutex);
+       num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
+       mutex_unlock(&battery_mutex);
+       if (num < 0)
+               return num;
+
+       name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+       if (!name)
+               goto err_mem;
 
        di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
        if (!di)
-               return -ENOMEM;
+               goto err_mem;
 
+       di->id = num;
        di->dev = &client->dev;
        di->chip = id->driver_data;
-       di->name = id->name;
+       di->name = name;
        di->bus.read = bq27xxx_battery_i2c_read;
 
        ret = bq27xxx_battery_setup(di);
        if (ret)
-               return ret;
+               goto err_failed;
 
        /* Schedule a polling after about 1 min */
        schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
        }
 
        return 0;
+
+err_mem:
+       ret = -ENOMEM;
+
+err_failed:
+       mutex_lock(&battery_mutex);
+       idr_remove(&battery_id, num);
+       mutex_unlock(&battery_mutex);
+
+       return ret;
 }
 
 static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
 
        bq27xxx_battery_teardown(di);
 
+       mutex_lock(&battery_mutex);
+       idr_remove(&battery_id, di->id);
+       mutex_unlock(&battery_mutex);
+
        return 0;
 }
 
index 3b3e099..d6a691e 100644 (file)
@@ -4002,6 +4002,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
        struct ipr_sglist *sglist;
        char fname[100];
        char *src;
+       char *endline;
        int result, dnld_size;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -4009,6 +4010,10 @@ static ssize_t ipr_store_update_fw(struct device *dev,
 
        snprintf(fname, sizeof(fname), "%s", buf);
 
+       endline = strchr(fname, '\n');
+       if (endline)
+               *endline = '\0';
+
        if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
                dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
                return -EIO;
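
The ipr_store_update_fw() fix above trims the trailing newline that echo-style writes append through sysfs, since request_firmware() would otherwise look for a file whose name literally ends in a newline. The same trim in isolation:

    /* The filename is an example; only the trim matters. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char fname[100];
        const char *buf = "example-ioa-fw.bin\n";   /* typical sysfs write, newline included */
        char *endline;

        snprintf(fname, sizeof(fname), "%s", buf);
        endline = strchr(fname, '\n');
        if (endline)
            *endline = '\0';

        printf("firmware file: '%s'\n", fname);
        return 0;
    }
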
index fa6b2c4..8c6e318 100644 (file)
@@ -1344,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 
        switch (ret) {
        case BLKPREP_KILL:
+       case BLKPREP_INVALID:
                req->errors = DID_NO_CONNECT << 16;
                /* release the command and kill it */
                if (req->special) {
index 91a0030..a9bac3b 100644 (file)
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
+       if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
                if (!of_find_compatible_node(NULL, NULL,
                                             "renesas,cpg-mstp-clocks"))
                        return 0;
index 3ec7e65..db49af9 100644 (file)
@@ -147,7 +147,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
        mutex_lock(&mdev->graph_mutex);
        ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
        if (ret) {
-               mutex_unlock(&video->lock);
+               mutex_unlock(&mdev->graph_mutex);
                return -ENOMEM;
        }
        media_entity_graph_walk_start(&graph, entity);
index b59195e..b635ab6 100644 (file)
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
 
        /* register a nop PHY */
        ci->phy = usb_phy_generic_register();
-       if (!ci->phy)
-               return -ENOMEM;
+       if (IS_ERR(ci->phy))
+               return PTR_ERR(ci->phy);
 
        memset(res, 0, sizeof(res));
        res[0].start    = pci_resource_start(pdev, 0);
index a4f7db2..df47110 100644 (file)
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
        if (sscanf(buf, "%u", &mode) != 1)
                return -EINVAL;
 
+       if (mode > 255)
+               return -EBADRQC;
+
        pm_runtime_get_sync(ci->dev);
        spin_lock_irqsave(&ci->lock, flags);
        ret = hw_port_test_set(ci, mode);
index 45f86da..03b6743 100644 (file)
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
 int ci_hdrc_otg_init(struct ci_hdrc *ci)
 {
        INIT_WORK(&ci->work, ci_otg_work);
-       ci->wq = create_singlethread_workqueue("ci_otg");
+       ci->wq = create_freezable_workqueue("ci_otg");
        if (!ci->wq) {
                dev_err(ci->dev, "can't create workqueue\n");
                return -ENODEV;
index 350dcd9..51b4369 100644 (file)
@@ -5401,6 +5401,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
        }
 
        bos = udev->bos;
+       udev->bos = NULL;
 
        for (i = 0; i < SET_CONFIG_TRIES; ++i) {
 
@@ -5493,11 +5494,8 @@ done:
        usb_set_usb2_hardware_lpm(udev, 1);
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
-       /* release the new BOS descriptor allocated  by hub_port_init() */
-       if (udev->bos != bos) {
-               usb_release_bos_descriptor(udev);
-               udev->bos = bos;
-       }
+       usb_release_bos_descriptor(udev);
+       udev->bos = bos;
        return 0;
 
 re_enumerate:
index fd95ba6..f0decc0 100644 (file)
@@ -1,5 +1,6 @@
 config USB_DWC2
        tristate "DesignWare USB2 DRD Core Support"
+       depends on HAS_DMA
        depends on USB || USB_GADGET
        help
          Say Y here if your system has a Dual Role Hi-Speed USB
index e991d55..46c4ba7 100644 (file)
@@ -619,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
                         __func__, hsotg->dr_mode);
                break;
        }
+
+       /*
+        * NOTE: This is required for some rockchip soc based
+        * platforms.
+        */
+       msleep(50);
 }
 
 /*
index 36606fc..a41274a 100644 (file)
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
        failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
                                                     halt_status, n_bytes,
                                                     xfer_done);
-       if (*xfer_done && urb->status != -EINPROGRESS)
-               failed = 1;
-
-       if (failed) {
+       if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
                dwc2_host_complete(hsotg, qtd, urb->status);
                dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
-               dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
-                        failed, *xfer_done, urb->status);
+               dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+                        failed, *xfer_done);
                return failed;
        }
 
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 
        list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
                int i;
+               int qtd_desc_count;
 
                qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
                xfer_done = 0;
+               qtd_desc_count = qtd->n_desc;
 
-               for (i = 0; i < qtd->n_desc; i++) {
+               for (i = 0; i < qtd_desc_count; i++) {
                        if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
                                                       desc_num, halt_status,
-                                                      &xfer_done)) {
-                               qtd = NULL;
-                               break;
-                       }
+                                                      &xfer_done))
+                               goto stop_scan;
+
                        desc_num++;
                }
        }
 
+stop_scan:
        if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
                /*
                 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
                 */
                if (halt_status == DWC2_HC_XFER_STALL)
                        qh->data_toggle = DWC2_HC_PID_DATA0;
-               else if (qtd)
+               else
                        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
        }
 
index f825380..cadba8b 100644 (file)
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
        u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
 
        if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+               if (WARN(!chan || !chan->qh,
+                        "chan->qh must be specified for non-control eps\n"))
+                       return;
+
                if (pid == TSIZ_SC_MC_PID_DATA0)
                        chan->qh->data_toggle = DWC2_HC_PID_DATA0;
                else
                        chan->qh->data_toggle = DWC2_HC_PID_DATA1;
        } else {
+               if (WARN(!qtd,
+                        "qtd must be specified for control eps\n"))
+                       return;
+
                if (pid == TSIZ_SC_MC_PID_DATA0)
                        qtd->data_toggle = DWC2_HC_PID_DATA0;
                else
index 2913068..e4f8b90 100644 (file)
@@ -856,7 +856,6 @@ struct dwc3 {
        unsigned                pullups_connected:1;
        unsigned                resize_fifos:1;
        unsigned                setup_packet_pending:1;
-       unsigned                start_config_issued:1;
        unsigned                three_stage_setup:1;
        unsigned                usb3_lpm_capable:1;
 
index 3a9354a..8d6b75c 100644 (file)
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
        int ret;
        u32 reg;
 
-       dwc->start_config_issued = false;
        cfg = le16_to_cpu(ctrl->wValue);
 
        switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
                dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
                ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
                break;
-       case USB_REQ_SET_INTERFACE:
-               dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
-               dwc->start_config_issued = false;
-               /* Fall through */
        default:
                dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
index 7d1dd82..2363bad 100644 (file)
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
        dep->trb_pool_dma = 0;
 }
 
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/**
+ * dwc3_gadget_start_config - Configure EP resources
+ * @dwc: pointer to our controller context structure
+ * @dep: endpoint that is being enabled
+ *
+ * The assignment of transfer resources cannot perfectly follow the
+ * data book due to the fact that the controller driver does not have
+ * all knowledge of the configuration in advance. It is given this
+ * information piecemeal by the composite gadget framework after every
+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
+ * programming model in this scenario can cause errors. For two
+ * reasons:
+ *
+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
+ * multiple interfaces.
+ *
+ * 2) The databook does not mention doing more DEPXFERCFG for new
+ * endpoint on alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this
+ * setting will stay persistent until either a core reset or
+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
+ * do DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled
+ * but is triggered only when called for EP0-out, which always happens
+ * first, and which should only happen in one of the above conditions.
+ */
 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
        struct dwc3_gadget_ep_cmd_params params;
        u32                     cmd;
+       int                     i;
+       int                     ret;
+
+       if (dep->number)
+               return 0;
 
        memset(&params, 0x00, sizeof(params));
+       cmd = DWC3_DEPCMD_DEPSTARTCFG;
 
-       if (dep->number != 1) {
-               cmd = DWC3_DEPCMD_DEPSTARTCFG;
-               /* XferRscIdx == 0 for ep0 and 2 for the remaining */
-               if (dep->number > 1) {
-                       if (dwc->start_config_issued)
-                               return 0;
-                       dwc->start_config_issued = true;
-                       cmd |= DWC3_DEPCMD_PARAM(2);
-               }
+       ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+       if (ret)
+               return ret;
 
-               return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+       for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+               struct dwc3_ep *dep = dwc->eps[i];
+
+               if (!dep)
+                       continue;
+
+               ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                struct dwc3_trb *trb_st_hw;
                struct dwc3_trb *trb_link;
 
-               ret = dwc3_gadget_set_xfer_resource(dwc, dep);
-               if (ret)
-                       return ret;
-
                dep->endpoint.desc = desc;
                dep->comp_desc = comp_desc;
                dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
        }
        dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 
-       dwc->start_config_issued = false;
-
        /* Start with SuperSpeed Default */
        dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
        dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
        dwc3_disconnect_gadget(dwc);
-       dwc->start_config_issued = false;
 
        dwc->gadget.speed = USB_SPEED_UNKNOWN;
        dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 
        dwc3_stop_active_transfers(dwc);
        dwc3_clear_stall_all_ep(dwc);
-       dwc->start_config_issued = false;
 
        /* Reset device address to zero */
        reg = dwc3_readl(dwc->regs, DWC3_DCFG);
index 7e179f8..87fb0fd 100644 (file)
@@ -130,7 +130,8 @@ struct dev_data {
                                        setup_can_stall : 1,
                                        setup_out_ready : 1,
                                        setup_out_error : 1,
-                                       setup_abort : 1;
+                                       setup_abort : 1,
+                                       gadget_registered : 1;
        unsigned                        setup_wLength;
 
        /* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
 
        /* closing ep0 === shutdown all */
 
-       usb_gadget_unregister_driver (&gadgetfs_driver);
+       if (dev->gadget_registered)
+               usb_gadget_unregister_driver (&gadgetfs_driver);
 
        /* at this point "good" hardware has disconnected the
         * device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                 * kick in after the ep0 descriptor is closed.
                 */
                value = len;
+               dev->gadget_registered = true;
        }
        return value;
 
index 53c0692..93d28cb 100644 (file)
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
 {
        struct qe_udc *udc;
        struct device_node *np = ofdev->dev.of_node;
-       unsigned int tmp_addr = 0;
+       unsigned long tmp_addr = 0;
        struct usb_device_para __iomem *usbpram;
        unsigned int i;
        u64 size;
index 4dff60d..0d32052 100644 (file)
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
        static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
                                          0x50, 0x20, 0x70, 0x40, 0x90 };
 
-       if (ep->dev->enhanced_mode)
+       if (ep->dev->enhanced_mode) {
                reg = ep_enhanced[ep->num];
-       else{
+               switch (ep->dev->gadget.speed) {
+               case USB_SPEED_SUPER:
+                       reg += 2;
+                       break;
+               case USB_SPEED_FULL:
+                       reg += 1;
+                       break;
+               case USB_SPEED_HIGH:
+               default:
+                       break;
+               }
+       } else {
                reg = (ep->num + 1) * 0x10;
                if (ep->dev->gadget.speed != USB_SPEED_HIGH)
                        reg += 1;
index fd73a3e..b86a6f0 100644 (file)
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
                if (!driver->udc_name || strcmp(driver->udc_name,
                                                dev_name(&udc->dev)) == 0) {
                        ret = udc_bind_to_driver(udc, driver);
+                       if (ret != -EPROBE_DEFER)
+                               list_del(&driver->pending);
                        if (ret)
                                goto err4;
-                       list_del(&driver->pending);
                        break;
                }
        }
index 795a45b..58487a4 100644 (file)
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
                csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
                csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
        }
-       channel->desired_mode = mode;
+       channel->desired_mode = *mode;
        musb_writew(epio, MUSB_TXCSR, csr);
 
        return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                                qh->offset,
                                urb->transfer_buffer_length);
 
-                       done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh,
-                                                             urb, xfer_len,
-                                                             iso_err);
-                       if (done)
+                       if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
+                                                          xfer_len, iso_err))
                                goto finish;
                        else
                                dev_err(musb->controller, "error: rx_dma failed\n");
index 970a30e..72b387d 100644 (file)
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
        otg->host = host;
        dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
 
-       /*
-        * Kick the state machine work, if peripheral is not supported
-        * or peripheral is already registered with us.
-        */
-       if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
-               pm_runtime_get_sync(otg->usb_phy->dev);
-               schedule_work(&motg->sm_work);
-       }
+       pm_runtime_get_sync(otg->usb_phy->dev);
+       schedule_work(&motg->sm_work);
 
        return 0;
 }
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
        dev_dbg(otg->usb_phy->dev,
                "peripheral driver registered w/ tranceiver\n");
 
-       /*
-        * Kick the state machine work, if host is not supported
-        * or host is already registered with us.
-        */
-       if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
-               pm_runtime_get_sync(otg->usb_phy->dev);
-               schedule_work(&motg->sm_work);
-       }
+       pm_runtime_get_sync(otg->usb_phy->dev);
+       schedule_work(&motg->sm_work);
 
        return 0;
 }
index f612dda..56ecb8b 100644 (file)
@@ -475,22 +475,6 @@ config USB_SERIAL_MOS7840
          To compile this driver as a module, choose M here: the
          module will be called mos7840.  If unsure, choose N.
 
-config USB_SERIAL_MXUPORT11
-       tristate "USB Moxa UPORT 11x0 Serial Driver"
-       ---help---
-         Say Y here if you want to use a MOXA UPort 11x0 Serial hub.
-
-         This driver supports:
-
-         - UPort 1110  : 1 port RS-232 USB to Serial Hub.
-         - UPort 1130  : 1 port RS-422/485 USB to Serial Hub.
-         - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation.
-         - UPort 1150  : 1 port RS-232/422/485 USB to Serial Hub.
-         - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation.
-
-         To compile this driver as a module, choose M here: the
-         module will be called mxu11x0.
-
 config USB_SERIAL_MXUPORT
        tristate "USB Moxa UPORT Serial Driver"
        ---help---
index f3fa5e5..349d9df 100644 (file)
@@ -38,7 +38,6 @@ obj-$(CONFIG_USB_SERIAL_METRO)                        += metro-usb.o
 obj-$(CONFIG_USB_SERIAL_MOS7720)               += mos7720.o
 obj-$(CONFIG_USB_SERIAL_MOS7840)               += mos7840.o
 obj-$(CONFIG_USB_SERIAL_MXUPORT)               += mxuport.o
-obj-$(CONFIG_USB_SERIAL_MXUPORT11)             += mxu11x0.o
 obj-$(CONFIG_USB_SERIAL_NAVMAN)                        += navman.o
 obj-$(CONFIG_USB_SERIAL_OMNINET)               += omninet.o
 obj-$(CONFIG_USB_SERIAL_OPTICON)               += opticon.o
index 987813b..73a366d 100644 (file)
@@ -163,6 +163,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+       { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+       { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+       { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
        { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
        { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
deleted file mode 100644 (file)
index 6196073..0000000
+++ /dev/null
@@ -1,1006 +0,0 @@
-/*
- * USB Moxa UPORT 11x0 Serial Driver
- *
- * Copyright (C) 2007 MOXA Technologies Co., Ltd.
- * Copyright (C) 2015 Mathieu Othacehe <m.othacehe@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
- * Supports the following Moxa USB to serial converters:
- *  UPort 1110,  1 port RS-232 USB to Serial Hub.
- *  UPort 1130,  1 port RS-422/485 USB to Serial Hub.
- *  UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation
- *    protection.
- *  UPort 1150,  1 port RS-232/422/485 USB to Serial Hub.
- *  UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation
- *  protection.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-/* Vendor and product ids */
-#define MXU1_VENDOR_ID                         0x110a
-#define MXU1_1110_PRODUCT_ID                   0x1110
-#define MXU1_1130_PRODUCT_ID                   0x1130
-#define MXU1_1150_PRODUCT_ID                   0x1150
-#define MXU1_1151_PRODUCT_ID                   0x1151
-#define MXU1_1131_PRODUCT_ID                   0x1131
-
-/* Commands */
-#define MXU1_GET_VERSION                       0x01
-#define MXU1_GET_PORT_STATUS                   0x02
-#define MXU1_GET_PORT_DEV_INFO                 0x03
-#define MXU1_GET_CONFIG                                0x04
-#define MXU1_SET_CONFIG                                0x05
-#define MXU1_OPEN_PORT                         0x06
-#define MXU1_CLOSE_PORT                                0x07
-#define MXU1_START_PORT                                0x08
-#define MXU1_STOP_PORT                         0x09
-#define MXU1_TEST_PORT                         0x0A
-#define MXU1_PURGE_PORT                                0x0B
-#define MXU1_RESET_EXT_DEVICE                  0x0C
-#define MXU1_GET_OUTQUEUE                      0x0D
-#define MXU1_WRITE_DATA                                0x80
-#define MXU1_READ_DATA                         0x81
-#define MXU1_REQ_TYPE_CLASS                    0x82
-
-/* Module identifiers */
-#define MXU1_I2C_PORT                          0x01
-#define MXU1_IEEE1284_PORT                     0x02
-#define MXU1_UART1_PORT                                0x03
-#define MXU1_UART2_PORT                                0x04
-#define MXU1_RAM_PORT                          0x05
-
-/* Modem status */
-#define MXU1_MSR_DELTA_CTS                     0x01
-#define MXU1_MSR_DELTA_DSR                     0x02
-#define MXU1_MSR_DELTA_RI                      0x04
-#define MXU1_MSR_DELTA_CD                      0x08
-#define MXU1_MSR_CTS                           0x10
-#define MXU1_MSR_DSR                           0x20
-#define MXU1_MSR_RI                            0x40
-#define MXU1_MSR_CD                            0x80
-#define MXU1_MSR_DELTA_MASK                    0x0F
-#define MXU1_MSR_MASK                          0xF0
-
-/* Line status */
-#define MXU1_LSR_OVERRUN_ERROR                 0x01
-#define MXU1_LSR_PARITY_ERROR                  0x02
-#define MXU1_LSR_FRAMING_ERROR                 0x04
-#define MXU1_LSR_BREAK                         0x08
-#define MXU1_LSR_ERROR                         0x0F
-#define MXU1_LSR_RX_FULL                       0x10
-#define MXU1_LSR_TX_EMPTY                      0x20
-
-/* Modem control */
-#define MXU1_MCR_LOOP                          0x04
-#define MXU1_MCR_DTR                           0x10
-#define MXU1_MCR_RTS                           0x20
-
-/* Mask settings */
-#define MXU1_UART_ENABLE_RTS_IN                        0x0001
-#define MXU1_UART_DISABLE_RTS                  0x0002
-#define MXU1_UART_ENABLE_PARITY_CHECKING       0x0008
-#define MXU1_UART_ENABLE_DSR_OUT               0x0010
-#define MXU1_UART_ENABLE_CTS_OUT               0x0020
-#define MXU1_UART_ENABLE_X_OUT                 0x0040
-#define MXU1_UART_ENABLE_XA_OUT                        0x0080
-#define MXU1_UART_ENABLE_X_IN                  0x0100
-#define MXU1_UART_ENABLE_DTR_IN                        0x0800
-#define MXU1_UART_DISABLE_DTR                  0x1000
-#define MXU1_UART_ENABLE_MS_INTS               0x2000
-#define MXU1_UART_ENABLE_AUTO_START_DMA                0x4000
-#define MXU1_UART_SEND_BREAK_SIGNAL            0x8000
-
-/* Parity */
-#define MXU1_UART_NO_PARITY                    0x00
-#define MXU1_UART_ODD_PARITY                   0x01
-#define MXU1_UART_EVEN_PARITY                  0x02
-#define MXU1_UART_MARK_PARITY                  0x03
-#define MXU1_UART_SPACE_PARITY                 0x04
-
-/* Stop bits */
-#define MXU1_UART_1_STOP_BITS                  0x00
-#define MXU1_UART_1_5_STOP_BITS                        0x01
-#define MXU1_UART_2_STOP_BITS                  0x02
-
-/* Bits per character */
-#define MXU1_UART_5_DATA_BITS                  0x00
-#define MXU1_UART_6_DATA_BITS                  0x01
-#define MXU1_UART_7_DATA_BITS                  0x02
-#define MXU1_UART_8_DATA_BITS                  0x03
-
-/* Operation modes */
-#define MXU1_UART_232                          0x00
-#define MXU1_UART_485_RECEIVER_DISABLED                0x01
-#define MXU1_UART_485_RECEIVER_ENABLED         0x02
-
-/* Pipe transfer mode and timeout */
-#define MXU1_PIPE_MODE_CONTINUOUS              0x01
-#define MXU1_PIPE_MODE_MASK                    0x03
-#define MXU1_PIPE_TIMEOUT_MASK                 0x7C
-#define MXU1_PIPE_TIMEOUT_ENABLE               0x80
-
-/* Config struct */
-struct mxu1_uart_config {
-       __be16  wBaudRate;
-       __be16  wFlags;
-       u8      bDataBits;
-       u8      bParity;
-       u8      bStopBits;
-       char    cXon;
-       char    cXoff;
-       u8      bUartMode;
-} __packed;
-
-/* Purge modes */
-#define MXU1_PURGE_OUTPUT                      0x00
-#define MXU1_PURGE_INPUT                       0x80
-
-/* Read/Write data */
-#define MXU1_RW_DATA_ADDR_SFR                  0x10
-#define MXU1_RW_DATA_ADDR_IDATA                        0x20
-#define MXU1_RW_DATA_ADDR_XDATA                        0x30
-#define MXU1_RW_DATA_ADDR_CODE                 0x40
-#define MXU1_RW_DATA_ADDR_GPIO                 0x50
-#define MXU1_RW_DATA_ADDR_I2C                  0x60
-#define MXU1_RW_DATA_ADDR_FLASH                        0x70
-#define MXU1_RW_DATA_ADDR_DSP                  0x80
-
-#define MXU1_RW_DATA_UNSPECIFIED               0x00
-#define MXU1_RW_DATA_BYTE                      0x01
-#define MXU1_RW_DATA_WORD                      0x02
-#define MXU1_RW_DATA_DOUBLE_WORD               0x04
-
-struct mxu1_write_data_bytes {
-       u8      bAddrType;
-       u8      bDataType;
-       u8      bDataCounter;
-       __be16  wBaseAddrHi;
-       __be16  wBaseAddrLo;
-       u8      bData[0];
-} __packed;
-
-/* Interrupt codes */
-#define MXU1_CODE_HARDWARE_ERROR               0xFF
-#define MXU1_CODE_DATA_ERROR                   0x03
-#define MXU1_CODE_MODEM_STATUS                 0x04
-
-static inline int mxu1_get_func_from_code(unsigned char code)
-{
-       return code & 0x0f;
-}
-
-/* Download firmware max packet size */
-#define MXU1_DOWNLOAD_MAX_PACKET_SIZE          64
-
-/* Firmware image header */
-struct mxu1_firmware_header {
-       __le16 wLength;
-       u8 bCheckSum;
-} __packed;
-
-#define MXU1_UART_BASE_ADDR        0xFFA0
-#define MXU1_UART_OFFSET_MCR       0x0004
-
-#define MXU1_BAUD_BASE              923077
-
-#define MXU1_TRANSFER_TIMEOUT      2
-#define MXU1_DOWNLOAD_TIMEOUT       1000
-#define MXU1_DEFAULT_CLOSING_WAIT   4000 /* in .01 secs */
-
-struct mxu1_port {
-       u8 msr;
-       u8 mcr;
-       u8 uart_mode;
-       spinlock_t spinlock; /* Protects msr */
-       struct mutex mutex; /* Protects mcr */
-       bool send_break;
-};
-
-struct mxu1_device {
-       u16 mxd_model;
-};
-
-static const struct usb_device_id mxu1_idtable[] = {
-       { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
-       { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
-       { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
-       { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
-       { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
-       { }
-};
-
-MODULE_DEVICE_TABLE(usb, mxu1_idtable);
-
-/* Write the given buffer out to the control pipe.  */
-static int mxu1_send_ctrl_data_urb(struct usb_serial *serial,
-                                  u8 request,
-                                  u16 value, u16 index,
-                                  void *data, size_t size)
-{
-       int status;
-
-       status = usb_control_msg(serial->dev,
-                                usb_sndctrlpipe(serial->dev, 0),
-                                request,
-                                (USB_DIR_OUT | USB_TYPE_VENDOR |
-                                 USB_RECIP_DEVICE), value, index,
-                                data, size,
-                                USB_CTRL_SET_TIMEOUT);
-       if (status < 0) {
-               dev_err(&serial->interface->dev,
-                       "%s - usb_control_msg failed: %d\n",
-                       __func__, status);
-               return status;
-       }
-
-       if (status != size) {
-               dev_err(&serial->interface->dev,
-                       "%s - short write (%d / %zd)\n",
-                       __func__, status, size);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-/* Send a vendor request without any data */
-static int mxu1_send_ctrl_urb(struct usb_serial *serial,
-                             u8 request, u16 value, u16 index)
-{
-       return mxu1_send_ctrl_data_urb(serial, request, value, index,
-                                      NULL, 0);
-}
-
-static int mxu1_download_firmware(struct usb_serial *serial,
-                                 const struct firmware *fw_p)
-{
-       int status = 0;
-       int buffer_size;
-       int pos;
-       int len;
-       int done;
-       u8 cs = 0;
-       u8 *buffer;
-       struct usb_device *dev = serial->dev;
-       struct mxu1_firmware_header *header;
-       unsigned int pipe;
-
-       pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress);
-
-       buffer_size = fw_p->size + sizeof(*header);
-       buffer = kmalloc(buffer_size, GFP_KERNEL);
-       if (!buffer)
-               return -ENOMEM;
-
-       memcpy(buffer, fw_p->data, fw_p->size);
-       memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
-
-       for (pos = sizeof(*header); pos < buffer_size; pos++)
-               cs = (u8)(cs + buffer[pos]);
-
-       header = (struct mxu1_firmware_header *)buffer;
-       header->wLength = cpu_to_le16(buffer_size - sizeof(*header));
-       header->bCheckSum = cs;
-
-       dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
-
-       for (pos = 0; pos < buffer_size; pos += done) {
-               len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE);
-
-               status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done,
-                               MXU1_DOWNLOAD_TIMEOUT);
-               if (status)
-                       break;
-       }
-
-       kfree(buffer);
-
-       if (status) {
-               dev_err(&dev->dev, "failed to download firmware: %d\n", status);
-               return status;
-       }
-
-       msleep_interruptible(100);
-       usb_reset_device(dev);
-
-       dev_dbg(&dev->dev, "%s - download successful\n", __func__);
-
-       return 0;
-}
-
-static int mxu1_port_probe(struct usb_serial_port *port)
-{
-       struct mxu1_port *mxport;
-       struct mxu1_device *mxdev;
-
-       if (!port->interrupt_in_urb) {
-               dev_err(&port->dev, "no interrupt urb\n");
-               return -ENODEV;
-       }
-
-       mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL);
-       if (!mxport)
-               return -ENOMEM;
-
-       spin_lock_init(&mxport->spinlock);
-       mutex_init(&mxport->mutex);
-
-       mxdev = usb_get_serial_data(port->serial);
-
-       switch (mxdev->mxd_model) {
-       case MXU1_1110_PRODUCT_ID:
-       case MXU1_1150_PRODUCT_ID:
-       case MXU1_1151_PRODUCT_ID:
-               mxport->uart_mode = MXU1_UART_232;
-               break;
-       case MXU1_1130_PRODUCT_ID:
-       case MXU1_1131_PRODUCT_ID:
-               mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED;
-               break;
-       }
-
-       usb_set_serial_port_data(port, mxport);
-
-       port->port.closing_wait =
-                       msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10);
-       port->port.drain_delay = 1;
-
-       return 0;
-}
-
-static int mxu1_port_remove(struct usb_serial_port *port)
-{
-       struct mxu1_port *mxport;
-
-       mxport = usb_get_serial_port_data(port);
-       kfree(mxport);
-
-       return 0;
-}
-
-static int mxu1_startup(struct usb_serial *serial)
-{
-       struct mxu1_device *mxdev;
-       struct usb_device *dev = serial->dev;
-       struct usb_host_interface *cur_altsetting;
-       char fw_name[32];
-       const struct firmware *fw_p = NULL;
-       int err;
-
-       dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n",
-               __func__, le16_to_cpu(dev->descriptor.idProduct),
-               dev->descriptor.bNumConfigurations,
-               dev->actconfig->desc.bConfigurationValue);
-
-       /* create device structure */
-       mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL);
-       if (!mxdev)
-               return -ENOMEM;
-
-       usb_set_serial_data(serial, mxdev);
-
-       mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct);
-
-       cur_altsetting = serial->interface->cur_altsetting;
-
-       /* if we have only 1 configuration, download firmware */
-       if (cur_altsetting->desc.bNumEndpoints == 1) {
-
-               snprintf(fw_name,
-                        sizeof(fw_name),
-                        "moxa/moxa-%04x.fw",
-                        mxdev->mxd_model);
-
-               err = request_firmware(&fw_p, fw_name, &serial->interface->dev);
-               if (err) {
-                       dev_err(&serial->interface->dev, "failed to request firmware: %d\n",
-                               err);
-                       goto err_free_mxdev;
-               }
-
-               err = mxu1_download_firmware(serial, fw_p);
-               if (err)
-                       goto err_release_firmware;
-
-               /* device is being reset */
-               err = -ENODEV;
-               goto err_release_firmware;
-       }
-
-       return 0;
-
-err_release_firmware:
-       release_firmware(fw_p);
-err_free_mxdev:
-       kfree(mxdev);
-
-       return err;
-}
-
-static void mxu1_release(struct usb_serial *serial)
-{
-       struct mxu1_device *mxdev;
-
-       mxdev = usb_get_serial_data(serial);
-       kfree(mxdev);
-}
-
-static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
-                          u8 mask, u8 byte)
-{
-       int status;
-       size_t size;
-       struct mxu1_write_data_bytes *data;
-
-       dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n",
-               __func__, addr, mask, byte);
-
-       size = sizeof(struct mxu1_write_data_bytes) + 2;
-       data = kzalloc(size, GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       data->bAddrType = MXU1_RW_DATA_ADDR_XDATA;
-       data->bDataType = MXU1_RW_DATA_BYTE;
-       data->bDataCounter = 1;
-       data->wBaseAddrHi = cpu_to_be16(addr >> 16);
-       data->wBaseAddrLo = cpu_to_be16(addr);
-       data->bData[0] = mask;
-       data->bData[1] = byte;
-
-       status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0,
-                                        MXU1_RAM_PORT, data, size);
-       if (status < 0)
-               dev_err(&port->dev, "%s - failed: %d\n", __func__, status);
-
-       kfree(data);
-
-       return status;
-}
-
-static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr)
-{
-       int status;
-
-       status = mxu1_write_byte(port,
-                                MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR,
-                                MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP,
-                                mcr);
-       return status;
-}
-
-static void mxu1_set_termios(struct tty_struct *tty,
-                            struct usb_serial_port *port,
-                            struct ktermios *old_termios)
-{
-       struct mxu1_port *mxport = usb_get_serial_port_data(port);
-       struct mxu1_uart_config *config;
-       tcflag_t cflag, iflag;
-       speed_t baud;
-       int status;
-       unsigned int mcr;
-
-       cflag = tty->termios.c_cflag;
-       iflag = tty->termios.c_iflag;
-
-       if (old_termios &&
-           !tty_termios_hw_change(&tty->termios, old_termios) &&
-           tty->termios.c_iflag == old_termios->c_iflag) {
-               dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
-               return;
-       }
-
-       dev_dbg(&port->dev,
-               "%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag);
-
-       if (old_termios) {
-               dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n",
-                       __func__,
-                       old_termios->c_cflag,
-                       old_termios->c_iflag);
-       }
-
-       config = kzalloc(sizeof(*config), GFP_KERNEL);
-       if (!config)
-               return;
-
-       /* these flags must be set */
-       config->wFlags |= MXU1_UART_ENABLE_MS_INTS;
-       config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA;
-       if (mxport->send_break)
-               config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL;
-       config->bUartMode = mxport->uart_mode;
-
-       switch (C_CSIZE(tty)) {
-       case CS5:
-               config->bDataBits = MXU1_UART_5_DATA_BITS;
-               break;
-       case CS6:
-               config->bDataBits = MXU1_UART_6_DATA_BITS;
-               break;
-       case CS7:
-               config->bDataBits = MXU1_UART_7_DATA_BITS;
-               break;
-       default:
-       case CS8:
-               config->bDataBits = MXU1_UART_8_DATA_BITS;
-               break;
-       }
-
-       if (C_PARENB(tty)) {
-               config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING;
-               if (C_CMSPAR(tty)) {
-                       if (C_PARODD(tty))
-                               config->bParity = MXU1_UART_MARK_PARITY;
-                       else
-                               config->bParity = MXU1_UART_SPACE_PARITY;
-               } else {
-                       if (C_PARODD(tty))
-                               config->bParity = MXU1_UART_ODD_PARITY;
-                       else
-                               config->bParity = MXU1_UART_EVEN_PARITY;
-               }
-       } else {
-               config->bParity = MXU1_UART_NO_PARITY;
-       }
-
-       if (C_CSTOPB(tty))
-               config->bStopBits = MXU1_UART_2_STOP_BITS;
-       else
-               config->bStopBits = MXU1_UART_1_STOP_BITS;
-
-       if (C_CRTSCTS(tty)) {
-               /* RTS flow control must be off to drop RTS for baud rate B0 */
-               if (C_BAUD(tty) != B0)
-                       config->wFlags |= MXU1_UART_ENABLE_RTS_IN;
-               config->wFlags |= MXU1_UART_ENABLE_CTS_OUT;
-       }
-
-       if (I_IXOFF(tty) || I_IXON(tty)) {
-               config->cXon  = START_CHAR(tty);
-               config->cXoff = STOP_CHAR(tty);
-
-               if (I_IXOFF(tty))
-                       config->wFlags |= MXU1_UART_ENABLE_X_IN;
-
-               if (I_IXON(tty))
-                       config->wFlags |= MXU1_UART_ENABLE_X_OUT;
-       }
-
-       baud = tty_get_baud_rate(tty);
-       if (!baud)
-               baud = 9600;
-       config->wBaudRate = MXU1_BAUD_BASE / baud;
-
-       dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
-               __func__, baud, config->wBaudRate, config->wFlags,
-               config->bDataBits, config->bParity, config->bStopBits,
-               config->cXon, config->cXoff, config->bUartMode);
-
-       cpu_to_be16s(&config->wBaudRate);
-       cpu_to_be16s(&config->wFlags);
-
-       status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0,
-                                        MXU1_UART1_PORT, config,
-                                        sizeof(*config));
-       if (status)
-               dev_err(&port->dev, "cannot set config: %d\n", status);
-
-       mutex_lock(&mxport->mutex);
-       mcr = mxport->mcr;
-
-       if (C_BAUD(tty) == B0)
-               mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS);
-       else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
-               mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS;
-
-       status = mxu1_set_mcr(port, mcr);
-       if (status)
-               dev_err(&port->dev, "cannot set modem control: %d\n", status);
-       else
-               mxport->mcr = mcr;
-
-       mutex_unlock(&mxport->mutex);
-
-       kfree(config);
-}
-
-static int mxu1_get_serial_info(struct usb_serial_port *port,
-                               struct serial_struct __user *ret_arg)
-{
-       struct serial_struct ret_serial;
-       unsigned cwait;
-
-       if (!ret_arg)
-               return -EFAULT;
-
-       cwait = port->port.closing_wait;
-       if (cwait != ASYNC_CLOSING_WAIT_NONE)
-               cwait = jiffies_to_msecs(cwait) / 10;
-
-       memset(&ret_serial, 0, sizeof(ret_serial));
-
-       ret_serial.type = PORT_16550A;
-       ret_serial.line = port->minor;
-       ret_serial.port = 0;
-       ret_serial.xmit_fifo_size = port->bulk_out_size;
-       ret_serial.baud_base = MXU1_BAUD_BASE;
-       ret_serial.close_delay = 5*HZ;
-       ret_serial.closing_wait = cwait;
-
-       if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
-               return -EFAULT;
-
-       return 0;
-}
-
-
-static int mxu1_set_serial_info(struct usb_serial_port *port,
-                               struct serial_struct __user *new_arg)
-{
-       struct serial_struct new_serial;
-       unsigned cwait;
-
-       if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
-               return -EFAULT;
-
-       cwait = new_serial.closing_wait;
-       if (cwait != ASYNC_CLOSING_WAIT_NONE)
-               cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
-
-       port->port.closing_wait = cwait;
-
-       return 0;
-}
-
-static int mxu1_ioctl(struct tty_struct *tty,
-                     unsigned int cmd, unsigned long arg)
-{
-       struct usb_serial_port *port = tty->driver_data;
-
-       switch (cmd) {
-       case TIOCGSERIAL:
-               return mxu1_get_serial_info(port,
-                                           (struct serial_struct __user *)arg);
-       case TIOCSSERIAL:
-               return mxu1_set_serial_info(port,
-                                           (struct serial_struct __user *)arg);
-       }
-
-       return -ENOIOCTLCMD;
-}
-
-static int mxu1_tiocmget(struct tty_struct *tty)
-{
-       struct usb_serial_port *port = tty->driver_data;
-       struct mxu1_port *mxport = usb_get_serial_port_data(port);
-       unsigned int result;
-       unsigned int msr;
-       unsigned int mcr;
-       unsigned long flags;
-
-       mutex_lock(&mxport->mutex);
-       spin_lock_irqsave(&mxport->spinlock, flags);
-
-       msr = mxport->msr;
-       mcr = mxport->mcr;
-
-       spin_unlock_irqrestore(&mxport->spinlock, flags);
-       mutex_unlock(&mxport->mutex);
-
-       result = ((mcr & MXU1_MCR_DTR)  ? TIOCM_DTR     : 0) |
-                ((mcr & MXU1_MCR_RTS)  ? TIOCM_RTS     : 0) |
-                ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP    : 0) |
-                ((msr & MXU1_MSR_CTS)  ? TIOCM_CTS     : 0) |
-                ((msr & MXU1_MSR_CD)   ? TIOCM_CAR     : 0) |
-                ((msr & MXU1_MSR_RI)   ? TIOCM_RI      : 0) |
-                ((msr & MXU1_MSR_DSR)  ? TIOCM_DSR     : 0);
-
-       dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
-
-       return result;
-}
-
-static int mxu1_tiocmset(struct tty_struct *tty,
-                        unsigned int set, unsigned int clear)
-{
-       struct usb_serial_port *port = tty->driver_data;
-       struct mxu1_port *mxport = usb_get_serial_port_data(port);
-       int err;
-       unsigned int mcr;
-
-       mutex_lock(&mxport->mutex);
-       mcr = mxport->mcr;
-
-       if (set & TIOCM_RTS)
-               mcr |= MXU1_MCR_RTS;
-       if (set & TIOCM_DTR)
-               mcr |= MXU1_MCR_DTR;
-       if (set & TIOCM_LOOP)
-               mcr |= MXU1_MCR_LOOP;
-
-       if (clear & TIOCM_RTS)
-               mcr &= ~MXU1_MCR_RTS;
-       if (clear & TIOCM_DTR)
-               mcr &= ~MXU1_MCR_DTR;
-       if (clear & TIOCM_LOOP)
-               mcr &= ~MXU1_MCR_LOOP;
-
-       err = mxu1_set_mcr(port, mcr);
-       if (!err)
-               mxport->mcr = mcr;
-
-       mutex_unlock(&mxport->mutex);
-
-       return err;
-}
-
-static void mxu1_break(struct tty_struct *tty, int break_state)
-{
-       struct usb_serial_port *port = tty->driver_data;
-       struct mxu1_port *mxport = usb_get_serial_port_data(port);
-
-       if (break_state == -1)
-               mxport->send_break = true;
-       else
-               mxport->send_break = false;
-
-       mxu1_set_termios(tty, port, NULL);
-}
-
-static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
-       struct mxu1_port *mxport = usb_get_serial_port_data(port);
-       struct usb_serial *serial = port->serial;
-       int status;
-       u16 open_settings;
-
-       open_settings = (MXU1_PIPE_MODE_CONTINUOUS |
-                        MXU1_PIPE_TIMEOUT_ENABLE |
-                        (MXU1_TRANSFER_TIMEOUT << 2));
-
-       mxport->msr = 0;
-
-       status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
-       if (status) {
-               dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
-                       status);
-               return status;
-       }
-
-       if (tty)
-               mxu1_set_termios(tty, port, NULL);
-
-       status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
-                                   open_settings, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "cannot send open command: %d\n", status);
-               goto unlink_int_urb;
-       }
-
-       status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
-                                   0, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "cannot send start command: %d\n", status);
-               goto unlink_int_urb;
-       }
-
-       status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
-                                   MXU1_PURGE_INPUT, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "cannot clear input buffers: %d\n",
-                       status);
-
-               goto unlink_int_urb;
-       }
-
-       status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
-                                   MXU1_PURGE_OUTPUT, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "cannot clear output buffers: %d\n",
-                       status);
-
-               goto unlink_int_urb;
-       }
-
-       /*
-        * reset the data toggle on the bulk endpoints to work around bug in
-        * host controllers where things get out of sync some times
-        */
-       usb_clear_halt(serial->dev, port->write_urb->pipe);
-       usb_clear_halt(serial->dev, port->read_urb->pipe);
-
-       if (tty)
-               mxu1_set_termios(tty, port, NULL);
-
-       status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
-                                   open_settings, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "cannot send open command: %d\n", status);
-               goto unlink_int_urb;
-       }
-
-       status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
-                                   0, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "cannot send start command: %d\n", status);
-               goto unlink_int_urb;
-       }
-
-       status = usb_serial_generic_open(tty, port);
-       if (status)
-               goto unlink_int_urb;
-
-       return 0;
-
-unlink_int_urb:
-       usb_kill_urb(port->interrupt_in_urb);
-
-       return status;
-}
-
-static void mxu1_close(struct usb_serial_port *port)
-{
-       int status;
-
-       usb_serial_generic_close(port);
-       usb_kill_urb(port->interrupt_in_urb);
-
-       status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT,
-                                   0, MXU1_UART1_PORT);
-       if (status) {
-               dev_err(&port->dev, "failed to send close port command: %d\n",
-                       status);
-       }
-}
-
-static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr)
-{
-       struct mxu1_port *mxport = usb_get_serial_port_data(port);
-       struct async_icount *icount;
-       unsigned long flags;
-
-       dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr);
-
-       spin_lock_irqsave(&mxport->spinlock, flags);
-       mxport->msr = msr & MXU1_MSR_MASK;
-       spin_unlock_irqrestore(&mxport->spinlock, flags);
-
-       if (msr & MXU1_MSR_DELTA_MASK) {
-               icount = &port->icount;
-               if (msr & MXU1_MSR_DELTA_CTS)
-                       icount->cts++;
-               if (msr & MXU1_MSR_DELTA_DSR)
-                       icount->dsr++;
-               if (msr & MXU1_MSR_DELTA_CD)
-                       icount->dcd++;
-               if (msr & MXU1_MSR_DELTA_RI)
-                       icount->rng++;
-
-               wake_up_interruptible(&port->port.delta_msr_wait);
-       }
-}
-
-static void mxu1_interrupt_callback(struct urb *urb)
-{
-       struct usb_serial_port *port = urb->context;
-       unsigned char *data = urb->transfer_buffer;
-       int length = urb->actual_length;
-       int function;
-       int status;
-       u8 msr;
-
-       switch (urb->status) {
-       case 0:
-               break;
-       case -ECONNRESET:
-       case -ENOENT:
-       case -ESHUTDOWN:
-               dev_dbg(&port->dev, "%s - urb shutting down: %d\n",
-                       __func__, urb->status);
-               return;
-       default:
-               dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
-                       __func__, urb->status);
-               goto exit;
-       }
-
-       if (length != 2) {
-               dev_dbg(&port->dev, "%s - bad packet size: %d\n",
-                       __func__, length);
-               goto exit;
-       }
-
-       if (data[0] == MXU1_CODE_HARDWARE_ERROR) {
-               dev_err(&port->dev, "hardware error: %d\n", data[1]);
-               goto exit;
-       }
-
-       function = mxu1_get_func_from_code(data[0]);
-
-       dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n",
-                __func__, function, data[1]);
-
-       switch (function) {
-       case MXU1_CODE_DATA_ERROR:
-               dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n",
-                        __func__, data[1]);
-               break;
-
-       case MXU1_CODE_MODEM_STATUS:
-               msr = data[1];
-               mxu1_handle_new_msr(port, msr);
-               break;
-
-       default:
-               dev_err(&port->dev, "unknown interrupt code: 0x%02X\n",
-                       data[1]);
-               break;
-       }
-
-exit:
-       status = usb_submit_urb(urb, GFP_ATOMIC);
-       if (status) {
-               dev_err(&port->dev, "resubmit interrupt urb failed: %d\n",
-                       status);
-       }
-}
-
-static struct usb_serial_driver mxu11x0_device = {
-       .driver = {
-               .owner          = THIS_MODULE,
-               .name           = "mxu11x0",
-       },
-       .description            = "MOXA UPort 11x0",
-       .id_table               = mxu1_idtable,
-       .num_ports              = 1,
-       .port_probe             = mxu1_port_probe,
-       .port_remove            = mxu1_port_remove,
-       .attach                 = mxu1_startup,
-       .release                = mxu1_release,
-       .open                   = mxu1_open,
-       .close                  = mxu1_close,
-       .ioctl                  = mxu1_ioctl,
-       .set_termios            = mxu1_set_termios,
-       .tiocmget               = mxu1_tiocmget,
-       .tiocmset               = mxu1_tiocmset,
-       .tiocmiwait             = usb_serial_generic_tiocmiwait,
-       .get_icount             = usb_serial_generic_get_icount,
-       .break_ctl              = mxu1_break,
-       .read_int_callback      = mxu1_interrupt_callback,
-};
-
-static struct usb_serial_driver *const serial_drivers[] = {
-       &mxu11x0_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, mxu1_idtable);
-
-MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
-MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("moxa/moxa-1110.fw");
-MODULE_FIRMWARE("moxa/moxa-1130.fw");
-MODULE_FIRMWARE("moxa/moxa-1131.fw");
-MODULE_FIRMWARE("moxa/moxa-1150.fw");
-MODULE_FIRMWARE("moxa/moxa-1151.fw");
index db86e51..348e198 100644 (file)
@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_UE910_V2                 0x1012
 #define TELIT_PRODUCT_LE922_USBCFG0            0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3            0x1043
+#define TELIT_PRODUCT_LE922_USBCFG5            0x1045
 #define TELIT_PRODUCT_LE920                    0x1200
 #define TELIT_PRODUCT_LE910                    0x1201
 
@@ -315,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
 #define TOSHIBA_PRODUCT_G450                   0x0d45
 
 #define ALINK_VENDOR_ID                                0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E                        0x9001 /* Yes, ALINK_VENDOR_ID */
 #define ALINK_PRODUCT_PH300                    0x9100
 #define ALINK_PRODUCT_3GU                      0x9200
 
@@ -607,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
        .reserved = BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+       .reserved = BIT(5) | BIT(6),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
        .sendsetup = BIT(0),
        .reserved = BIT(1) | BIT(2),
@@ -1122,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+       { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+         .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1176,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
                .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
                .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1645,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
        { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
        { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+         .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
        },
index 9919d2a..1bc6089 100644 (file)
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x9056)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9060)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9061)},   /* Sierra Wireless Modem */
-       {DEVICE_SWI(0x1199, 0x9070)},   /* Sierra Wireless MC74xx/EM74xx */
-       {DEVICE_SWI(0x1199, 0x9071)},   /* Sierra Wireless MC74xx/EM74xx */
+       {DEVICE_SWI(0x1199, 0x9070)},   /* Sierra Wireless MC74xx */
+       {DEVICE_SWI(0x1199, 0x9071)},   /* Sierra Wireless MC74xx */
+       {DEVICE_SWI(0x1199, 0x9078)},   /* Sierra Wireless EM74xx */
+       {DEVICE_SWI(0x1199, 0x9079)},   /* Sierra Wireless EM74xx */
        {DEVICE_SWI(0x413c, 0x81a2)},   /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a3)},   /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a8)},   /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 2760a7b..8c80a48 100644 (file)
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
                info.num_regions = VFIO_PCI_NUM_REGIONS;
                info.num_irqs = VFIO_PCI_NUM_IRQS;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
                        return -EINVAL;
                }
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
                else
                        info.flags |= VFIO_IRQ_INFO_NORESIZE;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
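
These hunks, like the vfio_platform and vfio_iommu_type1 hunks that follow, fix the same bug: copy_to_user() returns the number of bytes it could not copy, not an errno, so returning its result directly from an ioctl handler can hand a positive count back to userspace. A minimal sketch of the corrected pattern (helper name hypothetical):

    #include <linux/uaccess.h>

    /* Sketch of the pattern applied above: map any uncopied bytes to -EFAULT. */
    static long sketch_ioctl_reply(void __user *arg, const void *info,
                                   unsigned long minsz)
    {
            if (copy_to_user(arg, info, minsz))
                    return -EFAULT;         /* some bytes were not copied */

            return 0;                       /* full copy succeeded */
    }
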
index 418cdd9..e65b142 100644 (file)
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
                info.num_regions = vdev->num_regions;
                info.num_irqs = vdev->num_irqs;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
                info.size = vdev->regions[info.index].size;
                info.flags = vdev->regions[info.index].flags;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
                info.flags = vdev->irqs[info.index].flags;
                info.count = vdev->irqs[info.index].count;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
index 6f1ea3d..75b24e9 100644 (file)
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 
                info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_IOMMU_MAP_DMA) {
                struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
                if (ret)
                        return ret;
 
-               return copy_to_user((void __user *)arg, &unmap, minsz);
+               return copy_to_user((void __user *)arg, &unmap, minsz) ?
+                       -EFAULT : 0;
        }
 
        return -ENOTTY;
index ad2146a..236553e 100644 (file)
@@ -1156,6 +1156,8 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 {
        __virtio16 last_used_idx;
        int r;
+       bool is_le = vq->is_le;
+
        if (!vq->private_data) {
                vq->is_le = virtio_legacy_is_little_endian();
                return 0;
@@ -1165,15 +1167,20 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 
        r = vhost_update_used_flags(vq);
        if (r)
-               return r;
+               goto err;
        vq->signalled_used_valid = false;
-       if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
-               return -EFAULT;
+       if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
+               r = -EFAULT;
+               goto err;
+       }
        r = __get_user(last_used_idx, &vq->used->idx);
        if (r)
-               return r;
+               goto err;
        vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
        return 0;
+err:
+       vq->is_le = is_le;
+       return r;
 }
 EXPORT_SYMBOL_GPL(vhost_init_used);
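
The hunk above snapshots vq->is_le before vhost_init_used() can modify it and restores it on every failure path, so an error no longer leaves the virtqueue with a half-updated byte-order flag. A stripped-down illustration of that save-and-restore shape, using hypothetical names:

    /* Hypothetical illustration of the fix's shape: snapshot, then restore on error. */
    struct sketch_vq {
            bool is_le;
    };

    static int sketch_init_state(struct sketch_vq *vq, int hw_err)
    {
            bool is_le = vq->is_le;         /* snapshot before any change */

            vq->is_le = true;               /* stand-in for the endianness setup */

            if (hw_err) {                   /* stand-in for a failing access check */
                    vq->is_le = is_le;      /* undo the partial state change */
                    return hw_err;
            }

            return 0;
    }
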
 
index 92f3949..6e92917 100644 (file)
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
        }
 
        if (!err) {
+               ops->cur_blink_jiffies = HZ / 5;
                info->fbcon_par = ops;
 
                if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
        ops->currcon = -1;
        ops->graphics = 1;
        ops->cur_rotate = -1;
+       ops->cur_blink_jiffies = HZ / 5;
        info->fbcon_par = ops;
        p->con_rotate = initial_rotation;
        set_blitting_type(vc, info);
index c0c11fa..7760fc1 100644 (file)
@@ -679,7 +679,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 
        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
-                                               cap.length),
+                                               cap.offset),
                              &notify_offset);
 
        /* We don't know how many VQs we'll map, ahead of the time.
index 0f6d851..80825a7 100644 (file)
@@ -1569,6 +1569,17 @@ config WATCHDOG_RIO
          machines.  The watchdog timeout period is normally one minute but
          can be changed with a boot-time parameter.
 
+config WATCHDOG_SUN4V
+       tristate "Sun4v Watchdog support"
+       select WATCHDOG_CORE
+       depends on SPARC64
+       help
+         Say Y here to support the hypervisor watchdog capability embedded
+         in the SPARC sun4v architecture.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sun4v_wdt.
+
 # XTENSA Architecture
 
 # Xen Architecture
index f566753..f6a6a38 100644 (file)
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
 
 obj-$(CONFIG_WATCHDOG_RIO)             += riowd.o
 obj-$(CONFIG_WATCHDOG_CP1XXX)          += cpwd.o
+obj-$(CONFIG_WATCHDOG_SUN4V)           += sun4v_wdt.o
 
 # XTENSA Architecture
 
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644 (file)
index 0000000..1467fe5
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ *     sun4v watchdog timer
+ *     (c) Copyright 2016 Oracle Corporation
+ *
+ *     Implement a simple watchdog driver using the built-in sun4v hypervisor
+ *     watchdog support. If time expires, the hypervisor stops or bounces
+ *     the guest domain.
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/watchdog.h>
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#define WDT_TIMEOUT                    60
+#define WDT_MAX_TIMEOUT                        31536000
+#define WDT_MIN_TIMEOUT                        1
+#define WDT_DEFAULT_RESOLUTION_MS      1000    /* 1 second */
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+       __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+       __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int sun4v_wdt_stop(struct watchdog_device *wdd)
+{
+       sun4v_mach_set_watchdog(0, NULL);
+
+       return 0;
+}
+
+static int sun4v_wdt_ping(struct watchdog_device *wdd)
+{
+       int hverr;
+
+       /*
+        * HV watchdog timer will round up the timeout
+        * passed in to the nearest multiple of the
+        * watchdog resolution in milliseconds.
+        */
+       hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
+       if (hverr == HV_EINVAL)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
+                                unsigned int timeout)
+{
+       wdd->timeout = timeout;
+
+       return 0;
+}
+
+static const struct watchdog_info sun4v_wdt_ident = {
+       .options =      WDIOF_SETTIMEOUT |
+                       WDIOF_MAGICCLOSE |
+                       WDIOF_KEEPALIVEPING,
+       .identity =     "sun4v hypervisor watchdog",
+       .firmware_version = 0,
+};
+
+static struct watchdog_ops sun4v_wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        sun4v_wdt_ping,
+       .stop =         sun4v_wdt_stop,
+       .ping =         sun4v_wdt_ping,
+       .set_timeout =  sun4v_wdt_set_timeout,
+};
+
+static struct watchdog_device wdd = {
+       .info = &sun4v_wdt_ident,
+       .ops = &sun4v_wdt_ops,
+       .min_timeout = WDT_MIN_TIMEOUT,
+       .max_timeout = WDT_MAX_TIMEOUT,
+       .timeout = WDT_TIMEOUT,
+};
+
+static int __init sun4v_wdt_init(void)
+{
+       struct mdesc_handle *handle;
+       u64 node;
+       const u64 *value;
+       int err = 0;
+       unsigned long major = 1, minor = 1;
+
+       /*
+        * There are 2 properties that can be set from the control
+        * domain for the watchdog.
+        * watchdog-resolution
+        * watchdog-max-timeout
+        *
+        * We expect a handle to be returned; otherwise something is
+        * seriously wrong, so returning -ENODEV here is correct.
+        */
+
+       handle = mdesc_grab();
+       if (!handle)
+               return -ENODEV;
+
+       node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
+       err = -ENODEV;
+       if (node == MDESC_NODE_NULL)
+               goto out_release;
+
+       /*
+        * This is a safe way to validate if we are on the right
+        * platform.
+        */
+       if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
+               goto out_hv_unreg;
+
+       /* Allow value of watchdog-resolution up to 1s (default) */
+       value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
+       err = -EINVAL;
+       if (value) {
+               if (*value == 0 ||
+                   *value > WDT_DEFAULT_RESOLUTION_MS)
+                       goto out_hv_unreg;
+       }
+
+       value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
+       if (value) {
+               /*
+                * If the property value (in ms) is smaller than
+                * min_timeout, return -EINVAL.
+                */
+               if (*value < wdd.min_timeout * 1000)
+                       goto out_hv_unreg;
+
+               /*
+                * If the property value is smaller than the
+                * default max_timeout, then set the watchdog max_timeout to
+                * the value of the property in seconds.
+                */
+               if (*value < wdd.max_timeout * 1000)
+                       wdd.max_timeout = *value  / 1000;
+       }
+
+       watchdog_init_timeout(&wdd, timeout, NULL);
+
+       watchdog_set_nowayout(&wdd, nowayout);
+
+       err = watchdog_register_device(&wdd);
+       if (err)
+               goto out_hv_unreg;
+
+       pr_info("initialized (timeout=%ds, nowayout=%d)\n",
+                wdd.timeout, nowayout);
+
+       mdesc_release(handle);
+
+       return 0;
+
+out_hv_unreg:
+       sun4v_hvapi_unregister(HV_GRP_CORE);
+
+out_release:
+       mdesc_release(handle);
+       return err;
+}
+
+static void __exit sun4v_wdt_exit(void)
+{
+       sun4v_hvapi_unregister(HV_GRP_CORE);
+       watchdog_unregister_device(&wdd);
+}
+
+module_init(sun4v_wdt_init);
+module_exit(sun4v_wdt_exit);
+
+MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
+MODULE_DESCRIPTION("sun4v watchdog driver");
+MODULE_LICENSE("GPL");
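
A watchdog registered through the watchdog core is driven from user space via /dev/watchdog. The stand-alone sketch below (not part of the patch; the device path and the 60-second timeout are only examples) shows the usual open / set-timeout / keepalive / magic-close sequence that would exercise this driver once loaded; the ioctls end up in the .set_timeout, .ping and .stop callbacks above.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        int timeout = 60;                       /* example value, seconds */
        int fd = open("/dev/watchdog", O_WRONLY);

        if (fd < 0) {
                perror("open /dev/watchdog");
                return 1;
        }
        ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* reaches .set_timeout */
        ioctl(fd, WDIOC_KEEPALIVE, 0);          /* reaches .ping */
        write(fd, "V", 1);                      /* magic close: allow stop */
        close(fd);                              /* reaches .stop unless nowayout */
        return 0;
}
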
index 73dafdc..fb02214 100644 (file)
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
        /*
         * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
         * to access the BARs where the MSI-X entries reside.
+        * But VF devices are unique in that the PF needs to be checked instead.
         */
-       pci_read_config_word(dev, PCI_COMMAND, &cmd);
+       pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
        if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
                return -ENXIO;
 
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
        struct xen_pcibk_dev_data *dev_data = NULL;
        struct xen_pci_op *op = &pdev->op;
        int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+       unsigned int nr = 0;
+#endif
 
        *op = pdev->sh_info->op;
        barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
                        op->err = xen_pcibk_disable_msi(pdev, dev, op);
                        break;
                case XEN_PCI_OP_enable_msix:
+                       nr = op->value;
                        op->err = xen_pcibk_enable_msix(pdev, dev, op);
                        break;
                case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
        if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
                unsigned int i;
 
-               for (i = 0; i < op->value; i++)
+               for (i = 0; i < nr; i++)
                        pdev->sh_info->op.msix_entries[i].vector =
                                op->msix_entries[i].vector;
        }
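
The nr local added above captures the requested MSI-X entry count before xen_pcibk_enable_msix() runs, and the vector copy loop is now bounded by that saved value rather than by op->value, which the call may have changed in the meantime. A minimal stand-alone illustration of the general snapshot-then-validate pattern (names and sizes are invented for the example; this is not the pciback code):

#include <stdint.h>

#define MAX_VECTORS 64

/* A count field that other code may rewrite while we work. */
struct shared_op {
        volatile uint32_t value;                /* requested vector count */
        uint32_t vectors[MAX_VECTORS];
};

static int copy_vectors(struct shared_op *op, uint32_t *out)
{
        uint32_t nr = op->value;                /* fetch the count exactly once */
        uint32_t i;

        if (nr > MAX_VECTORS)                   /* validate the snapshot ... */
                return -1;
        for (i = 0; i < nr; i++)                /* ... and use only the snapshot */
                out[i] = op->vectors[i];
        return (int)nr;
}
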
index ad4eb10..c46ee18 100644 (file)
@@ -848,6 +848,24 @@ static int scsiback_map(struct vscsibk_info *info)
        return scsiback_init_sring(info, ring_ref, evtchn);
 }
 
+/*
+  Check for a translation entry being present
+*/
+static struct v2p_entry *scsiback_chk_translation_entry(
+       struct vscsibk_info *info, struct ids_tuple *v)
+{
+       struct list_head *head = &(info->v2p_entry_lists);
+       struct v2p_entry *entry;
+
+       list_for_each_entry(entry, head, l)
+               if ((entry->v.chn == v->chn) &&
+                   (entry->v.tgt == v->tgt) &&
+                   (entry->v.lun == v->lun))
+                       return entry;
+
+       return NULL;
+}
+
 /*
   Add a new translation entry
 */
@@ -855,9 +873,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
                                          char *phy, struct ids_tuple *v)
 {
        int err = 0;
-       struct v2p_entry *entry;
        struct v2p_entry *new;
-       struct list_head *head = &(info->v2p_entry_lists);
        unsigned long flags;
        char *lunp;
        unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
        spin_lock_irqsave(&info->v2p_lock, flags);
 
        /* Check double assignment to identical virtual ID */
-       list_for_each_entry(entry, head, l) {
-               if ((entry->v.chn == v->chn) &&
-                   (entry->v.tgt == v->tgt) &&
-                   (entry->v.lun == v->lun)) {
-                       pr_warn("Virtual ID is already used. Assignment was not performed.\n");
-                       err = -EEXIST;
-                       goto out;
-               }
-
+       if (scsiback_chk_translation_entry(info, v)) {
+               pr_warn("Virtual ID is already used. Assignment was not performed.\n");
+               err = -EEXIST;
+               goto out;
        }
 
        /* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
        new->v = *v;
        new->tpg = tpg;
        new->lun = unpacked_lun;
-       list_add_tail(&new->l, head);
+       list_add_tail(&new->l, &info->v2p_entry_lists);
 
 out:
        spin_unlock_irqrestore(&info->v2p_lock, flags);
 
 out_free:
-       mutex_lock(&tpg->tv_tpg_mutex);
-       tpg->tv_tpg_fe_count--;
-       mutex_unlock(&tpg->tv_tpg_mutex);
-
-       if (err)
+       if (err) {
+               mutex_lock(&tpg->tv_tpg_mutex);
+               tpg->tv_tpg_fe_count--;
+               mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(new);
+       }
 
        return err;
 }
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
 }
 
 /*
-  Delete the translation entry specfied
+  Delete the translation entry specified
 */
 static int scsiback_del_translation_entry(struct vscsibk_info *info,
                                          struct ids_tuple *v)
 {
        struct v2p_entry *entry;
-       struct list_head *head = &(info->v2p_entry_lists);
        unsigned long flags;
+       int ret = 0;
 
        spin_lock_irqsave(&info->v2p_lock, flags);
        /* Find out the translation entry specified */
-       list_for_each_entry(entry, head, l) {
-               if ((entry->v.chn == v->chn) &&
-                   (entry->v.tgt == v->tgt) &&
-                   (entry->v.lun == v->lun)) {
-                       goto found;
-               }
-       }
-
-       spin_unlock_irqrestore(&info->v2p_lock, flags);
-       return 1;
-
-found:
-       /* Delete the translation entry specfied */
-       __scsiback_del_translation_entry(entry);
+       entry = scsiback_chk_translation_entry(info, v);
+       if (entry)
+               __scsiback_del_translation_entry(entry);
+       else
+               ret = -ENOENT;
 
        spin_unlock_irqrestore(&info->v2p_lock, flags);
-       return 0;
+       return ret;
 }
 
 static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                                char *phy, struct ids_tuple *vir, int try)
 {
+       struct v2p_entry *entry;
+       unsigned long flags;
+
+       if (try) {
+               spin_lock_irqsave(&info->v2p_lock, flags);
+               entry = scsiback_chk_translation_entry(info, vir);
+               spin_unlock_irqrestore(&info->v2p_lock, flags);
+               if (entry)
+                       return;
+       }
        if (!scsiback_add_translation_entry(info, phy, vir)) {
                if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
                                  "%d", XenbusStateInitialised)) {
index 9433e46..912b64e 100644 (file)
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
 
        if (len == 0)
                return 0;
+       if (len > XENSTORE_PAYLOAD_MAX)
+               return -EINVAL;
 
        rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
        if (rb == NULL)
index 0548c53..22fc7c8 100644 (file)
@@ -511,8 +511,6 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
        pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
                 page->index, to);
        BUG_ON(to > PAGE_CACHE_SIZE);
-       kmap(page);
-       data = page_address(page);
        bsize = AFFS_SB(sb)->s_data_blksize;
        tmp = page->index << PAGE_CACHE_SHIFT;
        bidx = tmp / bsize;
@@ -524,14 +522,15 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - pos);
                BUG_ON(pos + tmp > to || tmp > bsize);
+               data = kmap_atomic(page);
                memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
+               kunmap_atomic(data);
                affs_brelse(bh);
                bidx++;
                pos += tmp;
                boff = 0;
        }
        flush_dcache_page(page);
-       kunmap(page);
        return 0;
 }
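
The switch from kmap() to kmap_atomic() above narrows the mapping to just the memcpy(): affs_bread() between iterations may sleep, which is not allowed while an atomic mapping is held. A minimal kernel-style sketch of the pattern (illustrative only, not part of the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Map a possibly-highmem page only for the duration of the copy; nothing
 * that can sleep may run between kmap_atomic() and kunmap_atomic(). */
static void copy_chunk_to_page(struct page *page, size_t pos,
                               const void *src, size_t len)
{
        void *data = kmap_atomic(page);

        memcpy(data + pos, src, len);
        kunmap_atomic(data);
}
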
 
index 051ea48..7d914c6 100644 (file)
@@ -653,7 +653,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               random_variable = (unsigned long) get_random_int();
+               random_variable = get_random_long();
                random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
index 39b3a17..826b164 100644 (file)
@@ -1201,7 +1201,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
-               bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
+               if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
+                       bdev->bd_inode->i_flags = S_DAX;
+               else
+                       bdev->bd_inode->i_flags = 0;
+
                if (!partno) {
                        ret = -ENXIO;
                        bdev->bd_part = disk_get_part(disk, partno);
@@ -1693,13 +1697,24 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
        return try_to_free_buffers(page);
 }
 
+static int blkdev_writepages(struct address_space *mapping,
+                            struct writeback_control *wbc)
+{
+       if (dax_mapping(mapping)) {
+               struct block_device *bdev = I_BDEV(mapping->host);
+
+               return dax_writeback_mapping_range(mapping, bdev, wbc);
+       }
+       return generic_writepages(mapping, wbc);
+}
+
 static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .readpages      = blkdev_readpages,
        .writepage      = blkdev_writepage,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
-       .writepages     = generic_writepages,
+       .writepages     = blkdev_writepages,
        .releasepage    = blkdev_releasepage,
        .direct_IO      = blkdev_direct_IO,
        .is_dirty_writeback = buffer_check_dirty_writeback,
index 7cf8509..2c849b0 100644 (file)
@@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
 
                err = btrfs_insert_fs_root(root->fs_info, root);
+               /*
+                * The root might have been inserted already, as before we look
+                * for orphan roots, log replay might have happened, which
+                * triggers a transaction commit and qgroup accounting, which
+                * in turn reads and inserts fs roots while doing backref
+                * walking.
+                */
+               if (err == -EEXIST)
+                       err = 0;
                if (err) {
-                       BUG_ON(err == -EEXIST);
                        btrfs_free_fs_root(root);
                        break;
                }
index c222137..19adeb0 100644 (file)
@@ -1756,6 +1756,10 @@ int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
        u32 pool;
        int ret, flags;
 
+       /* does not support pool namespace yet */
+       if (ci->i_pool_ns_len)
+               return -EIO;
+
        if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
                                NOPOOLPERM))
                return 0;
index cdbf8cf..6fe0ad2 100644 (file)
@@ -2753,7 +2753,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
                             void *inline_data, int inline_len,
                             struct ceph_buffer *xattr_buf,
                             struct ceph_mds_session *session,
-                            struct ceph_cap *cap, int issued)
+                            struct ceph_cap *cap, int issued,
+                            u32 pool_ns_len)
        __releases(ci->i_ceph_lock)
        __releases(mdsc->snap_rwsem)
 {
@@ -2873,6 +2874,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
                /* file layout may have changed */
                ci->i_layout = grant->layout;
+               ci->i_pool_ns_len = pool_ns_len;
+
                /* size/truncate_seq? */
                queue_trunc = ceph_fill_file_size(inode, issued,
                                        le32_to_cpu(grant->truncate_seq),
@@ -3411,6 +3414,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        u32  inline_len = 0;
        void *snaptrace;
        size_t snaptrace_len;
+       u32 pool_ns_len = 0;
        void *p, *end;
 
        dout("handle_caps from mds%d\n", mds);
@@ -3463,6 +3467,21 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                p += inline_len;
        }
 
+       if (le16_to_cpu(msg->hdr.version) >= 8) {
+               u64 flush_tid;
+               u32 caller_uid, caller_gid;
+               u32 osd_epoch_barrier;
+               /* version >= 5 */
+               ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
+               /* version >= 6 */
+               ceph_decode_64_safe(&p, end, flush_tid, bad);
+               /* version >= 7 */
+               ceph_decode_32_safe(&p, end, caller_uid, bad);
+               ceph_decode_32_safe(&p, end, caller_gid, bad);
+               /* version >= 8 */
+               ceph_decode_32_safe(&p, end, pool_ns_len, bad);
+       }
+
        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
        ci = ceph_inode(inode);
@@ -3518,7 +3537,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                                  &cap, &issued);
                handle_cap_grant(mdsc, inode, h,
                                 inline_version, inline_data, inline_len,
-                                msg->middle, session, cap, issued);
+                                msg->middle, session, cap, issued,
+                                pool_ns_len);
                if (realm)
                        ceph_put_snap_realm(mdsc, realm);
                goto done_unlocked;
@@ -3542,7 +3562,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                issued |= __ceph_caps_dirty(ci);
                handle_cap_grant(mdsc, inode, h,
                                 inline_version, inline_data, inline_len,
-                                msg->middle, session, cap, issued);
+                                msg->middle, session, cap, issued,
+                                pool_ns_len);
                goto done_unlocked;
 
        case CEPH_CAP_OP_FLUSH_ACK:
index fb4ba2e..5849b88 100644 (file)
@@ -396,6 +396,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        ci->i_symlink = NULL;
 
        memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
+       ci->i_pool_ns_len = 0;
 
        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);
@@ -756,6 +757,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
                        ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
                ci->i_layout = info->layout;
+               ci->i_pool_ns_len = iinfo->pool_ns_len;
 
                queue_trunc = ceph_fill_file_size(inode, issued,
                                        le32_to_cpu(info->truncate_seq),
index e7b130a..911d64d 100644 (file)
@@ -100,6 +100,14 @@ static int parse_reply_info_in(void **p, void *end,
        } else
                info->inline_version = CEPH_INLINE_NONE;
 
+       if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
+               ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
+               ceph_decode_need(p, end, info->pool_ns_len, bad);
+               *p += info->pool_ns_len;
+       } else {
+               info->pool_ns_len = 0;
+       }
+
        return 0;
 bad:
        return err;
@@ -2298,6 +2306,14 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
 
+       /* deny access to directories with pool_ns layouts */
+       if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
+           ceph_inode(req->r_inode)->i_pool_ns_len)
+               return -EIO;
+       if (req->r_locked_dir &&
+           ceph_inode(req->r_locked_dir)->i_pool_ns_len)
+               return -EIO;
+
        /* issue */
        mutex_lock(&mdsc->mutex);
        __register_request(mdsc, req, dir);
index ccf11ef..37712cc 100644 (file)
@@ -44,6 +44,7 @@ struct ceph_mds_reply_info_in {
        u64 inline_version;
        u32 inline_len;
        char *inline_data;
+       u32 pool_ns_len;
 };
 
 /*
index 75b7d12..9c458eb 100644 (file)
@@ -287,6 +287,7 @@ struct ceph_inode_info {
 
        struct ceph_dir_layout i_dir_layout;
        struct ceph_file_layout i_layout;
+       size_t i_pool_ns_len;
        char *i_symlink;
 
        /* for dirs */
index c48ca13..2eea403 100644 (file)
@@ -1013,7 +1013,6 @@ const struct file_operations cifs_file_strict_ops = {
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
        .clone_file_range = cifs_clone_file_range,
-       .clone_file_range = cifs_clone_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
 };
index 68c4547..83aac8b 100644 (file)
  * so that it will fit. We use hash_64 to convert the value to 31 bits, and
  * then add 1, to ensure that we don't end up with a 0 as the value.
  */
-#if BITS_PER_LONG == 64
 static inline ino_t
 cifs_uniqueid_to_ino_t(u64 fileid)
 {
+       if ((sizeof(ino_t)) < (sizeof(u64)))
+               return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+
        return (ino_t)fileid;
+
 }
-#else
-static inline ino_t
-cifs_uniqueid_to_ino_t(u64 fileid)
-{
-       return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
-}
-#endif
 
 extern struct file_system_type cifs_fs_type;
 extern const struct address_space_operations cifs_addr_ops;
index 90b4f9f..76fcb50 100644 (file)
@@ -1396,11 +1396,10 @@ openRetry:
  * current bigbuf.
  */
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+discard_remaining_data(struct TCP_Server_Info *server)
 {
        unsigned int rfclen = get_rfc1002_length(server->smallbuf);
        int remaining = rfclen + 4 - server->total_read;
-       struct cifs_readdata *rdata = mid->callback_data;
 
        while (remaining > 0) {
                int length;
@@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                remaining -= length;
        }
 
-       dequeue_mid(mid, rdata->result);
        return 0;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+       int length;
+       struct cifs_readdata *rdata = mid->callback_data;
+
+       length = discard_remaining_data(server);
+       dequeue_mid(mid, rdata->result);
+       return length;
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                return length;
        server->total_read += length;
 
+       if (server->ops->is_status_pending &&
+           server->ops->is_status_pending(buf, server, 0)) {
+               discard_remaining_data(server);
+               return -1;
+       }
+
        /* Was the SMB read successful? */
        rdata->result = server->ops->map_error(buf, false);
        if (rdata->result != 0) {
index 10f8d5c..42e1f44 100644 (file)
@@ -1106,21 +1106,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
 {
        char *data_offset;
        struct create_context *cc;
-       unsigned int next = 0;
+       unsigned int next;
+       unsigned int remaining;
        char *name;
 
        data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
+       remaining = le32_to_cpu(rsp->CreateContextsLength);
        cc = (struct create_context *)data_offset;
-       do {
-               cc = (struct create_context *)((char *)cc + next);
+       while (remaining >= sizeof(struct create_context)) {
                name = le16_to_cpu(cc->NameOffset) + (char *)cc;
-               if (le16_to_cpu(cc->NameLength) != 4 ||
-                   strncmp(name, "RqLs", 4)) {
-                       next = le32_to_cpu(cc->Next);
-                       continue;
-               }
-               return server->ops->parse_lease_buf(cc, epoch);
-       } while (next != 0);
+               if (le16_to_cpu(cc->NameLength) == 4 &&
+                   strncmp(name, "RqLs", 4) == 0)
+                       return server->ops->parse_lease_buf(cc, epoch);
+
+               next = le32_to_cpu(cc->Next);
+               if (!next)
+                       break;
+               remaining -= next;
+               cc = (struct create_context *)((char *)cc + next);
+       }
 
        return 0;
 }
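
The rewritten loop above bounds the walk by CreateContextsLength and stops on a zero Next offset, so a malformed reply can no longer make parse_lease_state() spin or read past the received buffer. A stand-alone sketch of the same idea on an invented record format (not the SMB2 structures):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* A chained, variable-size record; "next" offsets come from an untrusted
 * peer, so the walk is bounded by the bytes actually received. */
struct record {
        uint32_t next;                  /* offset of the next record, 0 = last */
        uint16_t name_off;              /* offset of the name within the record */
        uint16_t name_len;
};

static const struct record *find_record(const uint8_t *buf, size_t buf_len,
                                        const char *name, size_t name_len)
{
        size_t remaining = buf_len;
        const uint8_t *p = buf;

        while (remaining >= sizeof(struct record)) {
                const struct record *r = (const struct record *)p;

                if (r->name_len == name_len &&
                    r->name_off + (size_t)r->name_len <= remaining &&
                    memcmp(p + r->name_off, name, name_len) == 0)
                        return r;
                if (r->next == 0 || r->next > remaining)
                        break;          /* last record, or a bogus offset */
                remaining -= r->next;
                p += r->next;
        }
        return NULL;
}
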
index fc2e314..7111724 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -79,15 +79,14 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
 }
 
 /*
- * dax_clear_blocks() is called from within transaction context from XFS,
+ * dax_clear_sectors() is called from within transaction context from XFS,
  * and hence this means the stack from this point must follow GFP_NOFS
  * semantics for all operations.
  */
-int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
 {
-       struct block_device *bdev = inode->i_sb->s_bdev;
        struct blk_dax_ctl dax = {
-               .sector = block << (inode->i_blkbits - 9),
+               .sector = _sector,
                .size = _size,
        };
 
@@ -109,7 +108,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
        wmb_pmem();
        return 0;
 }
-EXPORT_SYMBOL_GPL(dax_clear_blocks);
+EXPORT_SYMBOL_GPL(dax_clear_sectors);
 
 /* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
 static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
@@ -485,11 +484,10 @@ static int dax_writeback_one(struct block_device *bdev,
  * end]. This is required by data integrity operations to ensure file data is
  * on persistent storage prior to completion of the operation.
  */
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
-               loff_t end)
+int dax_writeback_mapping_range(struct address_space *mapping,
+               struct block_device *bdev, struct writeback_control *wbc)
 {
        struct inode *inode = mapping->host;
-       struct block_device *bdev = inode->i_sb->s_bdev;
        pgoff_t start_index, end_index, pmd_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
@@ -500,8 +498,11 @@ int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;
 
-       start_index = start >> PAGE_CACHE_SHIFT;
-       end_index = end >> PAGE_CACHE_SHIFT;
+       if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
+               return 0;
+
+       start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
+       end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
        pmd_index = DAX_PMD_INDEX(start_index);
 
        rcu_read_lock();
index 92d5140..2398f9f 100644 (file)
@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
        return dentry->d_name.name != dentry->d_iname;
 }
 
-/*
- * Make sure other CPUs see the inode attached before the type is set.
- */
 static inline void __d_set_inode_and_type(struct dentry *dentry,
                                          struct inode *inode,
                                          unsigned type_flags)
@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
        unsigned flags;
 
        dentry->d_inode = inode;
-       smp_wmb();
        flags = READ_ONCE(dentry->d_flags);
        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        flags |= type_flags;
        WRITE_ONCE(dentry->d_flags, flags);
 }
 
-/*
- * Ideally, we want to make sure that other CPUs see the flags cleared before
- * the inode is detached, but this is really a violation of RCU principles
- * since the ordering suggests we should always set inode before flags.
- *
- * We should instead replace or discard the entire dentry - but that sucks
- * performancewise on mass deletion/rename.
- */
 static inline void __d_clear_type_and_inode(struct dentry *dentry)
 {
        unsigned flags = READ_ONCE(dentry->d_flags);
 
        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        WRITE_ONCE(dentry->d_flags, flags);
-       smp_wmb();
        dentry->d_inode = NULL;
 }
 
@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
        __releases(dentry->d_inode->i_lock)
 {
        struct inode *inode = dentry->d_inode;
+
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
-       dentry_rcuwalk_invalidate(dentry);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
@@ -1758,8 +1747,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        spin_lock(&dentry->d_lock);
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_set_inode_and_type(dentry, inode, add_flags);
-       dentry_rcuwalk_invalidate(dentry);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        fsnotify_d_instantiate(dentry, inode);
 }
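
The smp_wmb() pairs removed above are replaced by bumping the dentry's d_seq around the inode/type update, so an RCU-walk reader that races with the change simply retries; the new read_seqcount_retry() check in the should_follow_link() hunk further below relies on exactly this. A generic kernel-style sketch of that writer/reader pairing (illustrative, not the dcache code; the writer is assumed to be serialized by a lock, as d_lock is here):

#include <linux/seqlock.h>

struct obj {
        seqcount_t seq;
        void *ptr;
        unsigned int flags;
};

/* Writer: caller holds the object's lock, so raw_write_seqcount_*() is safe. */
static void obj_publish(struct obj *o, void *ptr, unsigned int flags)
{
        raw_write_seqcount_begin(&o->seq);
        o->ptr = ptr;
        o->flags = flags;
        raw_write_seqcount_end(&o->seq);
}

/* Lockless reader: retry until a consistent snapshot is observed. */
static void *obj_read_consistent(struct obj *o, unsigned int *flags)
{
        unsigned int seq;
        void *p;

        do {
                seq = read_seqcount_begin(&o->seq);
                p = o->ptr;
                *flags = o->flags;
        } while (read_seqcount_retry(&o->seq, seq));
        return p;
}
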
index 2c88d68..c1400b1 100644 (file)
@@ -80,23 +80,6 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
        return ret;
 }
 
-static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct inode *inode = file_inode(vma->vm_file);
-       struct ext2_inode_info *ei = EXT2_I(inode);
-       int ret;
-
-       sb_start_pagefault(inode->i_sb);
-       file_update_time(vma->vm_file);
-       down_read(&ei->dax_sem);
-
-       ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL);
-
-       up_read(&ei->dax_sem);
-       sb_end_pagefault(inode->i_sb);
-       return ret;
-}
-
 static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
                struct vm_fault *vmf)
 {
@@ -124,7 +107,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
 static const struct vm_operations_struct ext2_dax_vm_ops = {
        .fault          = ext2_dax_fault,
        .pmd_fault      = ext2_dax_pmd_fault,
-       .page_mkwrite   = ext2_dax_mkwrite,
+       .page_mkwrite   = ext2_dax_fault,
        .pfn_mkwrite    = ext2_dax_pfn_mkwrite,
 };
 
index 338eefd..6bd58e6 100644 (file)
@@ -737,8 +737,10 @@ static int ext2_get_blocks(struct inode *inode,
                 * so that it's not found by another thread before it's
                 * initialised
                 */
-               err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key),
-                                               1 << inode->i_blkbits);
+               err = dax_clear_sectors(inode->i_sb->s_bdev,
+                               le32_to_cpu(chain[depth-1].key) <<
+                               (inode->i_blkbits - 9),
+                               1 << inode->i_blkbits);
                if (err) {
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
@@ -874,6 +876,14 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 static int
 ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
+#ifdef CONFIG_FS_DAX
+       if (dax_mapping(mapping)) {
+               return dax_writeback_mapping_range(mapping,
+                                                  mapping->host->i_sb->s_bdev,
+                                                  wbc);
+       }
+#endif
+
        return mpage_writepages(mapping, wbc, ext2_get_block);
 }
 
@@ -1296,7 +1306,7 @@ void ext2_set_inode_flags(struct inode *inode)
                inode->i_flags |= S_NOATIME;
        if (flags & EXT2_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
-       if (test_opt(inode->i_sb, DAX))
+       if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
                inode->i_flags |= S_DAX;
 }
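
The new dax_clear_sectors() call in the ext2_get_blocks() hunk above does the block-to-sector conversion at the call site: a filesystem block number is shifted left by (i_blkbits - 9) to get a 512-byte sector. A small worked example (the 4 KiB block size is assumed purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int i_blkbits = 12;            /* 4 KiB blocks (assumed) */
        uint64_t block = 1000;
        uint64_t sector = block << (i_blkbits - 9);

        /* 1000 << 3 == 8000: block 1000 starts at sector 8000 */
        printf("block %llu -> sector %llu\n",
               (unsigned long long)block, (unsigned long long)sector);
        return 0;
}
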
 
index 474f1a4..4cd318f 100644 (file)
@@ -262,23 +262,8 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
        return result;
 }
 
-static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       int err;
-       struct inode *inode = file_inode(vma->vm_file);
-
-       sb_start_pagefault(inode->i_sb);
-       file_update_time(vma->vm_file);
-       down_read(&EXT4_I(inode)->i_mmap_sem);
-       err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
-       up_read(&EXT4_I(inode)->i_mmap_sem);
-       sb_end_pagefault(inode->i_sb);
-
-       return err;
-}
-
 /*
- * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
+ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
  * handler we check for races agaist truncate. Note that since we cycle through
  * i_mmap_sem, we are sure that also any hole punching that began before we
  * were called is finished by now and so if it included part of the file we
@@ -311,7 +296,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
 static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .pmd_fault      = ext4_dax_pmd_fault,
-       .page_mkwrite   = ext4_dax_mkwrite,
+       .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
 };
 #else
index 9cc57c3..aee960b 100644 (file)
@@ -2478,6 +2478,10 @@ static int ext4_writepages(struct address_space *mapping,
 
        trace_ext4_writepages(inode, wbc);
 
+       if (dax_mapping(mapping))
+               return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
+                                                  wbc);
+
        /*
         * No pages to write? This is mainly a kludge to avoid starting
         * a transaction for special inodes like journal inode on last iput()
@@ -4155,7 +4159,7 @@ void ext4_set_inode_flags(struct inode *inode)
                new_fl |= S_NOATIME;
        if (flags & EXT4_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
-       if (test_opt(inode->i_sb, DAX))
+       if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
                new_fl |= S_DAX;
        inode_set_flags(inode, new_fl,
                        S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
index a99b010..eae5917 100644 (file)
@@ -583,6 +583,11 @@ group_extend_out:
                                 "Online defrag not supported with bigalloc");
                        err = -EOPNOTSUPP;
                        goto mext_out;
+               } else if (IS_DAX(inode)) {
+                       ext4_msg(sb, KERN_ERR,
+                                "Online defrag not supported with DAX");
+                       err = -EOPNOTSUPP;
+                       goto mext_out;
                }
 
                err = mnt_want_write_file(filp);
index 1f76d89..5c46ed9 100644 (file)
@@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
 #define WB_FRN_HIST_MAX_SLOTS  (WB_FRN_HIST_THR_SLOTS / 2 + 1)
                                        /* one round can affect upto 5 slots */
 
+static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
+static struct workqueue_struct *isw_wq;
+
 void __inode_attach_wb(struct inode *inode, struct page *page)
 {
        struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -317,7 +320,6 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
        struct inode_switch_wbs_context *isw =
                container_of(work, struct inode_switch_wbs_context, work);
        struct inode *inode = isw->inode;
-       struct super_block *sb = inode->i_sb;
        struct address_space *mapping = inode->i_mapping;
        struct bdi_writeback *old_wb = inode->i_wb;
        struct bdi_writeback *new_wb = isw->new_wb;
@@ -424,8 +426,9 @@ skip_switch:
        wb_put(new_wb);
 
        iput(inode);
-       deactivate_super(sb);
        kfree(isw);
+
+       atomic_dec(&isw_nr_in_flight);
 }
 
 static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
@@ -435,7 +438,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
 
        /* needs to grab bh-unsafe locks, bounce to work item */
        INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
-       schedule_work(&isw->work);
+       queue_work(isw_wq, &isw->work);
 }
 
 /**
@@ -471,20 +474,20 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
-
-       if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
-           inode_to_wb(inode) == isw->new_wb)
-               goto out_unlock;
-
-       if (!atomic_inc_not_zero(&inode->i_sb->s_active))
-               goto out_unlock;
-
+       if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+           inode->i_state & (I_WB_SWITCH | I_FREEING) ||
+           inode_to_wb(inode) == isw->new_wb) {
+               spin_unlock(&inode->i_lock);
+               goto out_free;
+       }
        inode->i_state |= I_WB_SWITCH;
        spin_unlock(&inode->i_lock);
 
        ihold(inode);
        isw->inode = inode;
 
+       atomic_inc(&isw_nr_in_flight);
+
        /*
         * In addition to synchronizing among switchers, I_WB_SWITCH tells
         * the RCU protected stat update paths to grab the mapping's
@@ -494,8 +497,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
        call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
        return;
 
-out_unlock:
-       spin_unlock(&inode->i_lock);
 out_free:
        if (isw->new_wb)
                wb_put(isw->new_wb);
@@ -847,6 +848,33 @@ restart:
                wb_put(last_wb);
 }
 
+/**
+ * cgroup_writeback_umount - flush inode wb switches for umount
+ *
+ * This function is called when a super_block is about to be destroyed and
+ * flushes in-flight inode wb switches.  An inode wb switch goes through
+ * RCU and then workqueue, so the two need to be flushed in order to ensure
+ * that all previously scheduled switches are finished.  As wb switches are
+ * rare occurrences and synchronize_rcu() can take a while, perform
+ * flushing iff wb switches are in flight.
+ */
+void cgroup_writeback_umount(void)
+{
+       if (atomic_read(&isw_nr_in_flight)) {
+               synchronize_rcu();
+               flush_workqueue(isw_wq);
+       }
+}
+
+static int __init cgroup_writeback_init(void)
+{
+       isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
+       if (!isw_wq)
+               return -ENOMEM;
+       return 0;
+}
+fs_initcall(cgroup_writeback_init);
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static struct bdi_writeback *
index 506765a..bb8d67e 100644 (file)
@@ -376,12 +376,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
        struct inode *inode = d_inode(dentry);
        dnode_secno dno;
        int r;
-       int rep = 0;
        int err;
 
        hpfs_lock(dir->i_sb);
        hpfs_adjust_length(name, &len);
-again:
+
        err = -ENOENT;
        de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
        if (!de)
@@ -401,33 +400,9 @@ again:
                hpfs_error(dir->i_sb, "there was error when removing dirent");
                err = -EFSERROR;
                break;
-       case 2:         /* no space for deleting, try to truncate file */
-
+       case 2:         /* no space for deleting */
                err = -ENOSPC;
-               if (rep++)
-                       break;
-
-               dentry_unhash(dentry);
-               if (!d_unhashed(dentry)) {
-                       hpfs_unlock(dir->i_sb);
-                       return -ENOSPC;
-               }
-               if (generic_permission(inode, MAY_WRITE) ||
-                   !S_ISREG(inode->i_mode) ||
-                   get_write_access(inode)) {
-                       d_rehash(dentry);
-               } else {
-                       struct iattr newattrs;
-                       /*pr_info("truncating file before delete.\n");*/
-                       newattrs.ia_size = 0;
-                       newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
-                       err = notify_change(dentry, &newattrs, NULL);
-                       put_write_access(inode);
-                       if (!err)
-                               goto again;
-               }
-               hpfs_unlock(dir->i_sb);
-               return -ENOSPC;
+               break;
        default:
                drop_nlink(inode);
                err = 0;
index 3ea3655..8918ac9 100644 (file)
@@ -2,10 +2,6 @@
        JFFS2 LOCKING DOCUMENTATION
        ---------------------------
 
-At least theoretically, JFFS2 does not require the Big Kernel Lock
-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
-code. It has its own locking, as described below.
-
 This document attempts to describe the existing locking rules for
 JFFS2. It is not expected to remain perfectly up to date, but ought to
 be fairly close.
@@ -69,6 +65,7 @@ Ordering constraints:
           any f->sem held.
        2. Never attempt to lock two file mutexes in one thread.
           No ordering rules have been made for doing so.
+       3. Never lock a page cache page with f->sem held.
 
 
        erase_completion_lock spinlock
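
New rule 3 is what the jffs2_write_begin() and garbage-collection changes further below implement: the page lock must be taken before f->sem, never the other way round. A schematic kernel-style sketch of the allowed ordering (illustrative only; the helper name is invented):

#include <linux/pagemap.h>
#include <linux/mutex.h>

static int demo_lock_order(struct address_space *mapping, pgoff_t index,
                           struct mutex *f_sem)
{
        /* Page lock first ... */
        struct page *pg = grab_cache_page_write_begin(mapping, index, 0);

        if (!pg)
                return -ENOMEM;

        /* ... then f->sem, held for as short a section as possible. */
        mutex_lock(f_sem);
        /* work that needs both locks */
        mutex_unlock(f_sem);

        unlock_page(pg);
        page_cache_release(pg);
        return 0;
}
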
index 0ae91ad..b288c8a 100644 (file)
@@ -50,7 +50,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
 
 
 static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
-                                   struct jffs2_inode_cache *ic)
+                                   struct jffs2_inode_cache *ic,
+                                   int *dir_hardlinks)
 {
        struct jffs2_full_dirent *fd;
 
@@ -69,19 +70,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
                        dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
                                  fd->name, fd->ino, ic->ino);
                        jffs2_mark_node_obsolete(c, fd->raw);
+                       /* Clear the ic/raw union so it doesn't cause problems later. */
+                       fd->ic = NULL;
                        continue;
                }
 
+               /* From this point, fd->raw is no longer used so we can set fd->ic */
+               fd->ic = child_ic;
+               child_ic->pino_nlink++;
+               /* If we appear (at this stage) to have hard-linked directories,
+                * set a flag to trigger a scan later */
                if (fd->type == DT_DIR) {
-                       if (child_ic->pino_nlink) {
-                               JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
-                                           fd->name, fd->ino, ic->ino);
-                               /* TODO: What do we do about it? */
-                       } else {
-                               child_ic->pino_nlink = ic->ino;
-                       }
-               } else
-                       child_ic->pino_nlink++;
+                       child_ic->flags |= INO_FLAGS_IS_DIR;
+                       if (child_ic->pino_nlink > 1)
+                               *dir_hardlinks = 1;
+               }
 
                dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
                /* Can't free scan_dents so far. We might need them in pass 2 */
@@ -95,8 +98,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
 */
 static int jffs2_build_filesystem(struct jffs2_sb_info *c)
 {
-       int ret;
-       int i;
+       int ret, i, dir_hardlinks = 0;
        struct jffs2_inode_cache *ic;
        struct jffs2_full_dirent *fd;
        struct jffs2_full_dirent *dead_fds = NULL;
@@ -120,7 +122,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
        /* Now scan the directory tree, increasing nlink according to every dirent found. */
        for_each_inode(i, c, ic) {
                if (ic->scan_dents) {
-                       jffs2_build_inode_pass1(c, ic);
+                       jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
                        cond_resched();
                }
        }
@@ -156,6 +158,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
        }
 
        dbg_fsbuild("pass 2a complete\n");
+
+       if (dir_hardlinks) {
+               /* If we detected directory hardlinks earlier, *hopefully*
+                * they are gone now because some of the links were from
+                * dead directories which still had some old dirents lying
+                * around and not yet garbage-collected, but which have
+                * been discarded above. So clear the pino_nlink field
+                * in each directory, so that the final scan below can
+                * print appropriate warnings. */
+               for_each_inode(i, c, ic) {
+                       if (ic->flags & INO_FLAGS_IS_DIR)
+                               ic->pino_nlink = 0;
+               }
+       }
        dbg_fsbuild("freeing temporary data structures\n");
 
        /* Finally, we can scan again and free the dirent structs */
@@ -163,6 +179,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
                while(ic->scan_dents) {
                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;
+                       /* We do use the pino_nlink field to count nlink of
+                        * directories during fs build, so set it to the
+                        * parent ino# now that there's hopefully only
+                        * one parent left. */
+                       if (fd->type == DT_DIR) {
+                               if (!fd->ic) {
+                                       /* We'll have complained about it and marked the corresponding
+                                          raw node obsolete already. Just skip it. */
+                                       continue;
+                               }
+
+                               /* We *have* to have set this in jffs2_build_inode_pass1() */
+                               BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
+
+                               /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
+                                * is set. Otherwise, we know this should never trigger anyway, so
+                                * we don't do the check. And ic->pino_nlink still contains the nlink
+                                * value (which is 1). */
+                               if (dir_hardlinks && fd->ic->pino_nlink) {
+                                       JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
+                                                   fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
+                                       /* Should we unlink it from its previous parent? */
+                               }
+
+                               /* For directories, ic->pino_nlink holds that parent inode # */
+                               fd->ic->pino_nlink = ic->ino;
+                       }
                        jffs2_free_full_dirent(fd);
                }
                ic->scan_dents = NULL;
@@ -241,11 +284,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
 
                        /* Reduce nlink of the child. If it's now zero, stick it on the
                           dead_fds list to be cleaned up later. Else just free the fd */
-
-                       if (fd->type == DT_DIR)
-                               child_ic->pino_nlink = 0;
-                       else
-                               child_ic->pino_nlink--;
+                       child_ic->pino_nlink--;
 
                        if (!child_ic->pino_nlink) {
                                dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
index c5ac594..cad86ba 100644 (file)
@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
        struct page *pg;
        struct inode *inode = mapping->host;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-       struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
-       struct jffs2_raw_inode ri;
-       uint32_t alloc_len = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        uint32_t pageofs = index << PAGE_CACHE_SHIFT;
        int ret = 0;
 
-       jffs2_dbg(1, "%s()\n", __func__);
-
-       if (pageofs > inode->i_size) {
-               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
-                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
-               if (ret)
-                       return ret;
-       }
-
-       mutex_lock(&f->sem);
        pg = grab_cache_page_write_begin(mapping, index, flags);
-       if (!pg) {
-               if (alloc_len)
-                       jffs2_complete_reservation(c);
-               mutex_unlock(&f->sem);
+       if (!pg)
                return -ENOMEM;
-       }
        *pagep = pg;
 
-       if (alloc_len) {
+       jffs2_dbg(1, "%s()\n", __func__);
+
+       if (pageofs > inode->i_size) {
                /* Make new hole frag from old EOF to new page */
+               struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+               struct jffs2_raw_inode ri;
                struct jffs2_full_dnode *fn;
+               uint32_t alloc_len;
 
                jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
                          (unsigned int)inode->i_size, pageofs);
 
+               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+               if (ret)
+                       goto out_page;
+
+               mutex_lock(&f->sem);
                memset(&ri, 0, sizeof(ri));
 
                ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                if (IS_ERR(fn)) {
                        ret = PTR_ERR(fn);
                        jffs2_complete_reservation(c);
+                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                        jffs2_mark_node_obsolete(c, fn->raw);
                        jffs2_free_full_dnode(fn);
                        jffs2_complete_reservation(c);
+                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                jffs2_complete_reservation(c);
                inode->i_size = pageofs;
+               mutex_unlock(&f->sem);
        }
 
        /*
@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
         * case of a short-copy.
         */
        if (!PageUptodate(pg)) {
+               mutex_lock(&f->sem);
                ret = jffs2_do_readpage_nolock(inode, pg);
+               mutex_unlock(&f->sem);
                if (ret)
                        goto out_page;
        }
-       mutex_unlock(&f->sem);
        jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
        return ret;
 
 out_page:
        unlock_page(pg);
        page_cache_release(pg);
-       mutex_unlock(&f->sem);
        return ret;
 }
 
index 5a2dec2..95d5880 100644 (file)
@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
                BUG_ON(start > orig_start);
        }
 
-       /* First, use readpage() to read the appropriate page into the page cache */
-       /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
-        *    triggered garbage collection in the first place?
-        * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
-        *    page OK. We'll actually write it out again in commit_write, which is a little
-        *    suboptimal, but at least we're correct.
-        */
+       /* The rules state that we must obtain the page lock *before* f->sem, so
+        * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
+        * actually going to *change* so we're safe; we only allow reading.
+        *
+        * It is important to note that jffs2_write_begin() will ensure that its
+        * page is marked Uptodate before allocating space. That means that if we
+        * end up here trying to GC the *same* page that jffs2_write_begin() is
+        * trying to write out, read_cache_page() will not deadlock. */
+       mutex_unlock(&f->sem);
        pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+       mutex_lock(&f->sem);
 
        if (IS_ERR(pg_ptr)) {
                pr_warn("read_cache_page() returned error: %ld\n",
index fa35ff7..0637271 100644 (file)
@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
 #define INO_STATE_CLEARING     6       /* In clear_inode() */
 
 #define INO_FLAGS_XATTR_CHECKED        0x01    /* has no duplicate xattr_ref */
+#define INO_FLAGS_IS_DIR       0x02    /* is a directory */
 
 #define RAWNODE_CLASS_INODE_CACHE      0
 #define RAWNODE_CLASS_XATTR_DATUM      1
@@ -249,7 +250,10 @@ struct jffs2_readinode_info
 
 struct jffs2_full_dirent
 {
-       struct jffs2_raw_node_ref *raw;
+       union {
+               struct jffs2_raw_node_ref *raw;
+               struct jffs2_inode_cache *ic; /* Just during part of build */
+       };
        struct jffs2_full_dirent *next;
        uint32_t version;
        uint32_t ino; /* == zero for unlink */
index f624d13..9c590e0 100644 (file)
@@ -1712,6 +1712,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
                return 0;
        if (!follow)
                return 0;
+       /* make sure that d_is_symlink above matches inode */
+       if (nd->flags & LOOKUP_RCU) {
+               if (read_seqcount_retry(&link->dentry->d_seq, seq))
+                       return -ECHILD;
+       }
        return pick_link(nd, link, inode, seq);
 }
 
@@ -1743,11 +1748,11 @@ static int walk_component(struct nameidata *nd, int flags)
                if (err < 0)
                        return err;
 
-               inode = d_backing_inode(path.dentry);
                seq = 0;        /* we are already out of RCU mode */
                err = -ENOENT;
                if (d_is_negative(path.dentry))
                        goto out_path_put;
+               inode = d_backing_inode(path.dentry);
        }
 
        if (flags & WALK_PUT)
@@ -3192,12 +3197,12 @@ retry_lookup:
                return error;
 
        BUG_ON(nd->flags & LOOKUP_RCU);
-       inode = d_backing_inode(path.dentry);
        seq = 0;        /* out of RCU mode, so the value doesn't matter */
        if (unlikely(d_is_negative(path.dentry))) {
                path_to_nameidata(&path, nd);
                return -ENOENT;
        }
+       inode = d_backing_inode(path.dentry);
 finish_lookup:
        if (nd->depth)
                put_link(nd);
@@ -3206,11 +3211,6 @@ finish_lookup:
        if (unlikely(error))
                return error;
 
-       if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
-               path_to_nameidata(&path, nd);
-               return -ELOOP;
-       }
-
        if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
                path_to_nameidata(&path, nd);
        } else {
@@ -3229,6 +3229,10 @@ finish_open:
                return error;
        }
        audit_inode(nd->name, nd->path.dentry, 0);
+       if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
+               error = -ELOOP;
+               goto out;
+       }
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
                goto out;
@@ -3273,6 +3277,10 @@ opened:
                        goto exit_fput;
        }
 out:
+       if (unlikely(error > 0)) {
+               WARN_ON(1);
+               error = -EINVAL;
+       }
        if (got_write)
                mnt_drop_write(nd->path.mnt);
        path_put(&save_parent);
index c59a59c..35ab51c 100644 (file)
@@ -476,6 +476,7 @@ static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg,
 
                for (i = 0; i < nr_pages; i++)
                        put_page(arg->layoutupdate_pages[i]);
+               vfree(arg->start_p);
                kfree(arg->layoutupdate_pages);
        } else {
                put_page(arg->layoutupdate_page);
@@ -559,10 +560,15 @@ retry:
 
        if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) {
                void *p = start_p, *end = p + arg->layoutupdate_len;
+               struct page *page = NULL;
                int i = 0;
 
-               for ( ; p < end; p += PAGE_SIZE)
-                       arg->layoutupdate_pages[i++] = vmalloc_to_page(p);
+               arg->start_p = start_p;
+               for ( ; p < end; p += PAGE_SIZE) {
+                       page = vmalloc_to_page(p);
+                       arg->layoutupdate_pages[i++] = page;
+                       get_page(page);
+               }
        }
 
        dprintk("%s found %zu ranges\n", __func__, count);
index bd25dc7..dff8346 100644 (file)
 
 #define NFSDBG_FACILITY NFSDBG_PROC
 
-static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
-                               fmode_t fmode)
-{
-       struct nfs_open_context *open;
-       struct nfs_lock_context *lock;
-       int ret;
-
-       open = get_nfs_open_context(nfs_file_open_context(file));
-       lock = nfs_get_lock_context(open);
-       if (IS_ERR(lock)) {
-               put_nfs_open_context(open);
-               return PTR_ERR(lock);
-       }
-
-       ret = nfs4_set_rw_stateid(dst, open, lock, fmode);
-
-       nfs_put_lock_context(lock);
-       put_nfs_open_context(open);
-       return ret;
-}
-
 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
-                                loff_t offset, loff_t len)
+               struct nfs_lock_context *lock, loff_t offset, loff_t len)
 {
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
@@ -56,7 +35,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
        msg->rpc_argp = &args;
        msg->rpc_resp = &res;
 
-       status = nfs42_set_rw_stateid(&args.falloc_stateid, filep, FMODE_WRITE);
+       status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
+                       lock, FMODE_WRITE);
        if (status)
                return status;
 
@@ -78,15 +58,26 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
 {
        struct nfs_server *server = NFS_SERVER(file_inode(filep));
        struct nfs4_exception exception = { };
+       struct nfs_lock_context *lock;
        int err;
 
+       lock = nfs_get_lock_context(nfs_file_open_context(filep));
+       if (IS_ERR(lock))
+               return PTR_ERR(lock);
+
+       exception.inode = file_inode(filep);
+       exception.state = lock->open_context->state;
+
        do {
-               err = _nfs42_proc_fallocate(msg, filep, offset, len);
-               if (err == -ENOTSUPP)
-                       return -EOPNOTSUPP;
+               err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
+               if (err == -ENOTSUPP) {
+                       err = -EOPNOTSUPP;
+                       break;
+               }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);
 
+       nfs_put_lock_context(lock);
        return err;
 }
 
@@ -135,7 +126,8 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
        return err;
 }
 
-static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep,
+               struct nfs_lock_context *lock, loff_t offset, int whence)
 {
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
@@ -156,7 +148,8 @@ static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
        if (!nfs_server_capable(inode, NFS_CAP_SEEK))
                return -ENOTSUPP;
 
-       status = nfs42_set_rw_stateid(&args.sa_stateid, filep, FMODE_READ);
+       status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
+                       lock, FMODE_READ);
        if (status)
                return status;
 
@@ -175,17 +168,28 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
 {
        struct nfs_server *server = NFS_SERVER(file_inode(filep));
        struct nfs4_exception exception = { };
+       struct nfs_lock_context *lock;
        loff_t err;
 
+       lock = nfs_get_lock_context(nfs_file_open_context(filep));
+       if (IS_ERR(lock))
+               return PTR_ERR(lock);
+
+       exception.inode = file_inode(filep);
+       exception.state = lock->open_context->state;
+
        do {
-               err = _nfs42_proc_llseek(filep, offset, whence);
+               err = _nfs42_proc_llseek(filep, lock, offset, whence);
                if (err >= 0)
                        break;
-               if (err == -ENOTSUPP)
-                       return -EOPNOTSUPP;
+               if (err == -ENOTSUPP) {
+                       err = -EOPNOTSUPP;
+                       break;
+               }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);
 
+       nfs_put_lock_context(lock);
        return err;
 }
 
@@ -298,8 +302,9 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
 }
 
 static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
-                            struct file *dst_f, loff_t src_offset,
-                            loff_t dst_offset, loff_t count)
+               struct file *dst_f, struct nfs_lock_context *src_lock,
+               struct nfs_lock_context *dst_lock, loff_t src_offset,
+               loff_t dst_offset, loff_t count)
 {
        struct inode *src_inode = file_inode(src_f);
        struct inode *dst_inode = file_inode(dst_f);
@@ -320,11 +325,13 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
        msg->rpc_argp = &args;
        msg->rpc_resp = &res;
 
-       status = nfs42_set_rw_stateid(&args.src_stateid, src_f, FMODE_READ);
+       status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+                       src_lock, FMODE_READ);
        if (status)
                return status;
 
-       status = nfs42_set_rw_stateid(&args.dst_stateid, dst_f, FMODE_WRITE);
+       status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+                       dst_lock, FMODE_WRITE);
        if (status)
                return status;
 
@@ -349,22 +356,48 @@ int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
        };
        struct inode *inode = file_inode(src_f);
        struct nfs_server *server = NFS_SERVER(file_inode(src_f));
-       struct nfs4_exception exception = { };
-       int err;
+       struct nfs_lock_context *src_lock;
+       struct nfs_lock_context *dst_lock;
+       struct nfs4_exception src_exception = { };
+       struct nfs4_exception dst_exception = { };
+       int err, err2;
 
        if (!nfs_server_capable(inode, NFS_CAP_CLONE))
                return -EOPNOTSUPP;
 
+       src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
+       if (IS_ERR(src_lock))
+               return PTR_ERR(src_lock);
+
+       src_exception.inode = file_inode(src_f);
+       src_exception.state = src_lock->open_context->state;
+
+       dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
+       if (IS_ERR(dst_lock)) {
+               err = PTR_ERR(dst_lock);
+               goto out_put_src_lock;
+       }
+
+       dst_exception.inode = file_inode(dst_f);
+       dst_exception.state = dst_lock->open_context->state;
+
        do {
-               err = _nfs42_proc_clone(&msg, src_f, dst_f, src_offset,
-                                       dst_offset, count);
+               err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
+                                       src_offset, dst_offset, count);
                if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
                        NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
-                       return -EOPNOTSUPP;
+                       err = -EOPNOTSUPP;
+                       break;
                }
-               err = nfs4_handle_exception(server, err, &exception);
-       } while (exception.retry);
 
-       return err;
+               err2 = nfs4_handle_exception(server, err, &src_exception);
+               err = nfs4_handle_exception(server, err, &dst_exception);
+               if (!err)
+                       err = err2;
+       } while (src_exception.retry || dst_exception.retry);
 
+       nfs_put_lock_context(dst_lock);
+out_put_src_lock:
+       nfs_put_lock_context(src_lock);
+       return err;
 }
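
The nfs42proc.c hunks above rework fallocate, llseek and clone to take the lock context once up front, break out of the retry loop instead of returning early, and release the context on every exit path. A hedged standalone sketch of that acquire-once / break-on-error / single-release shape follows; resource_get, resource_put and do_op are stand-ins, not NFS interfaces.

    #include <stdbool.h>
    #include <stdio.h>

    struct resource { int id; };

    static struct resource the_resource = { .id = 1 };

    static struct resource *resource_get(void) { return &the_resource; }
    static void resource_put(struct resource *r) { (void)r; }

    /* Pretend the operation succeeds on the first attempt. */
    static int do_op(struct resource *r, bool *retry)
    {
            (void)r;
            *retry = false;
            return 0;
    }

    static int op_with_retry(void)
    {
            struct resource *r = resource_get();
            bool retry;
            int err;

            do {
                    err = do_op(r, &retry);
                    if (err)
                            break;      /* stop, but still reach the release below */
            } while (retry);

            resource_put(r);            /* single release point for all exit paths */
            return err;
    }

    int main(void)
    {
            printf("op_with_retry() = %d\n", op_with_retry());
            return 0;
    }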
index 4bfc33a..1488159 100644 (file)
@@ -2466,9 +2466,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
                dentry = d_add_unique(dentry, igrab(state->inode));
                if (dentry == NULL) {
                        dentry = opendata->dentry;
-               } else if (dentry != ctx->dentry) {
+               } else {
                        dput(ctx->dentry);
-                       ctx->dentry = dget(dentry);
+                       ctx->dentry = dentry;
                }
                nfs_set_verifier(dentry,
                                nfs_save_change_attribute(d_inode(opendata->dir)));
index 482b6e9..2fa483e 100644 (file)
@@ -252,6 +252,27 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
        }
 }
 
+/*
+ * Mark a pnfs_layout_hdr and all associated layout segments as invalid
+ *
+ * In order to continue using the pnfs_layout_hdr, a full recovery
+ * is required.
+ * Note that the caller must hold inode->i_lock.
+ */
+static int
+pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
+               struct list_head *lseg_list)
+{
+       struct pnfs_layout_range range = {
+               .iomode = IOMODE_ANY,
+               .offset = 0,
+               .length = NFS4_MAX_UINT64,
+       };
+
+       set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+       return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range);
+}
+
 static int
 pnfs_iomode_to_fail_bit(u32 iomode)
 {
@@ -554,9 +575,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
-               lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
-               pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
                pnfs_get_layout_hdr(lo);
+               pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
@@ -617,11 +637,6 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
 {
        struct pnfs_layout_hdr *lo;
        struct inode *inode;
-       struct pnfs_layout_range range = {
-               .iomode = IOMODE_ANY,
-               .offset = 0,
-               .length = NFS4_MAX_UINT64,
-       };
        LIST_HEAD(lseg_list);
        int ret = 0;
 
@@ -636,11 +651,11 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
 
                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
-               lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
-               if (is_bulk_recall)
-                       set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
-               if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
+               if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
+                       if (is_bulk_recall)
+                               set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                        ret = -EAGAIN;
+               }
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&lseg_list);
                /* Free all lsegs that are attached to commit buckets */
@@ -1738,8 +1753,19 @@ pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
        if (lo->plh_return_iomode != 0)
                iomode = IOMODE_ANY;
        lo->plh_return_iomode = iomode;
+       set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
 }
 
+/**
+ * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
+ * @lo: pointer to layout header
+ * @tmp_list: list header to be used with pnfs_free_lseg_list()
+ * @return_range: describe layout segment ranges to be returned
+ *
+ * This function is mainly intended for use by layoutrecall. It attempts
+ * to free the layout segment immediately, or else to mark it for return
+ * as soon as its reference count drops to zero.
+ */
 int
 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
                                struct list_head *tmp_list,
@@ -1762,12 +1788,11 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
                                lseg, lseg->pls_range.iomode,
                                lseg->pls_range.offset,
                                lseg->pls_range.length);
+                       if (mark_lseg_invalid(lseg, tmp_list))
+                               continue;
+                       remaining++;
                        set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
                        pnfs_set_plh_return_iomode(lo, return_range->iomode);
-                       if (!mark_lseg_invalid(lseg, tmp_list))
-                               remaining++;
-                       set_bit(NFS_LAYOUT_RETURN_REQUESTED,
-                                       &lo->plh_flags);
                }
        return remaining;
 }
index 794fd15..cda0361 100644 (file)
@@ -956,6 +956,7 @@ clean_orphan:
                tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
                                update_isize, end);
                if (tmp_ret < 0) {
+                       ocfs2_inode_unlock(inode, 1);
                        ret = tmp_ret;
                        mlog_errno(ret);
                        brelse(di_bh);
index ed95272..52f6de5 100644 (file)
@@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
         * sole user of this dentry.  Too tricky...  Just unhash for
         * now.
         */
-       d_drop(dentry);
+       if (!err)
+               d_drop(dentry);
        inode_unlock(dir);
 
        return err;
@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        if (!overwrite && new_is_dir && !old_opaque && new_opaque)
                ovl_remove_opaque(newdentry);
 
+       /*
+        * The old dentry now lives in a different location. Dentries in
+        * lowerstack are stale; we cannot drop them here because access
+        * to them is lockless. This can only be a pure upper or opaque
+        * directory (numlower is zero), or an upper non-dir entry whose
+        * pureness is tracked by the opaque flag.
+        */
        if (old_opaque != new_opaque) {
                ovl_dentry_set_opaque(old, new_opaque);
                if (!overwrite)
index 49e2045..a4ff5d0 100644 (file)
@@ -65,6 +65,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 
                inode_lock(upperdentry->d_inode);
                err = notify_change(upperdentry, attr, NULL);
+               if (!err)
+                       ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
        }
        ovl_drop_write(dentry);
index 8d826bd..619ad4b 100644 (file)
@@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
        if (oe->__upperdentry) {
                type = __OVL_PATH_UPPER;
 
-               if (oe->numlower) {
-                       if (S_ISDIR(dentry->d_inode->i_mode))
-                               type |= __OVL_PATH_MERGE;
-               } else if (!oe->opaque) {
+               /*
+                * A non-dir dentry can hold a lower dentry from its
+                * previous location; its purity depends only on the
+                * opaque flag.
+                */
+               if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
+                       type |= __OVL_PATH_MERGE;
+               else if (!oe->opaque)
                        type |= __OVL_PATH_PURE;
-               }
        } else {
                if (oe->numlower > 1)
                        type |= __OVL_PATH_MERGE;
@@ -341,6 +343,7 @@ static const struct dentry_operations ovl_dentry_operations = {
 
 static const struct dentry_operations ovl_reval_dentry_operations = {
        .d_release = ovl_dentry_release,
+       .d_select_inode = ovl_d_select_inode,
        .d_revalidate = ovl_dentry_revalidate,
        .d_weak_revalidate = ovl_dentry_weak_revalidate,
 };
index 6367e1e..c524fdd 100644 (file)
@@ -202,6 +202,11 @@ static struct mount *last_dest, *last_source, *dest_master;
 static struct mountpoint *mp;
 static struct hlist_head *list;
 
+static inline bool peers(struct mount *m1, struct mount *m2)
+{
+       return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
+}
+
 static int propagate_one(struct mount *m)
 {
        struct mount *child;
@@ -212,7 +217,7 @@ static int propagate_one(struct mount *m)
        /* skip if mountpoint isn't covered by it */
        if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
                return 0;
-       if (m->mnt_group_id == last_dest->mnt_group_id) {
+       if (peers(m, last_dest)) {
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
@@ -223,7 +228,7 @@ static int propagate_one(struct mount *m)
                                        last_source = last_source->mnt_master;
                                        last_dest = last_source->mnt_parent;
                                }
-                               if (n->mnt_group_id != last_dest->mnt_group_id) {
+                               if (!peers(n, last_dest)) {
                                        last_source = last_source->mnt_master;
                                        last_dest = last_source->mnt_parent;
                                }
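
The pnode.c change above replaces the raw mnt_group_id comparisons with a peers() helper that also rejects group id 0, since 0 means a mount belongs to no peer group at all. A small standalone sketch of that predicate (struct and field names are illustrative):

    #include <assert.h>
    #include <stdbool.h>

    struct mnt { int group_id; };

    /* Two mounts are peers only if they share a non-zero group id. */
    static bool peers(const struct mnt *a, const struct mnt *b)
    {
            return a->group_id == b->group_id && a->group_id != 0;
    }

    int main(void)
    {
            struct mnt shared_a = { 7 }, shared_b = { 7 };
            struct mnt private_a = { 0 }, private_b = { 0 };

            assert(peers(&shared_a, &shared_b));    /* same non-zero group */
            assert(!peers(&private_a, &private_b)); /* both ungrouped: not peers */
            assert(!peers(&shared_a, &private_a));
            return 0;
    }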
index 324ec27..dadf24e 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/splice.h>
 #include <linux/compat.h>
 #include <linux/mount.h>
+#include <linux/fs.h>
 #include "internal.h"
 
 #include <asm/uaccess.h>
@@ -183,7 +184,7 @@ loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
        switch (whence) {
        case SEEK_SET: case SEEK_CUR:
                return generic_file_llseek_size(file, offset, whence,
-                                               ~0ULL, 0);
+                                               OFFSET_MAX, 0);
        default:
                return -EINVAL;
        }
@@ -1532,10 +1533,12 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 
        if (!(file_in->f_mode & FMODE_READ) ||
            !(file_out->f_mode & FMODE_WRITE) ||
-           (file_out->f_flags & O_APPEND) ||
-           !file_in->f_op->clone_file_range)
+           (file_out->f_flags & O_APPEND))
                return -EBADF;
 
+       if (!file_in->f_op->clone_file_range)
+               return -EOPNOTSUPP;
+
        ret = clone_verify_area(file_in, pos_in, len, false);
        if (ret)
                return ret;
index 1182af8..74914b1 100644 (file)
@@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb)
                sb->s_flags &= ~MS_ACTIVE;
 
                fsnotify_unmount_inodes(sb);
+               cgroup_writeback_umount();
 
                evict_inodes(sb);
 
index 5031170..66cdb44 100644 (file)
@@ -286,6 +286,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
        if (unlikely(ACCESS_ONCE(ctx->released)))
                goto out;
 
+       /*
+        * We don't do userfault handling for the final child pid update.
+        */
+       if (current->flags & PF_EXITING)
+               goto out;
+
        /*
         * Check that we can return VM_FAULT_RETRY.
         *
index 07d0e47..4861322 100644 (file)
@@ -940,7 +940,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
        bool trusted = capable(CAP_SYS_ADMIN);
        struct simple_xattr *xattr;
        ssize_t remaining_size = size;
-       int err;
+       int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl) {
@@ -965,11 +965,11 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
 
                err = xattr_list_one(&buffer, &remaining_size, xattr->name);
                if (err)
-                       return err;
+                       break;
        }
        spin_unlock(&xattrs->lock);
 
-       return size - remaining_size;
+       return err ? err : size - remaining_size;
 }
 
 /*
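
The xattr.c hunk above makes simple_xattr_list() remember the first error from xattr_list_one(), break out so the spinlock is still dropped, and return either that error or the number of bytes consumed. A standalone sketch of the same error-or-bytes-consumed shape, with illustrative names (emit, list_items):

    #include <errno.h>
    #include <stdio.h>

    static int emit(int value, long *remaining)
    {
            if (*remaining < 1)
                    return -ERANGE;     /* buffer too small */
            (void)value;
            *remaining -= 1;
            return 0;
    }

    static long list_items(const int *items, int n, long size)
    {
            long remaining = size;
            int err = 0;
            int i;

            for (i = 0; i < n; i++) {
                    err = emit(items[i], &remaining);
                    if (err)
                            break;      /* keep the common cleanup/return path */
            }

            return err ? err : size - remaining;
    }

    int main(void)
    {
            int items[] = { 1, 2, 3 };

            printf("%ld\n", list_items(items, 3, 8)); /* 3: bytes consumed */
            printf("%ld\n", list_items(items, 3, 2)); /* -ERANGE: first error wins */
            return 0;
    }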
index 379c089..a9ebabf 100644 (file)
@@ -55,7 +55,7 @@ xfs_count_page_state(
        } while ((bh = bh->b_this_page) != head);
 }
 
-STATIC struct block_device *
+struct block_device *
 xfs_find_bdev_for_inode(
        struct inode            *inode)
 {
@@ -1208,6 +1208,10 @@ xfs_vm_writepages(
        struct writeback_control *wbc)
 {
        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+       if (dax_mapping(mapping))
+               return dax_writeback_mapping_range(mapping,
+                               xfs_find_bdev_for_inode(mapping->host), wbc);
+
        return generic_writepages(mapping, wbc);
 }
 
index f6ffc9a..a4343c6 100644 (file)
@@ -62,5 +62,6 @@ int   xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
                                 struct buffer_head *map_bh, int create);
 
 extern void xfs_count_page_state(struct page *, int *, int *);
+extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
 
 #endif /* __XFS_AOPS_H__ */
index 45ec9e4..6c87601 100644 (file)
@@ -75,7 +75,8 @@ xfs_zero_extent(
        ssize_t         size = XFS_FSB_TO_B(mp, count_fsb);
 
        if (IS_DAX(VFS_I(ip)))
-               return dax_clear_blocks(VFS_I(ip), block, size);
+               return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
+                               sector, size);
 
        /*
         * let the block layer decide on the fastest method of
index d2992bf..c1a2f34 100644 (file)
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
 };
 
 enum ata_ioctls {
-       ATA_IOC_GET_IO32        = 0x309,
-       ATA_IOC_SET_IO32        = 0x324,
+       ATA_IOC_GET_IO32        = 0x309, /* HDIO_GET_32BIT */
+       ATA_IOC_SET_IO32        = 0x324, /* HDIO_SET_32BIT */
 };
 
 /* core structures */
index 5349e68..cb68888 100644 (file)
@@ -310,6 +310,43 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
        bio->bi_flags &= ~(1U << bit);
 }
 
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+       *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+       struct bvec_iter iter = bio->bi_iter;
+       int idx;
+
+       if (!bio_flagged(bio, BIO_CLONED)) {
+               *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
+               return;
+       }
+
+       if (unlikely(!bio_multiple_segments(bio))) {
+               *bv = bio_iovec(bio);
+               return;
+       }
+
+       bio_advance_iter(bio, &iter, iter.bi_size);
+
+       if (!iter.bi_bvec_done)
+               idx = iter.bi_idx - 1;
+       else    /* in the middle of bvec */
+               idx = iter.bi_idx;
+
+       *bv = bio->bi_io_vec[idx];
+
+       /*
+        * iter.bi_bvec_done records the actual length of the last bvec
+        * if this bio ends in the middle of an io vector
+        */
+       if (iter.bi_bvec_done)
+               bv->bv_len = iter.bi_bvec_done;
+}
+
 enum bip_flags {
        BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
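
bio_get_last_bvec() above walks the iterator to the end of a cloned bio and, when the bio finishes partway through a vector, trims the reported length to iter.bi_bvec_done. A hedged standalone sketch of that last-effective-segment computation over a plain array; seg_len, nsegs and remaining are illustrative names, not block-layer fields.

    #include <stdio.h>

    struct last_seg { int idx; unsigned int len; };

    static struct last_seg find_last_seg(const unsigned int *seg_len, int nsegs,
                                         unsigned int remaining)
    {
            struct last_seg last = { .idx = 0, .len = 0 };
            int i;

            for (i = 0; i < nsegs && remaining; i++) {
                    unsigned int used =
                            remaining < seg_len[i] ? remaining : seg_len[i];

                    last.idx = i;
                    last.len = used;    /* partial use == the bi_bvec_done case */
                    remaining -= used;
            }
            return last;
    }

    int main(void)
    {
            unsigned int segs[] = { 4096, 4096, 4096 };
            struct last_seg l = find_last_seg(segs, 3, 4096 + 2048);

            printf("ends in segment %d, %u bytes used\n", l.idx, l.len); /* 1, 2048 */
            return 0;
    }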
index 4571ef1..413c84f 100644 (file)
@@ -895,7 +895,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
-       if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+       if (unlikely(rq->cmd_type != REQ_TYPE_FS))
                return q->limits.max_hw_sectors;
 
        if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
@@ -1372,6 +1372,13 @@ static inline void put_dev_sector(Sector p)
        page_cache_release(p.v);
 }
 
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+                               struct bio_vec *bprv, unsigned int offset)
+{
+       return offset ||
+               ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
 /*
  * Check if adding a bio_vec after bprv with offset would create a gap in
  * the SG list. Most drivers don't care about this, but some do.
@@ -1381,18 +1388,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 {
        if (!queue_virt_boundary(q))
                return false;
-       return offset ||
-               ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+       return __bvec_gap_to_prev(q, bprv, offset);
 }
 
 static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
                         struct bio *next)
 {
-       if (!bio_has_data(prev))
-               return false;
+       if (bio_has_data(prev) && queue_virt_boundary(q)) {
+               struct bio_vec pb, nb;
+
+               bio_get_last_bvec(prev, &pb);
+               bio_get_first_bvec(next, &nb);
 
-       return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-                               next->bi_io_vec[0].bv_offset);
+               return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+       }
+
+       return false;
 }
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
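
The blkdev.h hunk above factors the virt-boundary test into __bvec_gap_to_prev() so that bio_will_gap() can compare the real last segment of the previous bio against the first segment of the next one. A standalone sketch of the gap arithmetic itself, using made-up parameter names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Two adjacent segments create a gap if the next one does not start at
     * offset 0, or if the previous one does not end on the boundary mask.
     */
    static bool gap_to_prev(uint32_t prev_offset, uint32_t prev_len,
                            uint32_t next_offset, uint32_t boundary_mask)
    {
            if (!boundary_mask)         /* no alignment constraint at all */
                    return false;
            return next_offset ||
                   ((prev_offset + prev_len) & boundary_mask);
    }

    int main(void)
    {
            /* 4K - 1 mask: previous segment ends mid-page -> gap */
            printf("%d\n", gap_to_prev(0, 2048, 0, 4095));   /* 1 */
            /* previous segment ends exactly on the boundary -> no gap */
            printf("%d\n", gap_to_prev(0, 4096, 0, 4095));   /* 0 */
            return 0;
    }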
index c1ef6f1..15151f3 100644 (file)
@@ -75,6 +75,7 @@
 #define CEPH_FEATURE_CRUSH_TUNABLES5   (1ULL<<58) /* chooseleaf stable mode */
 // duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
 #define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING   (1ULL<<58) /* New, v7 encoding */
+#define CEPH_FEATURE_FS_FILE_LAYOUT_V2       (1ULL<<58) /* file_layout_t */
 
 /*
  * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
index 818e450..636dd59 100644 (file)
@@ -7,7 +7,7 @@
 
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
                  get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
@@ -52,6 +52,8 @@ static inline bool dax_mapping(struct address_space *mapping)
 {
        return mapping->host && IS_DAX(mapping->host);
 }
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
-               loff_t end);
+
+struct writeback_control;
+int dax_writeback_mapping_range(struct address_space *mapping,
+               struct block_device *bdev, struct writeback_control *wbc);
 #endif
index 7781ce1..c4b5f4b 100644 (file)
@@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
  */
 static inline unsigned __d_entry_type(const struct dentry *dentry)
 {
-       unsigned type = READ_ONCE(dentry->d_flags);
-       smp_rmb();
-       return type & DCACHE_ENTRY_TYPE;
+       return dentry->d_flags & DCACHE_ENTRY_TYPE;
 }
 
 static inline bool d_is_miss(const struct dentry *dentry)
index bec2abb..2c4ebef 100644 (file)
@@ -720,7 +720,7 @@ struct ata_device {
        union {
                u16             id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
                u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
-       };
+       } ____cacheline_aligned;
 
        /* DEVSLP Timing Variables from Identify Device Data Log */
        u8                      devslp_timing[ATA_LOG_DEVSLP_SIZE];
index bed40df..141ffdd 100644 (file)
@@ -26,9 +26,8 @@ enum {
 
        /* need to set a limit somewhere, but yes, this is likely overkill */
        ND_IOCTL_MAX_BUFLEN = SZ_4M,
-       ND_CMD_MAX_ELEM = 4,
+       ND_CMD_MAX_ELEM = 5,
        ND_CMD_MAX_ENVELOPE = 16,
-       ND_CMD_ARS_STATUS_MAX = SZ_4K,
        ND_MAX_MAPPINGS = 32,
 
        /* region flag indicating to direct-map persistent memory by default */
index 48e0320..67300f8 100644 (file)
@@ -550,9 +550,7 @@ extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
 
 static inline loff_t nfs_size_to_loff_t(__u64 size)
 {
-       if (size > (__u64) OFFSET_MAX - 1)
-               return OFFSET_MAX - 1;
-       return (loff_t) size;
+       return min_t(u64, size, OFFSET_MAX);
 }
 
 static inline ino_t
index 791098a..d320906 100644 (file)
@@ -275,6 +275,7 @@ struct nfs4_layoutcommit_args {
        size_t layoutupdate_len;
        struct page *layoutupdate_page;
        struct page **layoutupdate_pages;
+       __be32 *start_p;
 };
 
 struct nfs4_layoutcommit_res {
index 27df4a6..2771625 100644 (file)
@@ -988,23 +988,6 @@ static inline int pci_is_managed(struct pci_dev *pdev)
        return pdev->is_managed;
 }
 
-static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
-{
-       pdev->irq = irq;
-       pdev->irq_managed = 1;
-}
-
-static inline void pci_reset_managed_irq(struct pci_dev *pdev)
-{
-       pdev->irq = 0;
-       pdev->irq_managed = 0;
-}
-
-static inline bool pci_has_managed_irq(struct pci_dev *pdev)
-{
-       return pdev->irq_managed && pdev->irq > 0;
-}
-
 void pci_disable_device(struct pci_dev *dev);
 
 extern unsigned int pcibios_max_latency;
index b35a61a..f5c5a3f 100644 (file)
@@ -397,6 +397,7 @@ struct pmu {
  * enum perf_event_active_state - the states of a event
  */
 enum perf_event_active_state {
+       PERF_EVENT_STATE_DEAD           = -4,
        PERF_EVENT_STATE_EXIT           = -3,
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
@@ -905,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
        }
 }
 
-extern struct static_key_deferred perf_sched_events;
+extern struct static_key_false perf_sched_events;
 
 static __always_inline bool
 perf_sw_migrate_enabled(void)
@@ -924,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
 static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
 {
-       if (static_key_false(&perf_sched_events.key))
+       if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_in(prev, task);
 
        if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
-       if (static_key_false(&perf_sched_events.key))
+       if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_out(prev, next);
 }
 
index 998d8f1..b50c049 100644 (file)
@@ -49,6 +49,7 @@ struct bq27xxx_reg_cache {
 
 struct bq27xxx_device_info {
        struct device *dev;
+       int id;
        enum bq27xxx_chip chip;
        const char *name;
        struct bq27xxx_access_methods bus;
index a75840c..9c29122 100644 (file)
@@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
 #endif
 
 unsigned int get_random_int(void);
+unsigned long get_random_long(void);
 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
 
 u32 prandom_u32(void);
index 429fdfc..925730b 100644 (file)
@@ -568,6 +568,8 @@ enum {
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
+       FILTER_COMM,
+       FILTER_CPU,
 };
 
 extern int trace_event_raw_init(struct trace_event_call *call);
index b333c94..d0b5ca5 100644 (file)
@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 void wbc_detach_inode(struct writeback_control *wbc);
 void wbc_account_io(struct writeback_control *wbc, struct page *page,
                    size_t bytes);
+void cgroup_writeback_umount(void);
 
 /**
  * inode_attach_wb - associate an inode with its wb
@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
 {
 }
 
+static inline void cgroup_writeback_umount(void)
+{
+}
+
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
 /*
index e2b712c..c21c38c 100644 (file)
@@ -343,7 +343,7 @@ void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
 void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
 
 void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
-void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
                                    void (*ack)(struct hdac_bus *,
                                                struct hdac_stream *));
 
index 1e3c8cb..625b38f 100644 (file)
@@ -66,27 +66,33 @@ struct media_device_info {
 /*
  * DVB entities
  */
-#define MEDIA_ENT_F_DTV_DEMOD          (MEDIA_ENT_F_BASE + 1)
-#define MEDIA_ENT_F_TS_DEMUX           (MEDIA_ENT_F_BASE + 2)
-#define MEDIA_ENT_F_DTV_CA             (MEDIA_ENT_F_BASE + 3)
-#define MEDIA_ENT_F_DTV_NET_DECAP      (MEDIA_ENT_F_BASE + 4)
+#define MEDIA_ENT_F_DTV_DEMOD          (MEDIA_ENT_F_BASE + 0x00001)
+#define MEDIA_ENT_F_TS_DEMUX           (MEDIA_ENT_F_BASE + 0x00002)
+#define MEDIA_ENT_F_DTV_CA             (MEDIA_ENT_F_BASE + 0x00003)
+#define MEDIA_ENT_F_DTV_NET_DECAP      (MEDIA_ENT_F_BASE + 0x00004)
 
 /*
- * Connectors
+ * I/O entities
  */
-/* It is a responsibility of the entity drivers to add connectors and links */
-#define MEDIA_ENT_F_CONN_RF            (MEDIA_ENT_F_BASE + 21)
-#define MEDIA_ENT_F_CONN_SVIDEO                (MEDIA_ENT_F_BASE + 22)
-#define MEDIA_ENT_F_CONN_COMPOSITE     (MEDIA_ENT_F_BASE + 23)
-/* For internal test signal generators and other debug connectors */
-#define MEDIA_ENT_F_CONN_TEST          (MEDIA_ENT_F_BASE + 24)
+#define MEDIA_ENT_F_IO_DTV             (MEDIA_ENT_F_BASE + 0x01001)
+#define MEDIA_ENT_F_IO_VBI             (MEDIA_ENT_F_BASE + 0x01002)
+#define MEDIA_ENT_F_IO_SWRADIO         (MEDIA_ENT_F_BASE + 0x01003)
 
 /*
- * I/O entities
+ * Connectors
  */
-#define MEDIA_ENT_F_IO_DTV             (MEDIA_ENT_F_BASE + 31)
-#define MEDIA_ENT_F_IO_VBI             (MEDIA_ENT_F_BASE + 32)
-#define MEDIA_ENT_F_IO_SWRADIO         (MEDIA_ENT_F_BASE + 33)
+/* It is a responsibility of the entity drivers to add connectors and links */
+#ifdef __KERNEL__
+       /*
+        * For now, these should not be used in userspace, as some
+        * of the definitions may change.
+        */
+
+#define MEDIA_ENT_F_CONN_RF            (MEDIA_ENT_F_BASE + 0x30001)
+#define MEDIA_ENT_F_CONN_SVIDEO                (MEDIA_ENT_F_BASE + 0x30002)
+#define MEDIA_ENT_F_CONN_COMPOSITE     (MEDIA_ENT_F_BASE + 0x30003)
+
+#endif
 
 /*
  * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and
@@ -291,14 +297,14 @@ struct media_v2_entity {
        __u32 id;
        char name[64];          /* FIXME: move to a property? (RFC says so) */
        __u32 function;         /* Main function of the entity */
-       __u16 reserved[12];
-};
+       __u32 reserved[6];
+} __attribute__ ((packed));
 
 /* Should match the specific fields at media_intf_devnode */
 struct media_v2_intf_devnode {
        __u32 major;
        __u32 minor;
-};
+} __attribute__ ((packed));
 
 struct media_v2_interface {
        __u32 id;
@@ -310,22 +316,22 @@ struct media_v2_interface {
                struct media_v2_intf_devnode devnode;
                __u32 raw[16];
        };
-};
+} __attribute__ ((packed));
 
 struct media_v2_pad {
        __u32 id;
        __u32 entity_id;
        __u32 flags;
-       __u16 reserved[9];
-};
+       __u32 reserved[5];
+} __attribute__ ((packed));
 
 struct media_v2_link {
        __u32 id;
        __u32 source_id;
        __u32 sink_id;
        __u32 flags;
-       __u32 reserved[5];
-};
+       __u32 reserved[6];
+} __attribute__ ((packed));
 
 struct media_v2_topology {
        __u64 topology_version;
@@ -345,7 +351,7 @@ struct media_v2_topology {
        __u32 num_links;
        __u32 reserved4;
        __u64 ptr_links;
-};
+} __attribute__ ((packed));
 
 static inline void __user *media_get_uptr(__u64 arg)
 {
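
The media.h hunks above both re-size the reserved fields and mark the media_v2_* structures __attribute__((packed)); without that, the compiler may insert padding, so the ioctl layout could differ between architectures. A standalone sketch of the effect on an illustrative struct (not one of the media structures):

    #include <stdint.h>
    #include <stdio.h>

    struct loose {
            uint32_t id;
            uint64_t ptr;               /* typically padded to an 8-byte boundary */
            uint16_t flags;
    };

    struct packed_abi {
            uint32_t id;
            uint64_t ptr;
            uint16_t flags;
    } __attribute__((packed));

    int main(void)
    {
            printf("loose:  %zu bytes\n", sizeof(struct loose));      /* often 24 */
            printf("packed: %zu bytes\n", sizeof(struct packed_abi)); /* always 14 */
            return 0;
    }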
index 5b4a4be..cc68b92 100644 (file)
@@ -66,14 +66,18 @@ struct nd_cmd_ars_cap {
        __u64 length;
        __u32 status;
        __u32 max_ars_out;
+       __u32 clear_err_unit;
+       __u32 reserved;
 } __packed;
 
 struct nd_cmd_ars_start {
        __u64 address;
        __u64 length;
        __u16 type;
-       __u8 reserved[6];
+       __u8 flags;
+       __u8 reserved[5];
        __u32 status;
+       __u32 scrub_time;
 } __packed;
 
 struct nd_cmd_ars_status {
@@ -81,11 +85,14 @@ struct nd_cmd_ars_status {
        __u32 out_length;
        __u64 address;
        __u64 length;
+       __u64 restart_address;
+       __u64 restart_length;
        __u16 type;
+       __u16 flags;
        __u32 num_records;
        struct nd_ars_record {
                __u32 handle;
-               __u32 flags;
+               __u32 reserved;
                __u64 err_address;
                __u64 length;
        } __packed records[0];
index 0d58522..6146148 100644 (file)
@@ -64,8 +64,17 @@ static void remote_function(void *data)
        struct task_struct *p = tfc->p;
 
        if (p) {
-               tfc->ret = -EAGAIN;
-               if (task_cpu(p) != smp_processor_id() || !task_curr(p))
+               /* tfc->ret stays -EAGAIN: wrong CPU, the caller retries */
+               if (task_cpu(p) != smp_processor_id())
+                       return;
+
+               /*
+                * Now that we're on right CPU with IRQs disabled, we can test
+                * if we hit the right task without races.
+                */
+
+               tfc->ret = -ESRCH; /* No such (running) process */
+               if (p != current)
                        return;
        }
 
@@ -92,13 +101,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
                .p      = p,
                .func   = func,
                .info   = info,
-               .ret    = -ESRCH, /* No such (running) process */
+               .ret    = -EAGAIN,
        };
+       int ret;
 
-       if (task_curr(p))
-               smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+       do {
+               ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+               if (!ret)
+                       ret = data.ret;
+       } while (ret == -EAGAIN);
 
-       return data.ret;
+       return ret;
 }
 
 /**
@@ -169,19 +182,6 @@ static bool is_kernel_event(struct perf_event *event)
  *    rely on ctx->is_active and therefore cannot use event_function_call().
  *    See perf_install_in_context().
  *
- * This is because we need a ctx->lock serialized variable (ctx->is_active)
- * to reliably determine if a particular task/context is scheduled in. The
- * task_curr() use in task_function_call() is racy in that a remote context
- * switch is not a single atomic operation.
- *
- * As is, the situation is 'safe' because we set rq->curr before we do the
- * actual context switch. This means that task_curr() will fail early, but
- * we'll continue spinning on ctx->is_active until we've passed
- * perf_event_task_sched_out().
- *
- * Without this ctx->lock serialized variable we could have race where we find
- * the task (and hence the context) would not be active while in fact they are.
- *
  * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
  */
 
@@ -212,7 +212,7 @@ static int event_function(void *info)
         */
        if (ctx->task) {
                if (ctx->task != current) {
-                       ret = -EAGAIN;
+                       ret = -ESRCH;
                        goto unlock;
                }
 
@@ -276,10 +276,10 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
                return;
        }
 
-again:
        if (task == TASK_TOMBSTONE)
                return;
 
+again:
        if (!task_function_call(task, event_function, &efs))
                return;
 
@@ -289,13 +289,15 @@ again:
         * a concurrent perf_event_context_sched_out().
         */
        task = ctx->task;
-       if (task != TASK_TOMBSTONE) {
-               if (ctx->is_active) {
-                       raw_spin_unlock_irq(&ctx->lock);
-                       goto again;
-               }
-               func(event, NULL, ctx, data);
+       if (task == TASK_TOMBSTONE) {
+               raw_spin_unlock_irq(&ctx->lock);
+               return;
        }
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               goto again;
+       }
+       func(event, NULL, ctx, data);
        raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -314,6 +316,7 @@ again:
 enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
+       EVENT_TIME = 0x4,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 };
 
@@ -321,7 +324,13 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct static_key_deferred perf_sched_events __read_mostly;
+
+static void perf_sched_delayed(struct work_struct *work);
+DEFINE_STATIC_KEY_FALSE(perf_sched_events);
+static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
+static DEFINE_MUTEX(perf_sched_mutex);
+static atomic_t perf_sched_count;
+
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 
@@ -1288,16 +1297,18 @@ static u64 perf_event_time(struct perf_event *event)
 
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
- * The caller of this function needs to hold the ctx->lock.
  */
 static void update_event_times(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
        u64 run_end;
 
+       lockdep_assert_held(&ctx->lock);
+
        if (event->state < PERF_EVENT_STATE_INACTIVE ||
            event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
                return;
+
        /*
         * in cgroup mode, time_enabled represents
         * the time the event was enabled AND active
@@ -1645,7 +1656,7 @@ out:
 
 static bool is_orphaned_event(struct perf_event *event)
 {
-       return event->state == PERF_EVENT_STATE_EXIT;
+       return event->state == PERF_EVENT_STATE_DEAD;
 }
 
 static inline int pmu_filter_match(struct perf_event *event)
@@ -1690,14 +1701,14 @@ event_sched_out(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
+       event->tstamp_stopped = tstamp;
+       event->pmu->del(event, 0);
+       event->oncpu = -1;
        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
-       event->tstamp_stopped = tstamp;
-       event->pmu->del(event, 0);
-       event->oncpu = -1;
 
        if (!is_software_event(event))
                cpuctx->active_oncpu--;
@@ -1732,7 +1743,6 @@ group_sched_out(struct perf_event *group_event,
 }
 
 #define DETACH_GROUP   0x01UL
-#define DETACH_STATE   0x02UL
 
 /*
  * Cross CPU call to remove a performance event
@@ -1752,8 +1762,6 @@ __perf_remove_from_context(struct perf_event *event,
        if (flags & DETACH_GROUP)
                perf_group_detach(event);
        list_del_event(event, ctx);
-       if (flags & DETACH_STATE)
-               event->state = PERF_EVENT_STATE_EXIT;
 
        if (!ctx->nr_events && ctx->is_active) {
                ctx->is_active = 0;
@@ -2063,14 +2071,27 @@ static void add_event_to_ctx(struct perf_event *event,
        event->tstamp_stopped = tstamp;
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-                              struct perf_event_context *ctx);
+static void ctx_sched_out(struct perf_event_context *ctx,
+                         struct perf_cpu_context *cpuctx,
+                         enum event_type_t event_type);
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
             enum event_type_t event_type,
             struct task_struct *task);
 
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                              struct perf_event_context *ctx)
+{
+       if (!cpuctx->task_ctx)
+               return;
+
+       if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+               return;
+
+       ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+}
+
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                                struct perf_event_context *ctx,
                                struct task_struct *task)
@@ -2097,49 +2118,68 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 /*
  * Cross CPU call to install and enable a performance event
  *
- * Must be called with ctx->mutex held
+ * Very similar to remote_function() + event_function() but cannot assume that
+ * things like ctx->is_active and cpuctx->task_ctx are set.
  */
 static int  __perf_install_in_context(void *info)
 {
-       struct perf_event_context *ctx = info;
+       struct perf_event *event = info;
+       struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct perf_event_context *task_ctx = cpuctx->task_ctx;
+       bool activate = true;
+       int ret = 0;
 
        raw_spin_lock(&cpuctx->ctx.lock);
        if (ctx->task) {
                raw_spin_lock(&ctx->lock);
-               /*
-                * If we hit the 'wrong' task, we've since scheduled and
-                * everything should be sorted, nothing to do!
-                */
                task_ctx = ctx;
-               if (ctx->task != current)
+
+               /* If we're on the wrong CPU, try again */
+               if (task_cpu(ctx->task) != smp_processor_id()) {
+                       ret = -ESRCH;
                        goto unlock;
+               }
 
                /*
-                * If task_ctx is set, it had better be to us.
+                * If we're on the right CPU, see if the task we target is
+                * current; if not, we don't have to activate the ctx, as a future
+                * context switch will do that for us.
                 */
-               WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
+               if (ctx->task != current)
+                       activate = false;
+               else
+                       WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+
        } else if (task_ctx) {
                raw_spin_lock(&task_ctx->lock);
        }
 
-       ctx_resched(cpuctx, task_ctx);
+       if (activate) {
+               ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+               add_event_to_ctx(event, ctx);
+               ctx_resched(cpuctx, task_ctx);
+       } else {
+               add_event_to_ctx(event, ctx);
+       }
+
 unlock:
        perf_ctx_unlock(cpuctx, task_ctx);
 
-       return 0;
+       return ret;
 }
 
 /*
- * Attach a performance event to a context
+ * Attach a performance event to a context.
+ *
+ * Very similar to event_function_call, see comment there.
  */
 static void
 perf_install_in_context(struct perf_event_context *ctx,
                        struct perf_event *event,
                        int cpu)
 {
-       struct task_struct *task = NULL;
+       struct task_struct *task = READ_ONCE(ctx->task);
 
        lockdep_assert_held(&ctx->mutex);
 
@@ -2147,40 +2187,46 @@ perf_install_in_context(struct perf_event_context *ctx,
        if (event->cpu != -1)
                event->cpu = cpu;
 
+       if (!task) {
+               cpu_function_call(cpu, __perf_install_in_context, event);
+               return;
+       }
+
+       /*
+        * Should not happen, we validate the ctx is still alive before calling.
+        */
+       if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
+               return;
+
        /*
         * Installing events is tricky because we cannot rely on ctx->is_active
         * to be set in case this is the nr_events 0 -> 1 transition.
-        *
-        * So what we do is we add the event to the list here, which will allow
-        * a future context switch to DTRT and then send a racy IPI. If the IPI
-        * fails to hit the right task, this means a context switch must have
-        * happened and that will have taken care of business.
         */
-       raw_spin_lock_irq(&ctx->lock);
-       task = ctx->task;
+again:
        /*
-        * Worse, we cannot even rely on the ctx actually existing anymore. If
-        * between find_get_context() and perf_install_in_context() the task
-        * went through perf_event_exit_task() its dead and we should not be
-        * adding new events.
+        * Cannot use task_function_call() because we need to run on the task's
+        * CPU regardless of whether it's current or not.
         */
-       if (task == TASK_TOMBSTONE) {
+       if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+               return;
+
+       raw_spin_lock_irq(&ctx->lock);
+       task = ctx->task;
+       if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
+               /*
+                * Cannot happen because we already checked above (which also
+                * cannot happen), and we hold ctx->mutex, which serializes us
+                * against perf_event_exit_task_context().
+                */
                raw_spin_unlock_irq(&ctx->lock);
                return;
        }
-       update_context_time(ctx);
+       raw_spin_unlock_irq(&ctx->lock);
        /*
-        * Update cgrp time only if current cgrp matches event->cgrp.
-        * Must be done before calling add_event_to_ctx().
+        * Since !ctx->is_active doesn't mean anything, we must IPI
+        * unconditionally.
         */
-       update_cgrp_time_from_event(event);
-       add_event_to_ctx(event, ctx);
-       raw_spin_unlock_irq(&ctx->lock);
-
-       if (task)
-               task_function_call(task, __perf_install_in_context, ctx);
-       else
-               cpu_function_call(cpu, __perf_install_in_context, ctx);
+       goto again;
 }
 
 /*
@@ -2219,17 +2265,18 @@ static void __perf_event_enable(struct perf_event *event,
            event->state <= PERF_EVENT_STATE_ERROR)
                return;
 
-       update_context_time(ctx);
+       if (ctx->is_active)
+               ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
        __perf_event_mark_enabled(event);
 
        if (!ctx->is_active)
                return;
 
        if (!event_filter_match(event)) {
-               if (is_cgroup_event(event)) {
-                       perf_cgroup_set_timestamp(current, ctx); // XXX ?
+               if (is_cgroup_event(event))
                        perf_cgroup_defer_enabled(event);
-               }
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
                return;
        }
 
@@ -2237,8 +2284,10 @@ static void __perf_event_enable(struct perf_event *event,
         * If the event is in a group and isn't the group leader,
         * then don't put it on unless the group is on.
         */
-       if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
+       if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
                return;
+       }
 
        task_ctx = cpuctx->task_ctx;
        if (ctx->task)
@@ -2344,24 +2393,33 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        }
 
        ctx->is_active &= ~event_type;
+       if (!(ctx->is_active & EVENT_ALL))
+               ctx->is_active = 0;
+
        if (ctx->task) {
                WARN_ON_ONCE(cpuctx->task_ctx != ctx);
                if (!ctx->is_active)
                        cpuctx->task_ctx = NULL;
        }
 
-       update_context_time(ctx);
-       update_cgrp_time_from_cpuctx(cpuctx);
-       if (!ctx->nr_active)
+       is_active ^= ctx->is_active; /* changed bits */
+
+       if (is_active & EVENT_TIME) {
+               /* update (and stop) ctx time */
+               update_context_time(ctx);
+               update_cgrp_time_from_cpuctx(cpuctx);
+       }
+
+       if (!ctx->nr_active || !(is_active & EVENT_ALL))
                return;
 
        perf_pmu_disable(ctx->pmu);
-       if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
+       if (is_active & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }
 
-       if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
+       if (is_active & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }
@@ -2641,18 +2699,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
                perf_cgroup_sched_out(task, next);
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-                              struct perf_event_context *ctx)
-{
-       if (!cpuctx->task_ctx)
-               return;
-
-       if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
-               return;
-
-       ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */
@@ -2735,7 +2781,7 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (likely(!ctx->nr_events))
                return;
 
-       ctx->is_active |= event_type;
+       ctx->is_active |= (event_type | EVENT_TIME);
        if (ctx->task) {
                if (!is_active)
                        cpuctx->task_ctx = ctx;
@@ -2743,18 +2789,24 @@ ctx_sched_in(struct perf_event_context *ctx,
                        WARN_ON_ONCE(cpuctx->task_ctx != ctx);
        }
 
-       now = perf_clock();
-       ctx->timestamp = now;
-       perf_cgroup_set_timestamp(task, ctx);
+       is_active ^= ctx->is_active; /* changed bits */
+
+       if (is_active & EVENT_TIME) {
+               /* start ctx time */
+               now = perf_clock();
+               ctx->timestamp = now;
+               perf_cgroup_set_timestamp(task, ctx);
+       }
+
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
         */
-       if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
+       if (is_active & EVENT_PINNED)
                ctx_pinned_sched_in(ctx, cpuctx);
 
        /* Then walk through the lower prio flexible groups */
-       if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
+       if (is_active & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
 }
 
@@ -3120,6 +3172,7 @@ static void perf_event_enable_on_exec(int ctxn)
 
        cpuctx = __get_cpu_context(ctx);
        perf_ctx_lock(cpuctx, ctx);
+       ctx_sched_out(ctx, cpuctx, EVENT_TIME);
        list_for_each_entry(event, &ctx->event_list, event_entry)
                enabled |= event_enable_on_exec(event, ctx);
 
@@ -3537,12 +3590,22 @@ static void unaccount_event(struct perf_event *event)
        if (has_branch_stack(event))
                dec = true;
 
-       if (dec)
-               static_key_slow_dec_deferred(&perf_sched_events);
+       if (dec) {
+               if (!atomic_add_unless(&perf_sched_count, -1, 1))
+                       schedule_delayed_work(&perf_sched_work, HZ);
+       }
 
        unaccount_event_cpu(event, event->cpu);
 }
 
+static void perf_sched_delayed(struct work_struct *work)
+{
+       mutex_lock(&perf_sched_mutex);
+       if (atomic_dec_and_test(&perf_sched_count))
+               static_branch_disable(&perf_sched_events);
+       mutex_unlock(&perf_sched_mutex);
+}
+
 /*
  * The following implement mutual exclusion of events on "exclusive" pmus
  * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
@@ -3752,30 +3815,42 @@ static void put_event(struct perf_event *event)
  */
 int perf_event_release_kernel(struct perf_event *event)
 {
-       struct perf_event_context *ctx;
+       struct perf_event_context *ctx = event->ctx;
        struct perf_event *child, *tmp;
 
+       /*
+        * If we got here through err_file: fput(event_file); we will not have
+        * attached to a context yet.
+        */
+       if (!ctx) {
+               WARN_ON_ONCE(event->attach_state &
+                               (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
+               goto no_ctx;
+       }
+
        if (!is_kernel_event(event))
                perf_remove_from_owner(event);
 
        ctx = perf_event_ctx_lock(event);
        WARN_ON_ONCE(ctx->parent_ctx);
-       perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
-       perf_event_ctx_unlock(event, ctx);
+       perf_remove_from_context(event, DETACH_GROUP);
 
+       raw_spin_lock_irq(&ctx->lock);
        /*
-        * At this point we must have event->state == PERF_EVENT_STATE_EXIT,
-        * either from the above perf_remove_from_context() or through
-        * perf_event_exit_event().
+        * Mark this event as STATE_DEAD; there is no external reference to it
+        * anymore.
         *
-        * Therefore, anybody acquiring event->child_mutex after the below
-        * loop _must_ also see this, most importantly inherit_event() which
-        * will avoid placing more children on the list.
+        * Anybody acquiring event->child_mutex after the below loop _must_
+        * also see this, most importantly inherit_event() which will avoid
+        * placing more children on the list.
         *
         * Thus this guarantees that we will in fact observe and kill _ALL_
         * child events.
         */
-       WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT);
+       event->state = PERF_EVENT_STATE_DEAD;
+       raw_spin_unlock_irq(&ctx->lock);
+
+       perf_event_ctx_unlock(event, ctx);
 
 again:
        mutex_lock(&event->child_mutex);
@@ -3830,8 +3905,8 @@ again:
        }
        mutex_unlock(&event->child_mutex);
 
-       /* Must be the last reference */
-       put_event(event);
+no_ctx:
+       put_event(event); /* Must be the 'last' reference */
        return 0;
 }
 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -3988,7 +4063,7 @@ static bool is_event_hup(struct perf_event *event)
 {
        bool no_children;
 
-       if (event->state != PERF_EVENT_STATE_EXIT)
+       if (event->state > PERF_EVENT_STATE_EXIT)
                return false;
 
        mutex_lock(&event->child_mutex);
@@ -7769,8 +7844,28 @@ static void account_event(struct perf_event *event)
        if (is_cgroup_event(event))
                inc = true;
 
-       if (inc)
-               static_key_slow_inc(&perf_sched_events.key);
+       if (inc) {
+               if (atomic_inc_not_zero(&perf_sched_count))
+                       goto enabled;
+
+               mutex_lock(&perf_sched_mutex);
+               if (!atomic_read(&perf_sched_count)) {
+                       static_branch_enable(&perf_sched_events);
+                       /*
+                        * Guarantee that all CPUs observe the key change and
+                        * call the perf scheduling hooks before proceeding to
+                        * install events that need them.
+                        */
+                       synchronize_sched();
+               }
+               /*
+                * Now that we have waited for the sync_sched(), allow further
+                * increments to bypass the mutex.
+                */
+               atomic_inc(&perf_sched_count);
+               mutex_unlock(&perf_sched_mutex);
+       }
+enabled:
 
        account_event_cpu(event, event->cpu);
 }
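
The unaccount_event()/account_event() hunks above replace the deferred jump-label refcount with an explicit perf_sched_count guarding the perf_sched_events static branch: the common path is a lock-free atomic_inc_not_zero()/atomic_add_unless(), and only the first enable (plus the delayed final disable in perf_sched_delayed()) takes perf_sched_mutex, flips the branch and waits for a grace period so every CPU sees the key before events are installed. Below is a rough user-space C sketch of that enable/disable pattern; sched_count, sched_mutex and key_enabled are illustrative stand-ins rather than kernel APIs, and the RCU grace period and deferred work are only noted in comments.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sched_count;          /* stand-in for perf_sched_count */
static atomic_bool key_enabled;         /* stand-in for the static branch */
static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;

static void account(void)
{
        int old = atomic_load(&sched_count);

        /* fast path: like atomic_inc_not_zero(), succeed only if already > 0 */
        while (old != 0) {
                if (atomic_compare_exchange_weak(&sched_count, &old, old + 1))
                        return;
        }

        /* slow path: the first enabler flips the key under the mutex */
        pthread_mutex_lock(&sched_mutex);
        if (atomic_load(&sched_count) == 0) {
                atomic_store(&key_enabled, true);
                /* the kernel additionally calls synchronize_sched() here so
                 * every CPU observes the key before events are installed */
        }
        atomic_fetch_add(&sched_count, 1);
        pthread_mutex_unlock(&sched_mutex);
}

static void unaccount(void)
{
        int old = atomic_load(&sched_count);

        /* fast path: like atomic_add_unless(&count, -1, 1), never drop to 0 here */
        while (old > 1) {
                if (atomic_compare_exchange_weak(&sched_count, &old, old - 1))
                        return;
        }

        /* slow path: the kernel defers this to perf_sched_delayed() via a
         * delayed work item; done synchronously in this sketch */
        pthread_mutex_lock(&sched_mutex);
        if (atomic_fetch_sub(&sched_count, 1) == 1)
                atomic_store(&key_enabled, false);
        pthread_mutex_unlock(&sched_mutex);
}

int main(void)
{
        account();
        printf("after account:   key=%d count=%d\n",
               (int)atomic_load(&key_enabled), atomic_load(&sched_count));
        unaccount();
        printf("after unaccount: key=%d count=%d\n",
               (int)atomic_load(&key_enabled), atomic_load(&sched_count));
        return 0;
}
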
@@ -8389,10 +8484,19 @@ SYSCALL_DEFINE5(perf_event_open,
        if (move_group) {
                gctx = group_leader->ctx;
                mutex_lock_double(&gctx->mutex, &ctx->mutex);
+               if (gctx->task == TASK_TOMBSTONE) {
+                       err = -ESRCH;
+                       goto err_locked;
+               }
        } else {
                mutex_lock(&ctx->mutex);
        }
 
+       if (ctx->task == TASK_TOMBSTONE) {
+               err = -ESRCH;
+               goto err_locked;
+       }
+
        if (!perf_event_validate_size(event)) {
                err = -E2BIG;
                goto err_locked;
@@ -8509,7 +8613,12 @@ err_context:
        perf_unpin_context(ctx);
        put_ctx(ctx);
 err_alloc:
-       free_event(event);
+       /*
+        * If event_file is set, the fput() above will have called ->release()
+        * and that will take care of freeing the event.
+        */
+       if (!event_file)
+               free_event(event);
 err_cpus:
        put_online_cpus();
 err_task:
@@ -8563,12 +8672,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
+       if (ctx->task == TASK_TOMBSTONE) {
+               err = -ESRCH;
+               goto err_unlock;
+       }
+
        if (!exclusive_event_installable(event, ctx)) {
-               mutex_unlock(&ctx->mutex);
-               perf_unpin_context(ctx);
-               put_ctx(ctx);
                err = -EBUSY;
-               goto err_free;
+               goto err_unlock;
        }
 
        perf_install_in_context(ctx, event, cpu);
@@ -8577,6 +8688,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
        return event;
 
+err_unlock:
+       mutex_unlock(&ctx->mutex);
+       perf_unpin_context(ctx);
+       put_ctx(ctx);
 err_free:
        free_event(event);
 err:
@@ -8695,7 +8810,7 @@ perf_event_exit_event(struct perf_event *child_event,
        if (parent_event)
                perf_group_detach(child_event);
        list_del_event(child_event, child_ctx);
-       child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */
+       child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
        raw_spin_unlock_irq(&child_ctx->lock);
 
        /*
@@ -9313,9 +9428,6 @@ void __init perf_event_init(void)
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 
-       /* do not patch jump label more than once per second */
-       jump_label_rate_limit(&perf_sched_events, HZ);
-
        /*
         * Build time assertion that we keep the data_head at the intended
         * location.  IOW, validation we got the __reserved[] size right.
index 7a1b5c3..b981a7b 100644 (file)
@@ -136,8 +136,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
-       } else
+       } else {
                devres_free(ptr);
+               return ERR_PTR(-ENXIO);
+       }
 
        return addr;
 }
index cd64c97..57b939c 100644 (file)
@@ -420,7 +420,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-               printk_deferred_once("sched: DL replenish lagged to much\n");
+               printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
index f333e57..05ddc08 100644 (file)
@@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name)
        struct ftrace_event_field *field;
        struct list_head *head;
 
-       field = __find_event_field(&ftrace_generic_fields, name);
+       head = trace_get_fields(call);
+       field = __find_event_field(head, name);
        if (field)
                return field;
 
-       field = __find_event_field(&ftrace_common_fields, name);
+       field = __find_event_field(&ftrace_generic_fields, name);
        if (field)
                return field;
 
-       head = trace_get_fields(call);
-       return __find_event_field(head, name);
+       return __find_event_field(&ftrace_common_fields, name);
 }
 
 static int __trace_define_field(struct list_head *head, const char *type,
@@ -171,8 +171,10 @@ static int trace_define_generic_fields(void)
 {
        int ret;
 
-       __generic_field(int, cpu, FILTER_OTHER);
-       __generic_field(char *, comm, FILTER_PTR_STRING);
+       __generic_field(int, CPU, FILTER_CPU);
+       __generic_field(int, cpu, FILTER_CPU);
+       __generic_field(char *, COMM, FILTER_COMM);
+       __generic_field(char *, comm, FILTER_COMM);
 
        return ret;
 }
@@ -869,7 +871,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
-               if (call->class && call->class->reg)
+               if (call->class && call->class->reg &&
+                   !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }
 
index f93a219..6816302 100644 (file)
@@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps,
                return -EINVAL;
        }
 
-       if (is_string_field(field)) {
+       if (field->filter_type == FILTER_COMM) {
+               filter_build_regex(pred);
+               fn = filter_pred_comm;
+               pred->regex.field_len = TASK_COMM_LEN;
+       } else if (is_string_field(field)) {
                filter_build_regex(pred);
 
-               if (!strcmp(field->name, "comm")) {
-                       fn = filter_pred_comm;
-                       pred->regex.field_len = TASK_COMM_LEN;
-               } else if (field->filter_type == FILTER_STATIC_STRING) {
+               if (field->filter_type == FILTER_STATIC_STRING) {
                        fn = filter_pred_string;
                        pred->regex.field_len = field->size;
                } else if (field->filter_type == FILTER_DYN_STRING)
@@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps,
                }
                pred->val = val;
 
-               if (!strcmp(field->name, "cpu"))
+               if (field->filter_type == FILTER_CPU)
                        fn = filter_pred_cpu;
                else
                        fn = select_comparison_fn(pred->op, field->size,
index 202df6c..2a1abba 100644 (file)
@@ -156,7 +156,11 @@ check_stack(unsigned long ip, unsigned long *stack)
                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
-                       if (*p == stack_dump_trace[i]) {
+                       /*
+                        * The READ_ONCE_NOCHECK is used to let KASAN know that
+                        * this is not a stack-out-of-bounds error.
+                        */
+                       if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
index 23edcce..3461d97 100644 (file)
@@ -446,7 +446,8 @@ int filemap_write_and_wait(struct address_space *mapping)
 {
        int err = 0;
 
-       if (mapping->nrpages) {
+       if ((!dax_mapping(mapping) && mapping->nrpages) ||
+           (dax_mapping(mapping) && mapping->nrexceptional)) {
                err = filemap_fdatawrite(mapping);
                /*
                 * Even if the above returned error, the pages may be
@@ -482,13 +483,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 {
        int err = 0;
 
-       if (dax_mapping(mapping) && mapping->nrexceptional) {
-               err = dax_writeback_mapping_range(mapping, lstart, lend);
-               if (err)
-                       return err;
-       }
-
-       if (mapping->nrpages) {
+       if ((!dax_mapping(mapping) && mapping->nrpages) ||
+           (dax_mapping(mapping) && mapping->nrexceptional)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
index 1c317b8..e10a4fe 100644 (file)
@@ -2836,6 +2836,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        pgtable_t pgtable;
        pmd_t _pmd;
        bool young, write, dirty;
+       unsigned long addr;
        int i;
 
        VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2865,7 +2866,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
 
-       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+       for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
                pte_t entry, *pte;
                /*
                 * Note that NUMA hinting access restrictions are not
@@ -2886,9 +2887,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                }
                if (dirty)
                        SetPageDirty(page + i);
-               pte = pte_offset_map(&_pmd, haddr);
+               pte = pte_offset_map(&_pmd, addr);
                BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, haddr, pte, entry);
+               set_pte_at(mm, addr, pte, entry);
                atomic_inc(&page[i]._mapcount);
                pte_unmap(pte);
        }
@@ -2938,7 +2939,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        pmd_populate(mm, pmd, pgtable);
 
        if (freeze) {
-               for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               for (i = 0; i < HPAGE_PMD_NR; i++) {
                        page_remove_rmap(page + i, false);
                        put_page(page + i);
                }
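
The __split_huge_pmd_locked() hunks above stop advancing haddr inside the PTE-population loop (using a local addr instead) and drop the stray haddr increment from the freeze loop, since the rest of the function still needs haddr at its original, huge-page-aligned value. A trivial stand-alone C illustration of that bug class, with made-up values:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_PAGES  8

/* mimics the old code: the caller-supplied base address is advanced by the
 * loop itself, so any later user of "haddr" sees a wrong value */
static void buggy(unsigned long haddr)
{
        int i;

        for (i = 0; i < NR_PAGES; i++, haddr += PAGE_SIZE)
                ;                       /* ... map a pte at haddr ... */
        printf("buggy: haddr after loop = %#lx (moved past the range)\n", haddr);
}

/* mimics the fix: iterate on a local addr and leave haddr untouched */
static void fixed(unsigned long haddr)
{
        unsigned long addr;
        int i;

        for (i = 0, addr = haddr; i < NR_PAGES; i++, addr += PAGE_SIZE)
                ;                       /* ... map a pte at addr ... */
        printf("fixed: haddr after loop = %#lx (still the start)\n", haddr);
}

int main(void)
{
        buggy(0x400000UL);
        fixed(0x400000UL);
        return 0;
}
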
index 635451a..8132787 100644 (file)
@@ -3404,8 +3404,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(pmd_none(*pmd)) &&
            unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
-       /* if an huge pmd materialized from under us just retry later */
-       if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+       /*
+        * If a huge pmd materialized under us, just retry later.  Use
+        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+        * didn't become pmd_trans_huge under us and then back to pmd_none, as
+        * a result of MADV_DONTNEED running immediately after a huge pmd fault
+        * in a different thread of this mm, in turn leading to a misleading
+        * pmd_trans_huge() retval.  All we have to ensure is that it is a
+        * regular pmd that we can walk with pte_offset_map() and we can do that
+        * through an atomic read in C, which is what pmd_trans_unstable()
+        * provides.
+        */
+       if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
                return 0;
        /*
         * A regular pmd is established and it can't morph into a huge pmd
index b1034f9..3ad0fea 100644 (file)
@@ -1582,7 +1582,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                         (GFP_HIGHUSER_MOVABLE |
                                          __GFP_THISNODE | __GFP_NOMEMALLOC |
                                          __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~(__GFP_IO | __GFP_FS), 0);
+                                        ~__GFP_RECLAIM, 0);
 
        return newpage;
 }
index 9cfedf5..9382619 100644 (file)
@@ -1197,6 +1197,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
        return new_piece;
 }
 
+static size_t sizeof_footer(struct ceph_connection *con)
+{
+       return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
+           sizeof(struct ceph_msg_footer) :
+           sizeof(struct ceph_msg_footer_old);
+}
+
 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
 {
        BUG_ON(!msg);
@@ -2335,9 +2342,9 @@ static int read_partial_message(struct ceph_connection *con)
                        ceph_pr_addr(&con->peer_addr.in_addr),
                        seq, con->in_seq + 1);
                con->in_base_pos = -front_len - middle_len - data_len -
-                       sizeof(m->footer);
+                       sizeof_footer(con);
                con->in_tag = CEPH_MSGR_TAG_READY;
-               return 0;
+               return 1;
        } else if ((s64)seq - (s64)con->in_seq > 1) {
                pr_err("read_partial_message bad seq %lld expected %lld\n",
                       seq, con->in_seq + 1);
@@ -2360,10 +2367,10 @@ static int read_partial_message(struct ceph_connection *con)
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
                        con->in_base_pos = -front_len - middle_len - data_len -
-                               sizeof(m->footer);
+                               sizeof_footer(con);
                        con->in_tag = CEPH_MSGR_TAG_READY;
                        con->in_seq++;
-                       return 0;
+                       return 1;
                }
 
                BUG_ON(!con->in_msg);
index 3534e12..5bc0537 100644 (file)
@@ -2853,8 +2853,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (!req) {
-               pr_warn("%s osd%d tid %llu unknown, skipping\n",
-                       __func__, osd->o_osd, tid);
+               dout("%s osd%d tid %llu unknown, skipping\n", __func__,
+                    osd->o_osd, tid);
                m = NULL;
                *skip = 1;
                goto out;
index 799e65b..cabf586 100644 (file)
@@ -740,7 +740,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
                default:
                        printk(KERN_CRIT "%s: bad return from "
                                "gss_fill_context: %zd\n", __func__, err);
-                       BUG();
+                       gss_msg->msg.errno = -EIO;
                }
                goto err_release_msg;
        }
index 2b32fd6..273bc3a 100644 (file)
@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
-               while (len < bufsize) {
+               while (len < bufsize - 1) {
                        int h, l;
 
                        h = hex_to_bin(bp[0]);
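
The qword_get() change above tightens the hex-decode loop bound from bufsize to bufsize - 1, so the NUL terminator the function writes into dest after the loop can no longer land one byte past the end of the caller's buffer. A minimal user-space sketch of the fixed bound (hex_decode() and the sample input are invented for the illustration):

#include <ctype.h>
#include <stdio.h>

/* decode pairs of hex digits into dest, stopping at bufsize - 1 so the
 * terminating NUL always fits inside the buffer */
static int hex_decode(const char *src, char *dest, int bufsize)
{
        int len = 0;

        while (len < bufsize - 1 && isxdigit((unsigned char)src[0]) &&
               isxdigit((unsigned char)src[1])) {
                unsigned int byte;

                sscanf(src, "%2x", &byte);
                dest[len++] = (char)byte;
                src += 2;
        }
        dest[len] = '\0';       /* with the old "len < bufsize" bound this could be dest[bufsize] */
        return len;
}

int main(void)
{
        char buf[4];
        int n = hex_decode("41424344", buf, sizeof(buf));

        printf("decoded %d bytes: %s\n", n, buf);  /* "ABC"; fourth byte is the NUL */
        return 0;
}
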
index cc1251d..2dcd764 100644 (file)
@@ -341,6 +341,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_bytes_sent = 0;
        rqst->rq_xid = headerp->rm_xid;
+
+       rqst->rq_private_buf.len = size;
        set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
 
        buf = &rqst->rq_rcv_buf;
index f8110cf..f1ab715 100644 (file)
@@ -3249,7 +3249,7 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
 
 static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
 {
-       struct inode_security_struct *isec = inode_security(inode);
+       struct inode_security_struct *isec = inode_security_novalidate(inode);
        *secid = isec->sid;
 }
 
index b9c0910..0608f21 100644 (file)
@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
         unsigned char reserved[128];
 };
 
+#ifdef CONFIG_X86_X32
+/* x32 has a different alignment for 64bit values from ia32 */
+struct snd_ctl_elem_value_x32 {
+       struct snd_ctl_elem_id id;
+       unsigned int indirect;  /* bit-field causes misalignment */
+       union {
+               s32 integer[128];
+               unsigned char data[512];
+               s64 integer64[64];
+       } value;
+       unsigned char reserved[128];
+};
+#endif /* CONFIG_X86_X32 */
 
 /* get the value type and count of the control */
 static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
 
 static int copy_ctl_value_from_user(struct snd_card *card,
                                    struct snd_ctl_elem_value *data,
-                                   struct snd_ctl_elem_value32 __user *data32,
+                                   void __user *userdata,
+                                   void __user *valuep,
                                    int *typep, int *countp)
 {
+       struct snd_ctl_elem_value32 __user *data32 = userdata;
        int i, type, size;
        int uninitialized_var(count);
        unsigned int indirect;
@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
            type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
                for (i = 0; i < count; i++) {
+                       s32 __user *intp = valuep;
                        int val;
-                       if (get_user(val, &data32->value.integer[i]))
+                       if (get_user(val, &intp[i]))
                                return -EFAULT;
                        data->value.integer.value[i] = val;
                }
@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
                        dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
                        return -EINVAL;
                }
-               if (copy_from_user(data->value.bytes.data,
-                                  data32->value.data, size))
+               if (copy_from_user(data->value.bytes.data, valuep, size))
                        return -EFAULT;
        }
 
@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
 }
 
 /* restore the value to 32bit */
-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+static int copy_ctl_value_to_user(void __user *userdata,
+                                 void __user *valuep,
                                  struct snd_ctl_elem_value *data,
                                  int type, int count)
 {
@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
            type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
                for (i = 0; i < count; i++) {
+                       s32 __user *intp = valuep;
                        int val;
                        val = data->value.integer.value[i];
-                       if (put_user(val, &data32->value.integer[i]))
+                       if (put_user(val, &intp[i]))
                                return -EFAULT;
                }
        } else {
                size = get_elem_size(type, count);
-               if (copy_to_user(data32->value.data,
-                                data->value.bytes.data, size))
+               if (copy_to_user(valuep, data->value.bytes.data, size))
                        return -EFAULT;
        }
        return 0;
 }
 
-static int snd_ctl_elem_read_user_compat(struct snd_card *card, 
-                                        struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_read_user(struct snd_card *card,
+                             void __user *userdata, void __user *valuep)
 {
        struct snd_ctl_elem_value *data;
        int err, type, count;
@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
        if (data == NULL)
                return -ENOMEM;
 
-       if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+       err = copy_ctl_value_from_user(card, data, userdata, valuep,
+                                      &type, &count);
+       if (err < 0)
                goto error;
 
        snd_power_lock(card);
@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
                err = snd_ctl_elem_read(card, data);
        snd_power_unlock(card);
        if (err >= 0)
-               err = copy_ctl_value_to_user(data32, data, type, count);
+               err = copy_ctl_value_to_user(userdata, valuep, data,
+                                            type, count);
  error:
        kfree(data);
        return err;
 }
 
-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
-                                         struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_write_user(struct snd_ctl_file *file,
+                              void __user *userdata, void __user *valuep)
 {
        struct snd_ctl_elem_value *data;
        struct snd_card *card = file->card;
@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
        if (data == NULL)
                return -ENOMEM;
 
-       if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+       err = copy_ctl_value_from_user(card, data, userdata, valuep,
+                                      &type, &count);
+       if (err < 0)
                goto error;
 
        snd_power_lock(card);
@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
                err = snd_ctl_elem_write(card, file, data);
        snd_power_unlock(card);
        if (err >= 0)
-               err = copy_ctl_value_to_user(data32, data, type, count);
+               err = copy_ctl_value_to_user(userdata, valuep, data,
+                                            type, count);
  error:
        kfree(data);
        return err;
 }
 
+static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+                                        struct snd_ctl_elem_value32 __user *data32)
+{
+       return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+                                         struct snd_ctl_elem_value32 __user *data32)
+{
+       return ctl_elem_write_user(file, data32, &data32->value);
+}
+
+#ifdef CONFIG_X86_X32
+static int snd_ctl_elem_read_user_x32(struct snd_card *card,
+                                     struct snd_ctl_elem_value_x32 __user *data32)
+{
+       return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
+                                      struct snd_ctl_elem_value_x32 __user *data32)
+{
+       return ctl_elem_write_user(file, data32, &data32->value);
+}
+#endif /* CONFIG_X86_X32 */
+
 /* add or replace a user control */
 static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
                                   struct snd_ctl_elem_info32 __user *data32,
@@ -393,6 +441,10 @@ enum {
        SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
        SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
        SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
+#ifdef CONFIG_X86_X32
+       SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
+       SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
                return snd_ctl_elem_add_compat(ctl, argp, 0);
        case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
                return snd_ctl_elem_add_compat(ctl, argp, 1);
+#ifdef CONFIG_X86_X32
+       case SNDRV_CTL_IOCTL_ELEM_READ_X32:
+               return snd_ctl_elem_read_user_x32(ctl->card, argp);
+       case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
+               return snd_ctl_elem_write_user_x32(ctl, argp);
+#endif /* CONFIG_X86_X32 */
        }
 
        down_read(&snd_ioctl_rwsem);
index 9630e9f..1f64ab0 100644 (file)
@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
        return err;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
+static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
+                                    struct snd_pcm_channel_info __user *src);
+#define snd_pcm_ioctl_channel_info_x32(s, p)   \
+       snd_pcm_channel_info_user(s, p)
+#endif /* CONFIG_X86_X32 */
+
 struct snd_pcm_status32 {
        s32 state;
        struct compat_timespec trigger_tstamp;
@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
        return err;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_status_x32 {
+       s32 state;
+       u32 rsvd; /* alignment */
+       struct timespec trigger_tstamp;
+       struct timespec tstamp;
+       u32 appl_ptr;
+       u32 hw_ptr;
+       s32 delay;
+       u32 avail;
+       u32 avail_max;
+       u32 overrange;
+       s32 suspended_state;
+       u32 audio_tstamp_data;
+       struct timespec audio_tstamp;
+       struct timespec driver_tstamp;
+       u32 audio_tstamp_accuracy;
+       unsigned char reserved[52-2*sizeof(struct timespec)];
+} __packed;
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
+                                  struct snd_pcm_status_x32 __user *src,
+                                  bool ext)
+{
+       struct snd_pcm_status status;
+       int err;
+
+       memset(&status, 0, sizeof(status));
+       /*
+        * with the extension, parameters are read/write:
+        * get audio_tstamp_data from the user and ignore
+        * the rest of the status structure
+        */
+       if (ext && get_user(status.audio_tstamp_data,
+                               (u32 __user *)(&src->audio_tstamp_data)))
+               return -EFAULT;
+       err = snd_pcm_status(substream, &status);
+       if (err < 0)
+               return err;
+
+       if (clear_user(src, sizeof(*src)))
+               return -EFAULT;
+       if (put_user(status.state, &src->state) ||
+           put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+           put_timespec(&status.tstamp, &src->tstamp) ||
+           put_user(status.appl_ptr, &src->appl_ptr) ||
+           put_user(status.hw_ptr, &src->hw_ptr) ||
+           put_user(status.delay, &src->delay) ||
+           put_user(status.avail, &src->avail) ||
+           put_user(status.avail_max, &src->avail_max) ||
+           put_user(status.overrange, &src->overrange) ||
+           put_user(status.suspended_state, &src->suspended_state) ||
+           put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
+           put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
+           put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
+           put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
+               return -EFAULT;
+
+       return err;
+}
+#endif /* CONFIG_X86_X32 */
+
 /* both for HW_PARAMS and HW_REFINE */
 static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
                                          int refine, 
@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
        return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_mmap_status_x32 {
+       s32 state;
+       s32 pad1;
+       u32 hw_ptr;
+       u32 pad2; /* alignment */
+       struct timespec tstamp;
+       s32 suspended_state;
+       struct timespec audio_tstamp;
+} __packed;
+
+struct snd_pcm_mmap_control_x32 {
+       u32 appl_ptr;
+       u32 avail_min;
+};
+
+struct snd_pcm_sync_ptr_x32 {
+       u32 flags;
+       u32 rsvd; /* alignment */
+       union {
+               struct snd_pcm_mmap_status_x32 status;
+               unsigned char reserved[64];
+       } s;
+       union {
+               struct snd_pcm_mmap_control_x32 control;
+               unsigned char reserved[64];
+       } c;
+} __packed;
+
+static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
+                                     struct snd_pcm_sync_ptr_x32 __user *src)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       volatile struct snd_pcm_mmap_status *status;
+       volatile struct snd_pcm_mmap_control *control;
+       u32 sflags;
+       struct snd_pcm_mmap_control scontrol;
+       struct snd_pcm_mmap_status sstatus;
+       snd_pcm_uframes_t boundary;
+       int err;
+
+       if (snd_BUG_ON(!runtime))
+               return -EINVAL;
+
+       if (get_user(sflags, &src->flags) ||
+           get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+           get_user(scontrol.avail_min, &src->c.control.avail_min))
+               return -EFAULT;
+       if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+               err = snd_pcm_hwsync(substream);
+               if (err < 0)
+                       return err;
+       }
+       status = runtime->status;
+       control = runtime->control;
+       boundary = recalculate_boundary(runtime);
+       if (!boundary)
+               boundary = 0x7fffffff;
+       snd_pcm_stream_lock_irq(substream);
+       /* FIXME: we should consider the boundary for the sync from app */
+       if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+               control->appl_ptr = scontrol.appl_ptr;
+       else
+               scontrol.appl_ptr = control->appl_ptr % boundary;
+       if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+               control->avail_min = scontrol.avail_min;
+       else
+               scontrol.avail_min = control->avail_min;
+       sstatus.state = status->state;
+       sstatus.hw_ptr = status->hw_ptr % boundary;
+       sstatus.tstamp = status->tstamp;
+       sstatus.suspended_state = status->suspended_state;
+       sstatus.audio_tstamp = status->audio_tstamp;
+       snd_pcm_stream_unlock_irq(substream);
+       if (put_user(sstatus.state, &src->s.status.state) ||
+           put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
+           put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
+           put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
+           put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
+           put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+           put_user(scontrol.avail_min, &src->c.control.avail_min))
+               return -EFAULT;
+
+       return 0;
+}
+#endif /* CONFIG_X86_X32 */
 
 /*
  */
@@ -487,7 +647,12 @@ enum {
        SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
        SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
        SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
-
+#ifdef CONFIG_X86_X32
+       SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
+       SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
+       SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
+       SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
                return snd_pcm_ioctl_rewind_compat(substream, argp);
        case SNDRV_PCM_IOCTL_FORWARD32:
                return snd_pcm_ioctl_forward_compat(substream, argp);
+#ifdef CONFIG_X86_X32
+       case SNDRV_PCM_IOCTL_STATUS_X32:
+               return snd_pcm_status_user_x32(substream, argp, false);
+       case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
+               return snd_pcm_status_user_x32(substream, argp, true);
+       case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
+               return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
+       case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
+               return snd_pcm_ioctl_channel_info_x32(substream, argp);
+#endif /* CONFIG_X86_X32 */
        }
 
        return -ENOIOCTLCMD;
index 5268c1f..f69764d 100644 (file)
@@ -85,8 +85,7 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
        if (err < 0)
                return err;
 
-       if (put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
-           put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
+       if (compat_put_timespec(&status.tstamp, &src->tstamp) ||
            put_user(status.avail, &src->avail) ||
            put_user(status.xruns, &src->xruns))
                return -EFAULT;
@@ -94,9 +93,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
        return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_rawmidi_status_x32 {
+       s32 stream;
+       u32 rsvd; /* alignment */
+       struct timespec tstamp;
+       u32 avail;
+       u32 xruns;
+       unsigned char reserved[16];
+} __attribute__((packed));
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+                                       struct snd_rawmidi_status_x32 __user *src)
+{
+       int err;
+       struct snd_rawmidi_status status;
+
+       if (rfile->output == NULL)
+               return -EINVAL;
+       if (get_user(status.stream, &src->stream))
+               return -EFAULT;
+
+       switch (status.stream) {
+       case SNDRV_RAWMIDI_STREAM_OUTPUT:
+               err = snd_rawmidi_output_status(rfile->output, &status);
+               break;
+       case SNDRV_RAWMIDI_STREAM_INPUT:
+               err = snd_rawmidi_input_status(rfile->input, &status);
+               break;
+       default:
+               return -EINVAL;
+       }
+       if (err < 0)
+               return err;
+
+       if (put_timespec(&status.tstamp, &src->tstamp) ||
+           put_user(status.avail, &src->avail) ||
+           put_user(status.xruns, &src->xruns))
+               return -EFAULT;
+
+       return 0;
+}
+#endif /* CONFIG_X86_X32 */
+
 enum {
        SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
        SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
+#ifdef CONFIG_X86_X32
+       SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -115,6 +163,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
                return snd_rawmidi_ioctl_params_compat(rfile, argp);
        case SNDRV_RAWMIDI_IOCTL_STATUS32:
                return snd_rawmidi_ioctl_status_compat(rfile, argp);
+#ifdef CONFIG_X86_X32
+       case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
+               return snd_rawmidi_ioctl_status_x32(rfile, argp);
+#endif /* CONFIG_X86_X32 */
        }
        return -ENOIOCTLCMD;
 }
index 8db156b..8cdf489 100644 (file)
@@ -149,8 +149,6 @@ odev_release(struct inode *inode, struct file *file)
        if ((dp = file->private_data) == NULL)
                return 0;
 
-       snd_seq_oss_drain_write(dp);
-
        mutex_lock(&register_mutex);
        snd_seq_oss_release(dp);
        mutex_unlock(&register_mutex);
index b439243..d7b4d01 100644 (file)
@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
 unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
 
 void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
 
 /* */
 void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
index 6779e82..92c96a9 100644 (file)
@@ -435,22 +435,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
 }
 
 
-/*
- * Wait until the queue is empty (if we don't have nonblock)
- */
-void
-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
-{
-       if (! dp->timer->running)
-               return;
-       if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
-           dp->writeq) {
-               while (snd_seq_oss_writeq_sync(dp->writeq))
-                       ;
-       }
-}
-
-
 /*
  * reset sequencer devices
  */
index e05802a..2e90822 100644 (file)
@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
                                        struct snd_timer_status32 __user *_status)
 {
        struct snd_timer_user *tu;
-       struct snd_timer_status status;
+       struct snd_timer_status32 status;
        
        tu = file->private_data;
        if (snd_BUG_ON(!tu->timeri))
                return -ENXIO;
        memset(&status, 0, sizeof(status));
-       status.tstamp = tu->tstamp;
+       status.tstamp.tv_sec = tu->tstamp.tv_sec;
+       status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
        status.resolution = snd_timer_resolution(tu->timeri);
        status.lost = tu->timeri->lost;
        status.overrun = tu->overrun;
@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
        return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 */
+#define snd_timer_user_status_x32(file, s) \
+       snd_timer_user_status(file, s)
+#endif /* CONFIG_X86_X32 */
+
 /*
  */
 
 enum {
        SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
        SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
+#ifdef CONFIG_X86_X32
+       SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
                return snd_timer_user_info_compat(file, argp);
        case SNDRV_TIMER_IOCTL_STATUS32:
                return snd_timer_user_status_compat(file, argp);
+#ifdef CONFIG_X86_X32
+       case SNDRV_TIMER_IOCTL_STATUS_X32:
+               return snd_timer_user_status_x32(file, argp);
+#endif /* CONFIG_X86_X32 */
        }
        return -ENOIOCTLCMD;
 }
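
The ALSA compat changes above (control, PCM, rawmidi and timer) all follow the same scheme: the x32 ABI uses 64-bit time_t/timespec and 64-bit alignment, so the same 'U'/'A'/'W'/'T' command bytes encode to different ioctl numbers than the ia32 layouts, and the compat switch can dispatch them to separate *_X32 cases. A small stand-alone C illustration of why the numbers differ (the two struct layouts below are invented examples, not the real ALSA structures):

#include <stdio.h>
#include <linux/ioctl.h>

/* invented stand-ins: an ia32-style layout with 32-bit time fields and an
 * x32-style layout with 64-bit time fields; only their sizes matter here */
struct status_ia32 { int tv_sec; int tv_nsec; unsigned int avail; };
struct status_x32  { long long tv_sec; long long tv_nsec; unsigned int avail; };

int main(void)
{
        /* _IOW() encodes sizeof(type), so the same 'T', 0x14 command yields
         * two distinct ioctl numbers, one per layout */
        printf("ia32 cmd = %#x (size %zu)\n",
               (unsigned int)_IOW('T', 0x14, struct status_ia32),
               sizeof(struct status_ia32));
        printf("x32  cmd = %#x (size %zu)\n",
               (unsigned int)_IOW('T', 0x14, struct status_x32),
               sizeof(struct status_x32));
        return 0;
}
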
index b5a17cb..8c48623 100644 (file)
@@ -426,18 +426,22 @@ EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_chip);
  * @bus: HD-audio core bus
  * @status: INTSTS register value
  * @ack: callback to be called for woken streams
+ *
+ * Returns the bits of handled streams, or zero if no stream is handled.
  */
-void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
                                    void (*ack)(struct hdac_bus *,
                                                struct hdac_stream *))
 {
        struct hdac_stream *azx_dev;
        u8 sd_status;
+       int handled = 0;
 
        list_for_each_entry(azx_dev, &bus->stream_list, list) {
                if (status & azx_dev->sd_int_sta_mask) {
                        sd_status = snd_hdac_stream_readb(azx_dev, SD_STS);
                        snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
+                       handled |= 1 << azx_dev->index;
                        if (!azx_dev->substream || !azx_dev->running ||
                            !(sd_status & SD_INT_COMPLETE))
                                continue;
@@ -445,6 +449,7 @@ void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
                                ack(bus, azx_dev);
                }
        }
+       return handled;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq);
 
index 37cf9ce..27de801 100644 (file)
@@ -930,6 +930,8 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
        struct azx *chip = dev_id;
        struct hdac_bus *bus = azx_bus(chip);
        u32 status;
+       bool active, handled = false;
+       int repeat = 0; /* count for avoiding endless loop */
 
 #ifdef CONFIG_PM
        if (azx_has_pm_runtime(chip))
@@ -939,33 +941,36 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
 
        spin_lock(&bus->reg_lock);
 
-       if (chip->disabled) {
-               spin_unlock(&bus->reg_lock);
-               return IRQ_NONE;
-       }
-
-       status = azx_readl(chip, INTSTS);
-       if (status == 0 || status == 0xffffffff) {
-               spin_unlock(&bus->reg_lock);
-               return IRQ_NONE;
-       }
+       if (chip->disabled)
+               goto unlock;
 
-       snd_hdac_bus_handle_stream_irq(bus, status, stream_update);
+       do {
+               status = azx_readl(chip, INTSTS);
+               if (status == 0 || status == 0xffffffff)
+                       break;
 
-       /* clear rirb int */
-       status = azx_readb(chip, RIRBSTS);
-       if (status & RIRB_INT_MASK) {
-               if (status & RIRB_INT_RESPONSE) {
-                       if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
-                               udelay(80);
-                       snd_hdac_bus_update_rirb(bus);
+               handled = true;
+               active = false;
+               if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
+                       active = true;
+
+               /* clear rirb int */
+               status = azx_readb(chip, RIRBSTS);
+               if (status & RIRB_INT_MASK) {
+                       active = true;
+                       if (status & RIRB_INT_RESPONSE) {
+                               if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+                                       udelay(80);
+                               snd_hdac_bus_update_rirb(bus);
+                       }
+                       azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
                }
-               azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
-       }
+       } while (active && ++repeat < 10);
 
+ unlock:
        spin_unlock(&bus->reg_lock);
 
-       return IRQ_HANDLED;
+       return IRQ_RETVAL(handled);
 }
 EXPORT_SYMBOL_GPL(azx_interrupt);
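
The azx_interrupt() rework above turns the handler into a bounded loop: it keeps re-reading INTSTS and servicing stream/RIRB status while work is being found, gives up after ten passes to avoid a livelock on a stuck device, and returns IRQ_RETVAL(handled) so a shared interrupt that was not ours is reported as IRQ_NONE. A toy C model of that control flow (read_intsts() and ack_one() fake the hardware):

#include <stdbool.h>
#include <stdio.h>

/* toy hardware: a set of pending status bits that shrinks as we ack them */
static unsigned int pending = 0x05;

static unsigned int read_intsts(void) { return pending; }

static unsigned int ack_one(void)
{
        unsigned int bit = pending & -pending;  /* clear the lowest pending bit */

        pending &= ~bit;
        return bit;
}

/* shape of the reworked handler: drain status while work is being done,
 * cap the passes, and report whether anything at all was ours */
static int toy_interrupt(void)
{
        bool handled = false, active;
        int repeat = 0;
        unsigned int status;

        do {
                status = read_intsts();
                if (status == 0 || status == 0xffffffff)
                        break;

                handled = true;
                active = ack_one() != 0;        /* some stream/RIRB work done */
        } while (active && ++repeat < 10);

        return handled;         /* the driver returns IRQ_RETVAL(handled) */
}

int main(void)
{
        printf("first call handled=%d\n", toy_interrupt());
        printf("second call handled=%d\n", toy_interrupt());  /* nothing pending -> 0 */
        return 0;
}
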
 
index ce6b97f..e5240cb 100644 (file)
@@ -363,7 +363,10 @@ enum {
                                        ((pci)->device == 0x0d0c) || \
                                        ((pci)->device == 0x160c))
 
-#define IS_BROXTON(pci)        ((pci)->device == 0x5a98)
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -540,13 +543,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                snd_hdac_set_codec_wakeup(bus, true);
-       if (IS_BROXTON(pci)) {
+       if (IS_SKL_PLUS(pci)) {
                pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
                val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
                pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
        }
        azx_init_chip(chip, full_reset);
-       if (IS_BROXTON(pci)) {
+       if (IS_SKL_PLUS(pci)) {
                pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
                val = val | INTEL_HDA_CGCTL_MISCBDCGE;
                pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -555,7 +558,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
                snd_hdac_set_codec_wakeup(bus, false);
 
        /* reduce dma latency to avoid noise */
-       if (IS_BROXTON(pci))
+       if (IS_BXT(pci))
                bxt_reduce_dma_latency(chip);
 }
 
@@ -977,11 +980,6 @@ static int azx_resume(struct device *dev)
 /* put codec down to D3 at hibernation for Intel SKL+;
  * otherwise BIOS may still access the codec and screw up the driver
  */
-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
-
 static int azx_freeze_noirq(struct device *dev)
 {
        struct pci_dev *pci = to_pci_dev(dev);
index 8ee78db..bcbc4ee 100644 (file)
@@ -2477,13 +2477,6 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                        is_broxton(codec))
                codec->core.link_power_control = 1;
 
-       if (codec_has_acomp(codec)) {
-               codec->depop_delay = 0;
-               spec->i915_audio_ops.audio_ptr = codec;
-               spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
-               snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
-       }
-
        if (hdmi_parse_codec(codec) < 0) {
                if (spec->i915_bound)
                        snd_hdac_i915_exit(&codec->bus->core);
@@ -2505,6 +2498,18 @@ static int patch_generic_hdmi(struct hda_codec *codec)
 
        init_channel_allocations();
 
+       if (codec_has_acomp(codec)) {
+               codec->depop_delay = 0;
+               spec->i915_audio_ops.audio_ptr = codec;
+               /* intel_audio_codec_enable() or intel_audio_codec_disable()
+                * will call pin_eld_notify using the audio_ptr pointer;
+                * we need to make sure audio_ptr is really set up
+                */
+               wmb();
+               spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+               snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+       }
+
        return 0;
 }
 
index efd4980..93d2156 100644 (file)
@@ -3801,6 +3801,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
 
 static void alc_headset_mode_default(struct hda_codec *codec)
 {
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+               {}
+       };
        static struct coef_fw coef0255[] = {
                WRITE_COEF(0x45, 0xc089),
                WRITE_COEF(0x45, 0xc489),
@@ -3842,6 +3846,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
        };
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        case 0x10ec0255:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
@@ -4749,6 +4756,9 @@ enum {
        ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
        ALC293_FIXUP_LENOVO_SPK_NOISE,
        ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+       ALC255_FIXUP_DELL_SPK_NOISE,
+       ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+       ALC280_FIXUP_HP_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5368,6 +5378,29 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
        },
+       [ALC255_FIXUP_DELL_SPK_NOISE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
+       [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* Disable pass-through path for FRONT 14h */
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
+       [ALC280_FIXUP_HP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5379,6 +5412,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+       SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
@@ -5410,6 +5444,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5470,6 +5505,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5638,10 +5674,10 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
-       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
                {0x14, 0x901701a0}),
-       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
                {0x14, 0x901701b0}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
index 2875b4f..7c8941b 100644 (file)
@@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
 {
        struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
 
-       ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
+       ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
        return 0;
 }
 
@@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
 
        if (!snd_hdsp_use_is_exclusive(hdsp))
                return -EBUSY;
-       val = ucontrol->value.enumerated.item[0];
+       val = ucontrol->value.integer.value[0];
        spin_lock_irq(&hdsp->lock);
        if (val != hdsp_dds_offset(hdsp))
                change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
index 8bc8016..a4a999a 100644 (file)
@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
 {
        u64 n;
 
+       if (snd_BUG_ON(rate <= 0))
+               return;
+
        if (rate >= 112000)
                rate /= 4;
        else if (rate >= 56000)
@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
                } else {
                        /* slave mode, return external sample rate */
                        rate = hdspm_external_sample_rate(hdspm);
+                       if (!rate)
+                               rate = hdspm->system_sample_rate;
                }
        }
 
@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
                                            ucontrol)
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+       int rate = ucontrol->value.integer.value[0];
 
-       hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
+       if (rate < 27000 || rate > 207000)
+               return -EINVAL;
+       hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
        return 0;
 }
 
@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-       ucontrol->value.enumerated.item[0] = hdspm->tco->term;
+       ucontrol->value.integer.value[0] = hdspm->tco->term;
 
        return 0;
 }
@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-       if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
-               hdspm->tco->term = ucontrol->value.enumerated.item[0];
+       if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
+               hdspm->tco->term = ucontrol->value.integer.value[0];
 
                hdspm_tco_write(hdspm);
 
index 4f6ce1c..c458d60 100644 (file)
@@ -1124,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+       case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
index 90bd2ea..b3281dc 100644 (file)
@@ -217,13 +217,16 @@ static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
        return rc;
 }
 
+#define NFIT_TEST_ARS_RECORDS 4
+
 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
                unsigned int buf_len)
 {
        if (buf_len < sizeof(*nd_cmd))
                return -EINVAL;
 
-       nd_cmd->max_ars_out = 256;
+       nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
+               + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
        nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
 
        return 0;
@@ -246,7 +249,8 @@ static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
        if (buf_len < sizeof(*nd_cmd))
                return -EINVAL;
 
-       nd_cmd->out_length = 256;
+       nd_cmd->out_length = sizeof(struct nd_cmd_ars_status);
+       /* TODO: emit error records */
        nd_cmd->num_records = 0;
        nd_cmd->address = 0;
        nd_cmd->length = -1ULL;
index 773e276..1e1abe0 100644 (file)
@@ -39,28 +39,23 @@ instance_slam() {
 }
 
 instance_slam &
-x=`jobs -l`
-p1=`echo $x | cut -d' ' -f2`
+p1=$!
 echo $p1
 
 instance_slam &
-x=`jobs -l | tail -1`
-p2=`echo $x | cut -d' ' -f2`
+p2=$!
 echo $p2
 
 instance_slam &
-x=`jobs -l | tail -1`
-p3=`echo $x | cut -d' ' -f2`
+p3=$!
 echo $p3
 
 instance_slam &
-x=`jobs -l | tail -1`
-p4=`echo $x | cut -d' ' -f2`
+p4=$!
 echo $p4
 
 instance_slam &
-x=`jobs -l | tail -1`
-p5=`echo $x | cut -d' ' -f2`
+p5=$!
 echo $p5
 
 ls -lR >/dev/null
index 043032c..00429b3 100644 (file)
@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
-       int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+       int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+       int sz = nr_longs * sizeof(unsigned long);
        vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
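
The vgic hunk above sizes the shared-IRQ bitmaps with BITS_TO_LONGS() so the kzalloc() length is rounded up to whole unsigned longs instead of being the truncated byte count nr/8. A tiny stand-alone C illustration of the difference (nr_irqs = 100 and VGIC_NR_PRIVATE_IRQS = 32 are example values):

#include <stdio.h>

#define BITS_PER_LONG           (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)       (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define VGIC_NR_PRIVATE_IRQS    32      /* example value */

int main(void)
{
        int nr_irqs = 100;      /* the shared part (68 bits) is not long-aligned */
        int shared = nr_irqs - VGIC_NR_PRIVATE_IRQS;

        size_t old_sz = shared / 8;     /* truncating byte count */
        size_t new_sz = BITS_TO_LONGS(shared) * sizeof(unsigned long);

        /* on a 64-bit machine: old = 8 bytes (64 bits, too small for 68 bits),
         * new = 16 bytes (128 bits, rounded up to whole longs) */
        printf("old size = %zu bytes, new size = %zu bytes\n", old_sz, new_sz);
        return 0;
}
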
index 3531599..db2dd33 100644 (file)
@@ -172,7 +172,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
         * do alloc nowait since if we are going to sleep anyway we
         * may as well sleep faulting in page
         */
-       work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+       work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return 0;