Merge tag 'armsoc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 29 Oct 2016 19:07:29 +0000 (12:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 29 Oct 2016 19:07:29 +0000 (12:07 -0700)
Pull ARM SoC fixes from Olof Johansson:
 "We haven't seen a whole lot of fixes for the first two weeks since the
  merge window, but here is the batch that we have at the moment.

  Nothing sticks out as particularly bad or scary, it's mostly a handful
  of smaller fixes to several platforms. The Uniphier reset controller
  changes could probably have been delayed to 4.10, but they're not
  scary and just plumbing up driver changes that went in during the
  merge window.

  We're also adding another maintainer to Marvell Berlin platforms, to
  help out when Sebastian is too busy. Yay teamwork!"

* tag 'armsoc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  ARM: imx: mach-imx6q: Fix the PHY ID mask for AR8031
  ARM: dts: vf610: fix IRQ flag of global timer
  ARM: imx: gpc: Fix the imx_gpc_genpd_init() error path
  ARM: imx: gpc: Initialize all power domains
  arm64: dts: Updated NAND DT properties for NS2 SVK
  arm64: dts: uniphier: change MIO node to SD control node
  ARM: dts: uniphier: change MIO node to SD control node
  reset: uniphier: rename MIO reset to SD reset for Pro5, PXs2, LD20 SoCs
  arm64: uniphier: select ARCH_HAS_RESET_CONTROLLER
  ARM: uniphier: select ARCH_HAS_RESET_CONTROLLER
  arm64: dts: Add timer erratum property for LS2080A and LS1043A
  arm64: dts: rockchip: remove the abuse of keep-power-in-suspend
  ARM: multi_v7_defconfig: Enable Intel e1000e driver
  MAINTAINERS: add myself as Marvell berlin SoC maintainer
  bus: qcom-ebi2: depend on ARCH_QCOM or COMPILE_TEST
  ARM: dts: fix the SD card on the Snowball
  arm64: dts: rockchip: remove always-on and boot-on from vcc_sd
  arm64: dts: marvell: fix clocksource for CP110 master SPI0
  ARM: mvebu: Select corediv clk for all mvebu v7 SoC

539 files changed:
CREDITS
Documentation/ABI/testing/sysfs-class-cxl
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/clock/uniphier-clock.txt
Documentation/devicetree/bindings/ipmi.txt [deleted file]
Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/ipmi/ipmi-smic.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
Documentation/devicetree/bindings/serial/cdns,uart.txt
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/timer/jcore,pit.txt [new file with mode: 0644]
Documentation/devicetree/bindings/usb/dwc2.txt
Documentation/filesystems/proc.txt
Documentation/gpio/board.txt
MAINTAINERS
Makefile
arch/alpha/kernel/ptrace.c
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/Makefile
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/cache.h
arch/arc/include/asm/elf.h
arch/arc/include/asm/mcip.h
arch/arc/include/asm/module.h
arch/arc/include/asm/setup.h
arch/arc/include/asm/syscalls.h
arch/arc/include/uapi/asm/unistd.h
arch/arc/kernel/mcip.c
arch/arc/kernel/module.c
arch/arc/kernel/process.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arc/mm/tlb.c
arch/arc/mm/tlbex.S
arch/arm/kvm/arm.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/exec.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/head.S
arch/arm64/kernel/process.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/traps.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/numa.c
arch/blackfin/kernel/ptrace.c
arch/cris/arch-v32/drivers/cryptocop.c
arch/cris/arch-v32/kernel/ptrace.c
arch/h8300/include/asm/thread_info.h
arch/h8300/kernel/signal.c
arch/ia64/kernel/err_inject.c
arch/ia64/kernel/ptrace.c
arch/m32r/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kvm/mips.c
arch/mips/mm/gup.c
arch/powerpc/boot/main.c
arch/powerpc/include/asm/cpuidle.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/tlb.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace32.c
arch/powerpc/kvm/book3s_hv_rm_xics.c
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/tlb-radix.c
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/unistd.h
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/stacktrace.c
arch/s390/kvm/intercept.c
arch/s390/mm/gup.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/init.c
arch/s390/oprofile/init.c
arch/score/kernel/ptrace.c
arch/sh/Makefile
arch/sh/boards/Kconfig
arch/sh/configs/j2_defconfig
arch/sh/mm/gup.c
arch/sparc/kernel/ptrace_64.c
arch/sparc/mm/gup.c
arch/x86/entry/Makefile
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/syscalls/syscall_64.tbl
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/io.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/e820.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/mcount_64.S
arch/x86/kernel/quirks.c
arch/x86/kernel/signal_compat.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/step.c
arch/x86/kernel/unwind_guess.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/x86.c
arch/x86/mm/gup.c
arch/x86/mm/kaslr.c
arch/x86/mm/mpx.c
arch/x86/mm/pat.c
arch/x86/platform/uv/bios_uv.c
arch/x86/um/ptrace_32.c
arch/x86/um/ptrace_64.c
arch/x86/xen/enlighten.c
block/badblocks.c
block/blk-flush.c
block/blk-mq.c
drivers/Makefile
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/nsload.c
drivers/acpi/apei/ghes.c
drivers/acpi/pci_link.c
drivers/android/binder.c
drivers/ata/ahci.c
drivers/base/Kconfig
drivers/block/DAC960.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/char/hw_random/core.c
drivers/char/ipmi/Kconfig
drivers/char/ipmi/Makefile
drivers/char/ipmi/bt-bmc.c [new file with mode: 0644]
drivers/char/ipmi/ipmi_msghandler.c
drivers/clk/at91/clk-programmable.c
drivers/clk/bcm/clk-bcm2835.c
drivers/clk/clk-max77686.c
drivers/clk/hisilicon/clk-hi6220.c
drivers/clk/mediatek/Kconfig
drivers/clk/mvebu/armada-37xx-periph.c
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/uniphier/clk-uniphier-core.c
drivers/clk/uniphier/clk-uniphier-mio.c
drivers/clk/uniphier/clk-uniphier-mux.c
drivers/clk/uniphier/clk-uniphier.h
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/jcore-pit.c [new file with mode: 0644]
drivers/clocksource/timer-sun5i.c
drivers/cpufreq/intel_pstate.c
drivers/dax/Kconfig
drivers/dax/pmem.c
drivers/extcon/extcon-qcom-spmi-misc.c
drivers/firewire/nosy.c
drivers/firmware/efi/libstub/Makefile
drivers/gpio/Kconfig
drivers/gpio/gpio-ath79.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-stmpe.c
drivers/gpio/gpio-ts4800.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/hid/hid-dr.c
drivers/hid/hid-ids.h
drivers/hid/hid-led.c
drivers/hid/usbhid/hid-quirks.c
drivers/hv/hv_util.c
drivers/hwmon/adm9240.c
drivers/hwmon/max31790.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-digicolor.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-jz4780.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-xgene-slimpro.c
drivers/i2c/busses/i2c-xlp9xx.c
drivers/i2c/busses/i2c-xlr.c
drivers/i2c/i2c-core.c
drivers/iio/adc/Kconfig
drivers/iio/chemical/atlas-ph-sensor.c
drivers/iio/temperature/maxim_thermocouple.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/mthca/mthca_memfree.c
drivers/infiniband/hw/qib/qib_user_pages.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/ipack/ipack.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-eznps.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-jcore-aic.c
drivers/md/dm-raid.c
drivers/md/dm-raid1.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/media/pci/ivtv/ivtv-udma.c
drivers/media/pci/ivtv/ivtv-yuv.c
drivers/media/platform/omap/omap_vout.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf2-memops.c
drivers/memstick/host/rtsx_usb_ms.c
drivers/misc/cxl/api.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/file.c
drivers/misc/cxl/guest.c
drivers/misc/cxl/main.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/sysfs.c
drivers/misc/genwqe/card_utils.c
drivers/misc/mei/hw-txe.c
drivers/misc/mic/scif/scif_rma.c
drivers/misc/sgi-gru/grufault.c
drivers/misc/sgi-gru/grumain.c
drivers/misc/vmw_vmci/vmci_doorbell.c
drivers/misc/vmw_vmci/vmci_driver.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.h
drivers/mmc/core/mmc.c
drivers/mmc/host/rtsx_usb_sdmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-of-arasan.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/fastmap.c
drivers/nvdimm/Kconfig
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/nvme/host/scsi.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/discovery.c
drivers/pci/host/pci-layerscape.c
drivers/pci/host/pcie-designware-plat.c
drivers/pci/msi.c
drivers/perf/xgene_pmu.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/platform/goldfish/goldfish_pipe.c
drivers/platform/x86/Kconfig
drivers/platform/x86/ideapad-laptop.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/s390/block/dasd_eckd.c
drivers/s390/cio/chp.c
drivers/s390/scsi/zfcp_dbf.c
drivers/scsi/NCR5380.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/ipr.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_scan.c
drivers/scsi/st.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_of.c
drivers/staging/greybus/arche-platform.c
drivers/staging/greybus/es2.c
drivers/staging/greybus/gpio.c
drivers/staging/greybus/module.c
drivers/staging/greybus/uart.c
drivers/staging/iio/accel/sca3000_core.c
drivers/staging/lustre/lustre/llite/lproc_llite.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/wilc1000/host_interface.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/intel_pch_thermal.c
drivers/thermal/intel_powerclamp.c
drivers/tty/serial/8250/8250_lpss.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/8250_uniphier.c
drivers/tty/serial/Kconfig
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/stm32-usart.h
drivers/tty/serial/xilinx_uartps.c
drivers/tty/vt/vt.c
drivers/usb/chipidea/host.c
drivers/usb/dwc2/core.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/host/ehci-platform.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/omap2430.c
drivers/usb/renesas_usbhs/rcar3.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/usb-serial.c
drivers/usb/wusbcore/crypto.c
drivers/video/fbdev/pvr2fb.c
drivers/virt/fsl_hypervisor.c
drivers/vme/vme.c
drivers/watchdog/wdat_wdt.c
drivers/xen/manage.c
drivers/xen/xenbus/xenbus_dev_frontend.c
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/btrfs/compression.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/super.c
fs/ceph/xattr.c
fs/crypto/crypto.c
fs/crypto/policy.c
fs/exec.c
fs/exofs/dir.c
fs/ext2/inode.c
fs/ext4/block_validity.c
fs/ext4/mballoc.h
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/ext4/xattr.c
fs/f2fs/gc.c
fs/iomap.c
fs/isofs/inode.c
fs/jbd2/transaction.c
fs/kernfs/file.c
fs/locks.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/nfs4proc.c
fs/orangefs/dcache.c
fs/orangefs/file.c
fs/orangefs/namei.c
fs/orangefs/orangefs-kernel.h
fs/proc/array.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/ubifs/dir.c
fs/ubifs/xattr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_dquot_buf.c
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_inode_buf.h
fs/xfs/xfs_file.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_reflink.h
fs/xfs/xfs_sysfs.c
fs/xfs/xfs_trace.h
include/acpi/pcc.h
include/asm-generic/export.h
include/linux/acpi.h
include/linux/clk-provider.h
include/linux/cpufreq.h
include/linux/cpuhotplug.h
include/linux/io.h
include/linux/iomap.h
include/linux/irqchip/arm-gic-v3.h
include/linux/kasan.h
include/linux/kconfig.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/nvme.h
include/linux/perf_event.h
include/linux/syscalls.h
include/linux/thread_info.h
include/target/target_core_base.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/Kbuild
include/uapi/linux/bt-bmc.h [new file with mode: 0644]
ipc/msgutil.c
kernel/cpu.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/irq/manage.c
kernel/kcov.c
kernel/power/suspend.c
kernel/printk/printk.c
kernel/ptrace.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/wait.c
kernel/softirq.c
kernel/time/alarmtimer.c
kernel/time/timer.c
lib/Kconfig.debug
lib/genalloc.c
lib/stackdepot.c
mm/Kconfig
mm/filemap.c
mm/frame_vector.c
mm/gup.c
mm/kasan/kasan.c
mm/kmemleak.c
mm/list_lru.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mprotect.c
mm/nommu.c
mm/page_alloc.c
mm/process_vm_access.c
mm/slab.c
mm/slab.h
mm/util.c
mm/vmscan.c
net/ceph/pagevec.c
security/keys/Kconfig
security/keys/big_key.c
security/keys/proc.c
security/selinux/hooks.c
security/tomoyo/domain.c
sound/core/seq/seq_timer.c
sound/pci/asihpi/hpioctl.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks-table.h
tools/arch/x86/include/asm/cpufeatures.h
tools/objtool/arch/x86/decode.c
tools/objtool/builtin-check.c
tools/perf/jvmti/Makefile
tools/perf/ui/browsers/hists.c
tools/perf/util/header.c
tools/perf/util/parse-events.l
virt/kvm/async_pf.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 513aaa3..8373676 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1864,10 +1864,11 @@ S: The Netherlands
 
 N: Martin Kepplinger
 E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
 W: http://www.martinkepplinger.com
 D: mma8452 accelerators iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
 S: Garnisonstraße 26
 S: 4020 Linz
 S: Austria
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 4ba0a2a..640f65e 100644 (file)
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -220,8 +220,11 @@ What:           /sys/class/cxl/<card>/reset
 Date:           October 2014
 Contact:        linuxppc-dev@lists.ozlabs.org
 Description:    write only
-                Writing 1 will issue a PERST to card which may cause the card
-                to reload the FPGA depending on load_image_on_perst.
+                Writing 1 will issue a PERST to card provided there are no
+                contexts active on any one of the card AFUs. This may cause
+                the card to reload the FPGA depending on load_image_on_perst.
+                Writing -1 will do a force PERST irrespective of any active
+                contexts on the card AFUs.
 Users:         https://github.com/ibm-capi/libcxl
 
 What:          /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
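
For reference, a minimal userspace sketch of the force-PERST write described above; the card0 device node and the need for root privileges are assumptions, not part of the ABI text:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Per the ABI text: "1" issues a PERST only when no AFU
	 * contexts are active; "-1" forces the PERST regardless.
	 */
	int fd = open("/sys/class/cxl/card0/reset", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "-1", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
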
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index e5b6497..c75b64a 100644 (file)
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -309,3 +309,4 @@ Version History
        with a reshape in progress.
 1.9.0   Add support for RAID level takeover/reshape/region size
        and set size reduction.
+1.9.1   Fix activation of existing RAID 4/10 mapped devices
diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
index c7179d3..8121630 100644 (file)
--- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt
+++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
@@ -24,7 +24,7 @@ Example:
                reg = <0x61840000 0x4000>;
 
                clock {
-                       compatible = "socionext,uniphier-ld20-clock";
+                       compatible = "socionext,uniphier-ld11-clock";
                        #clock-cells = <1>;
                };
 
@@ -43,8 +43,8 @@ Provided clocks:
 21: USB3 ch1 PHY1
 
 
-Media I/O (MIO) clock
----------------------
+Media I/O (MIO) clock, SD clock
+-------------------------------
 
 Required properties:
 - compatible: should be one of the following:
@@ -52,10 +52,10 @@ Required properties:
     "socionext,uniphier-ld4-mio-clock"  - for LD4 SoC.
     "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
     "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
-    "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC.
-    "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC.
+    "socionext,uniphier-pro5-sd-clock"  - for Pro5 SoC.
+    "socionext,uniphier-pxs2-sd-clock"  - for PXs2/LD6b SoC.
     "socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
-    "socionext,uniphier-ld20-mio-clock" - for LD20 SoC.
+    "socionext,uniphier-ld20-sd-clock"  - for LD20 SoC.
 - #clock-cells: should be 1.
 
 Example:
@@ -66,7 +66,7 @@ Example:
                reg = <0x59810000 0x800>;
 
                clock {
-                       compatible = "socionext,uniphier-ld20-mio-clock";
+                       compatible = "socionext,uniphier-ld11-mio-clock";
                        #clock-cells = <1>;
                };
 
@@ -112,7 +112,7 @@ Example:
                reg = <0x59820000 0x200>;
 
                clock {
-                       compatible = "socionext,uniphier-ld20-peri-clock";
+                       compatible = "socionext,uniphier-ld11-peri-clock";
                        #clock-cells = <1>;
                };
 
diff --git a/Documentation/devicetree/bindings/ipmi.txt b/Documentation/devicetree/bindings/ipmi.txt
deleted file mode 100644 (file)
index d5f1a87..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-IPMI device
-
-Required properties:
-- compatible: should be one of ipmi-kcs, ipmi-smic, or ipmi-bt
-- device_type: should be ipmi
-- reg: Address and length of the register set for the device
-
-Optional properties:
-- interrupts: The interrupt for the device.  Without this the interface
-       is polled.
-- reg-size - The size of the register.  Defaults to 1
-- reg-spacing - The number of bytes between register starts.  Defaults to 1
-- reg-shift - The amount to shift the registers to the right to get the data
-       into bit zero.
-
-Example:
-
-smic@fff3a000 {
-       compatible = "ipmi-smic";
-       device_type = "ipmi";
-       reg = <0xfff3a000 0x1000>;
-       interrupts = <0 24 4>;
-       reg-size = <4>;
-       reg-spacing = <4>;
-};
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
new file mode 100644 (file)
index 0000000..fbbacd9
--- /dev/null
@@ -0,0 +1,23 @@
+* Aspeed BT (Block Transfer) IPMI interface
+
+The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs
+(BaseBoard Management Controllers) and the BT interface can be used to
+perform in-band IPMI communication with their host.
+
+Required properties:
+
+- compatible : should be "aspeed,ast2400-bt-bmc"
+- reg: physical address and size of the registers
+
+Optional properties:
+
+- interrupts: interrupt generated by the BT interface. Without an
+  interrupt, the driver will operate in poll mode.
+
+Example:
+
+       ibt@1e789140 {
+               compatible = "aspeed,ast2400-bt-bmc";
+               reg = <0x1e789140 0x18>;
+               interrupts = <8>;
+       };
diff --git a/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
new file mode 100644 (file)
index 0000000..d5f1a87
--- /dev/null
@@ -0,0 +1,25 @@
+IPMI device
+
+Required properties:
+- compatible: should be one of ipmi-kcs, ipmi-smic, or ipmi-bt
+- device_type: should be ipmi
+- reg: Address and length of the register set for the device
+
+Optional properties:
+- interrupts: The interrupt for the device.  Without this the interface
+       is polled.
+- reg-size - The size of the register.  Defaults to 1
+- reg-spacing - The number of bytes between register starts.  Defaults to 1
+- reg-shift - The amount to shift the registers to the right to get the data
+       into bit zero.
+
+Example:
+
+smic@fff3a000 {
+       compatible = "ipmi-smic";
+       device_type = "ipmi";
+       reg = <0xfff3a000 0x1000>;
+       interrupts = <0 24 4>;
+       reg-size = <4>;
+       reg-spacing = <4>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 5e60ad1..2ad18c4 100644 (file)
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -43,7 +43,9 @@ aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
 
 GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
 I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
+TIMER7 TIMER8 VGABIOSROM
+
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/serial/cdns,uart.txt b/Documentation/devicetree/bindings/serial/cdns,uart.txt
index a3eb154..227bb77 100644 (file)
--- a/Documentation/devicetree/bindings/serial/cdns,uart.txt
+++ b/Documentation/devicetree/bindings/serial/cdns,uart.txt
@@ -1,7 +1,9 @@
 Binding for Cadence UART Controller
 
 Required properties:
-- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps"
+- compatible :
+  Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
+  Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
 - reg: Should contain UART controller registers location and length.
 - interrupts: Should contain UART controller interrupts.
 - clocks: Must contain phandles to the UART clocks
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 1e4000d..8d27d1a 100644 (file)
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -9,6 +9,14 @@ Required properties:
     - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
     - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
     - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+    - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
+    - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
+    - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
+    - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
+    - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
+    - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
+    - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
+    - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
     - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
     - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
     - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/timer/jcore,pit.txt b/Documentation/devicetree/bindings/timer/jcore,pit.txt
new file mode 100644 (file)
index 0000000..af5dd35
--- /dev/null
@@ -0,0 +1,24 @@
+J-Core Programmable Interval Timer and Clocksource
+
+Required properties:
+
+- compatible: Must be "jcore,pit".
+
+- reg: Memory region(s) for timer/clocksource registers. For SMP,
+  there should be one region per cpu, indexed by the sequential,
+  zero-based hardware cpu number.
+
+- interrupts: An interrupt to assign for the timer. The actual pit
+  core is integrated with the aic and allows the timer interrupt
+  assignment to be programmed by software, but this property is
+  required in order to reserve an interrupt number that doesn't
+  conflict with other devices.
+
+
+Example:
+
+timer@200 {
+       compatible = "jcore,pit";
+       reg = < 0x200 0x30 0x500 0x30 >;
+       interrupts = < 0x48 >;
+};
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 455f2c3..2c30a54 100644 (file)
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -28,10 +28,7 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
 - g-use-dma: enable dma usage in gadget driver.
 - g-rx-fifo-size: size of rx fifo size in gadget mode.
 - g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
-
-Deprecated properties:
-- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
-  in gadget mode.
+- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
 
 Example:
 
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 219ffd4..74329fd 100644 (file)
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -395,32 +395,6 @@ is not associated with a file:
 
  or if empty, the mapping is anonymous.
 
-The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
-of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. Hence, for the example above, the
-task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
-
-08048000-08049000 r-xp 00000000 03:00 8312       /opt/test
-08049000-0804a000 rw-p 00001000 03:00 8312       /opt/test
-0804a000-0806b000 rw-p 00000000 00:00 0          [heap]
-a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0
-a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0          [stack]
-a7ed5000-a8008000 r-xp 00000000 03:00 4222       /lib/libc.so.6
-a8008000-a800a000 r--p 00133000 03:00 4222       /lib/libc.so.6
-a800a000-a800b000 rw-p 00135000 03:00 4222       /lib/libc.so.6
-a800b000-a800e000 rw-p 00000000 00:00 0
-a800e000-a8022000 r-xp 00000000 03:00 14462      /lib/libpthread.so.0
-a8022000-a8023000 r--p 00013000 03:00 14462      /lib/libpthread.so.0
-a8023000-a8024000 rw-p 00014000 03:00 14462      /lib/libpthread.so.0
-a8024000-a8027000 rw-p 00000000 00:00 0
-a8027000-a8043000 r-xp 00000000 03:00 8317       /lib/ld-linux.so.2
-a8043000-a8044000 r--p 0001b000 03:00 8317       /lib/ld-linux.so.2
-a8044000-a8045000 rw-p 0001c000 03:00 8317       /lib/ld-linux.so.2
-aff35000-aff4a000 rw-p 00000000 00:00 0
-ffffe000-fffff000 r-xp 00000000 00:00 0          [vdso]
-
 The /proc/PID/smaps is an extension based on maps, showing the memory
 consumption for each of the process's mappings. For each of mappings there
 is a series of lines such as the following:
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 40884c4..a0f6189 100644 (file)
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -6,7 +6,7 @@ Note that it only applies to the new descriptor-based interface. For a
 description of the deprecated integer-based GPIO interface please refer to
 gpio-legacy.txt (actually, there is no real mapping possible with the old
 interface; you just fetch an integer from somewhere and request the
-corresponding GPIO.
+corresponding GPIO).
 
 All platforms can enable the GPIO library, but if the platform strictly
 requires GPIO functionality to be present, it needs to select GPIOLIB from its
@@ -162,6 +162,9 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
 
 Since the "led" GPIOs are mapped as active-high, this example will switch their
 signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
-as active-low, its actual signal will be 0 after this code. Contrary to the legacy
-integer GPIO interface, the active-low property is handled during mapping and is
-thus transparent to GPIO consumers.
+as active-low, its actual signal will be 0 after this code. Contrary to the
+legacy integer GPIO interface, the active-low property is handled during
+mapping and is thus transparent to GPIO consumers.
+
+A set of functions such as gpiod_set_value() is available to work with
+the new descriptor-oriented interface.
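
A short sketch of that descriptor-oriented consumer API; the "foo" device and "power" function names come from the example above, while the probe wiring here is illustrative rather than taken from the patch:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *power;

	/* Look up the "power" mapping registered for this device */
	power = devm_gpiod_get(&pdev->dev, "power", GPIOD_OUT_LOW);
	if (IS_ERR(power))
		return PTR_ERR(power);

	/* Logical 1 means "asserted"; because the mapping is
	 * active-low, the physical line is driven to 0.
	 */
	gpiod_set_value(power, 1);
	return 0;
}
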
diff --git a/MAINTAINERS b/MAINTAINERS
index 25f543c..3d838cf 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4621,8 +4621,9 @@ F:        sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
 M:     Matt Fleming <matt@codeblueprint.co.uk>
+M:     Ard Biesheuvel <ard.biesheuvel@linaro.org>
 L:     linux-efi@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
 S:     Maintained
 F:     Documentation/efi-stub.txt
 F:     arch/ia64/kernel/efi.c
@@ -5287,6 +5288,12 @@ M:       Joe Perches <joe@perches.com>
 S:     Maintained
 F:     scripts/get_maintainer.pl
 
+GENWQE (IBM Generic Workqueue Card)
+M:     Frank Haverkamp <haver@linux.vnet.ibm.com>
+M:     Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+S:     Supported
+F:     drivers/misc/genwqe/
+
 GFS2 FILE SYSTEM
 M:     Steven Whitehouse <swhiteho@redhat.com>
 M:     Bob Peterson <rpeterso@redhat.com>
@@ -8100,6 +8107,7 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
+M:     Jessica Yu <jeyu@redhat.com>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 S:     Maintained
 F:     include/linux/module.h
@@ -8213,7 +8221,7 @@ F:        include/linux/mfd/
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
 M:     Ulf Hansson <ulf.hansson@linaro.org>
 L:     linux-mmc@vger.kernel.org
-T:     git git://git.linaro.org/people/ulf.hansson/mmc.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/mmc/
 F:     drivers/mmc/
@@ -9300,7 +9308,7 @@ S:        Maintained
 F:     drivers/pci/host/*designware*
 
 PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M:     Joao Pinto <jpinto@synopsys.com>
+M:     Jose Abreu <Jose.Abreu@synopsys.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/designware-pcie.txt
diff --git a/Makefile b/Makefile
index 512e47a..93beca4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee817..940dfb4 100644 (file)
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 static inline int
 read_int(struct task_struct *task, unsigned long addr, int * data)
 {
-       int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+       int copied = access_process_vm(task, addr, data, sizeof(int),
+                       FOLL_FORCE);
        return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
 static inline int
 write_int(struct task_struct *task, unsigned long addr, int data)
 {
-       int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+       int copied = access_process_vm(task, addr, &data, sizeof(int),
+                       FOLL_FORCE | FOLL_WRITE);
        return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
        /* When I and D space are separate, these will need to be fixed.  */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
-               copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+               copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+                               FOLL_FORCE);
                ret = -EIO;
                if (copied != sizeof(tmp))
                        break;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ecd1237..bd204bf 100644 (file)
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -41,6 +41,8 @@ config ARC
        select PERF_USE_VMALLOC
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_GENERIC_DMA_COHERENT
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZMA
 
 config MIGHT_HAVE_PCI
        bool
@@ -186,14 +188,6 @@ if SMP
 config ARC_HAS_COH_CACHES
        def_bool n
 
-config ARC_MCIP
-       bool "ARConnect Multicore IP (MCIP) Support "
-       depends on ISA_ARCV2
-       help
-         This IP block enables SMP in ARC-HS38 cores.
-         It provides for cross-core interrupts, multi-core debug
-         hardware semaphores, shared memory,....
-
 config NR_CPUS
        int "Maximum number of CPUs (2-4096)"
        range 2 4096
@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
 
 endif  #SMP
 
+config ARC_MCIP
+       bool "ARConnect Multicore IP (MCIP) Support "
+       depends on ISA_ARCV2
+       default y if SMP
+       help
+         This IP block enables SMP in ARC-HS38 cores.
+         It provides for cross-core interrupts, multi-core debug
+         hardware semaphores, shared memory,....
+
 menuconfig ARC_CACHE
        bool "Enable Cache Support"
        default y
@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
        bool "Paranoia Checks in Low Level TLB Handlers"
        default n
 
-config ARC_DBG_TLB_MISS_COUNT
-       bool "Profile TLB Misses"
-       default n
-       select DEBUG_FS
-       help
-         Counts number of I and D TLB Misses and exports them via Debugfs
-         The counters can be cleared via Debugfs as well
-
 endif
 
 config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aa82d13..864adad 100644 (file)
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,9 +50,6 @@ atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
 
 cflags-$(atleast_gcc44)                        += -fsection-anchors
 
-cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
-cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
-
 ifdef CONFIG_ISA_ARCV2
 
 ifndef CONFIG_ARC_HAS_LL64
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index e597cb3..f94cf15 100644 (file)
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
 
 suffix-y := bin
 suffix-$(CONFIG_KERNEL_GZIP)   := gz
+suffix-$(CONFIG_KERNEL_LZMA)   := lzma
 
-targets += uImage uImage.bin uImage.gz
-extra-y += vmlinux.bin vmlinux.bin.gz
+targets += uImage
+targets += uImage.bin
+targets += uImage.gz
+targets += uImage.lzma
+extra-y += vmlinux.bin
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
 
 $(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
@@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzma)
+
 $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
        $(call if_changed,uimage,none)
 
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+       $(call if_changed,uimage,lzma)
+
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
        @echo '  Image $@ is ready'
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index db25c65..7f3f9f6 100644 (file)
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -349,10 +349,11 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
        struct bcr_isa isa;
+       const char *details, *name;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
-               unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+               unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
                             fpu_sp:1, fpu_dp:1, pad2:6,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index fb781e3..b3410ff 100644 (file)
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,7 +53,7 @@ extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 
-extern int ioc_exists;
+extern int ioc_enable;
 extern unsigned long perip_base, perip_end;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 7096f97..aa2d6da 100644 (file)
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
  * the loader.  We need to make sure that it is out of the way of the program
  * that it will "exec", and that there is sufficient room for the brk.
  */
-#define ELF_ET_DYN_BASE                (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE                (2UL * TASK_SIZE / 3)
 
 /*
  * When the program starts, a1 contains a pointer to a function to be
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 847e3bb..c8fbe41 100644 (file)
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -55,6 +55,22 @@ struct mcip_cmd {
 #define IDU_M_DISTRI_DEST              0x2
 };
 
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad3:8,
+                            idu:1, llm:1, num_cores:6,
+                            iocoh:1,  gfrc:1, dbg:1, pad2:1,
+                            msg:1, sem:1, ipi:1, pad:1,
+                            ver:8;
+#else
+               unsigned int ver:8,
+                            pad:1, ipi:1, sem:1, msg:1,
+                            pad2:1, dbg:1, gfrc:1, iocoh:1,
+                            num_cores:6, llm:1, idu:1,
+                            pad3:8;
+#endif
+};
+
 /*
  * MCIP programming model
  *
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222b..6e91d8b 100644 (file)
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -18,6 +18,7 @@
 struct mod_arch_specific {
        void *unw_info;
        int unw_sec_idx;
+       const char *secstr;
 };
 #endif
 
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 48b37c6..cb954cd 100644 (file)
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -27,11 +27,6 @@ struct id_to_str {
        const char *str;
 };
 
-struct cpuinfo_data {
-       struct id_to_str info;
-       int up_range;
-};
-
 extern int root_mountflags, end_mem;
 
 void setup_processor(void);
@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
 #define IS_USED_RUN(v)         ((v) ? "" : "(not used) ")
 #define IS_USED_CFG(cfg)       IS_USED_RUN(IS_ENABLED(cfg))
 #define IS_AVAIL2(v, s, cfg)   IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s)    IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
 
 #endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e56f9fc..772b67c 100644 (file)
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
 
 #include <asm-generic/syscalls.h>
 
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 41fa2ec..9a34136 100644 (file)
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
 
 #define NR_syscalls    __NR_syscalls
 
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
+#define __NR_sysfs             (__NR_arch_specific_syscall + 3)
+
 /* ARC specific syscall */
 #define __NR_cacheflush                (__NR_arch_specific_syscall + 0)
 #define __NR_arc_settls                (__NR_arch_specific_syscall + 1)
 #define __NR_arc_gettls                (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg   (__NR_arch_specific_syscall + 4)
 
 __SYSCALL(__NR_cacheflush, sys_cacheflush)
 __SYSCALL(__NR_arc_settls, sys_arc_settls)
 __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs             (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
 #undef __SYSCALL
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 72f9179..c424d5a 100644 (file)
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
 #include <asm/mcip.h>
 #include <asm/setup.h>
 
-static char smp_cpuinfo_buf[128];
-static int idu_detected;
-
 static DEFINE_RAW_SPINLOCK(mcip_lock);
 
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];
+
 static void mcip_setup_per_cpu(int cpu)
 {
        smp_ipi_irq_setup(cpu, IPI_IRQ);
@@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
 
 static void mcip_probe_n_setup(void)
 {
-       struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-               unsigned int pad3:8,
-                            idu:1, llm:1, num_cores:6,
-                            iocoh:1,  gfrc:1, dbg:1, pad2:1,
-                            msg:1, sem:1, ipi:1, pad:1,
-                            ver:8;
-#else
-               unsigned int ver:8,
-                            pad:1, ipi:1, sem:1, msg:1,
-                            pad2:1, dbg:1, gfrc:1, iocoh:1,
-                            num_cores:6, llm:1, idu:1,
-                            pad3:8;
-#endif
-       } mp;
+       struct mcip_bcr mp;
 
        READ_BCR(ARC_REG_MCIP_BCR, mp);
 
@@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
                IS_AVAIL1(mp.gfrc, "GFRC"));
 
        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-       idu_detected = mp.idu;
 
        if (mp.dbg) {
                __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
        .ipi_clear      = mcip_ipi_clear,
 };
 
+#endif
+
 /***************************************************************************
  * ARCv2 Interrupt Distribution Unit (IDU)
  *
@@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
        /* Read IDU BCR to confirm nr_irqs */
        int nr_irqs = of_irq_count(intc);
        int i, irq;
+       struct mcip_bcr mp;
+
+       READ_BCR(ARC_REG_MCIP_BCR, mp);
 
-       if (!idu_detected)
+       if (!mp.idu)
                panic("IDU not detected, but DeviceTree using it");
 
        pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 9a28497..42e964d 100644 (file)
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                              char *secstr, struct module *mod)
 {
 #ifdef CONFIG_ARC_DW2_UNWIND
-       int i;
-
        mod->arch.unw_sec_idx = 0;
        mod->arch.unw_info = NULL;
-
-       for (i = 1; i < hdr->e_shnum; i++) {
-               if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
-                       mod->arch.unw_sec_idx = i;
-                       break;
-               }
-       }
+       mod->arch.secstr = secstr;
 #endif
        return 0;
 }
@@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
                       unsigned int relsec,     /* sec index for relo sec */
                       struct module *module)
 {
-       int i, n;
+       int i, n, relo_type;
        Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
        Elf32_Sym *sym_entry, *sym_sec;
-       Elf32_Addr relocation;
-       Elf32_Addr location;
-       Elf32_Addr sec_to_patch;
-       int relo_type;
-
-       sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+       Elf32_Addr relocation, location, tgt_addr;
+       unsigned int tgtsec;
+
+       /*
+        * @relsec has relocations e.g. .rela.init.text
+        * @tgtsec is section to patch e.g. .init.text
+        */
+       tgtsec = sechdrs[relsec].sh_info;
+       tgt_addr = sechdrs[tgtsec].sh_addr;
        sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
        n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
 
-       pr_debug("\n========== Module Sym reloc ===========================\n");
-       pr_debug("Section to fixup %x\n", sec_to_patch);
+       pr_debug("\nSection to fixup %s @%x\n",
+                module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
        pr_debug("=========================================================\n");
-       pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+       pr_debug("r_off\tr_add\tst_value ADDRESS  VALUE\n");
        pr_debug("=========================================================\n");
 
        /* Loop thru entries in relocation section */
        for (i = 0; i < n; i++) {
+               const char *s;
 
                /* This is where to make the change */
-               location = sec_to_patch + rel_entry[i].r_offset;
+               location = tgt_addr + rel_entry[i].r_offset;
 
                /* This is the symbol it is referring to.  Note that all
                   undefined symbols have been resolved.  */
@@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
                relocation = sym_entry->st_value + rel_entry[i].r_addend;
 
-               pr_debug("\t%x\t\t%x\t\t%x  %x %x [%s]\n",
-                       rel_entry[i].r_offset, rel_entry[i].r_addend,
-                       sym_entry->st_value, location, relocation,
-                       strtab + sym_entry->st_name);
+               if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+                       s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+               } else {
+                       s = strtab + sym_entry->st_name;
+               }
+
+               pr_debug("   %x\t%x\t%x %x %x [%s]\n",
+                        rel_entry[i].r_offset, rel_entry[i].r_addend,
+                        sym_entry->st_value, location, relocation, s);
 
                /* This assumes modules are built with -mlong-calls
                 * so any branches/jumps are absolute 32 bit jmps
@@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
                        goto relo_err;
 
        }
+
+       if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+               module->arch.unw_sec_idx = tgtsec;
+
        return 0;
 
 relo_err:
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index be1972b..59aa43c 100644 (file)
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
        return task_thread_info(current)->thr_ptr;
 }
 
+SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+{
+       int uval;
+       int ret;
+
+       /*
+        * This is only for old cores lacking LLOCK/SCOND, which by definition
+        * can't possibly be SMP. Thus doesn't need to be SMP safe.
+        * And this also helps reduce the overhead for serializing in
+        * the UP case
+        */
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+               return -EFAULT;
+
+       preempt_disable();
+
+       ret = __get_user(uval, uaddr);
+       if (ret)
+               goto done;
+
+       if (uval != expected)
+               ret = -EAGAIN;
+       else
+               ret = __put_user(new, uaddr);
+
+done:
+       preempt_enable();
+
+       return ret;
+}
+
 void arch_cpu_idle(void)
 {
        /* sleep, but enable all interrupts before committing */
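
A hedged userspace sketch of how the new syscall might be invoked. The fallback syscall number assumes __NR_arch_specific_syscall is 244 (the asm-generic value); toolchain headers predating this patch will not define __NR_arc_usr_cmpxchg:

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_arc_usr_cmpxchg
#define __NR_arc_usr_cmpxchg 248	/* __NR_arch_specific_syscall + 4, assumed */
#endif

/* Returns 0 if *uaddr held 'expected' and was set to 'new_val';
 * otherwise the libc wrapper returns -1 with errno set to EAGAIN.
 */
static int arc_usr_cmpxchg(int *uaddr, int expected, int new_val)
{
	return (int)syscall(__NR_arc_usr_cmpxchg, uaddr, expected, new_val);
}

/* Example: acquire a simple 0/1 lock word with a retry loop. */
static void lock_word(int *word)
{
	while (arc_usr_cmpxchg(word, 0, 1) != 0 && errno == EAGAIN)
		;	/* retry until the kernel-mediated cmpxchg succeeds */
}
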
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3df7f9c..0385df7 100644 (file)
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS];  /* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
+static const struct id_to_str arc_cpu_rel[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+       { 0x34, "R4.10"},
+       { 0x35, "R4.11"},
+#else
+       { 0x51, "R2.0" },
+       { 0x52, "R2.1" },
+       { 0x53, "R3.0" },
+#endif
+       { 0x00, NULL   }
+};
+
+static const struct id_to_str arc_cpu_nm[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+       { 0x20, "ARC 600"   },
+       { 0x30, "ARC 770"   },  /* 750 identified separately */
+#else
+       { 0x40, "ARC EM"  },
+       { 0x50, "ARC HS38"  },
+#endif
+       { 0x00, "Unknown"   }
+};
+
 static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
 {
        if (is_isa_arcompact()) {
@@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
        struct bcr_timer timer;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+       const struct id_to_str *tbl;
+
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
        READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
+       for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
+               if (cpu->core.family == tbl->id) {
+                       cpu->details = tbl->str;
+                       break;
+               }
+       }
+
+       for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
+               if ((cpu->core.family & 0xF0) == tbl->id)
+                       break;
+       }
+       cpu->name = tbl->str;
+
        READ_BCR(ARC_REG_TIMERS_BCR, timer);
        cpu->extn.timer0 = timer.t0;
        cpu->extn.timer1 = timer.t1;
@@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
        cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;        /* 1,3 */
        cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
        cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
+       cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
+                               IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
+
        READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
 
        /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
        cpu->extn.rtt = bcr.ver ? 1 : 0;
 
        cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-}
 
-static const struct cpuinfo_data arc_cpu_tbl[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
-       { {0x20, "ARC 600"      }, 0x2F},
-       { {0x30, "ARC 700"      }, 0x33},
-       { {0x34, "ARC 700 R4.10"}, 0x34},
-       { {0x35, "ARC 700 R4.11"}, 0x35},
-#else
-       { {0x50, "ARC HS38 R2.0"}, 0x51},
-       { {0x52, "ARC HS38 R2.1"}, 0x52},
-       { {0x53, "ARC HS38 R3.0"}, 0x53},
-#endif
-       { {0x00, NULL           } }
-};
+       /* some hacks for lack of feature BCR info in old ARC700 cores */
+       if (is_isa_arcompact()) {
+               if (!cpu->isa.ver)      /* ISA BCR absent, use Kconfig info */
+                       cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+               else
+                       cpu->isa.atomic = cpu->isa.atomic1;
 
+               cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+                /* there's no direct way to distinguish 750 vs. 770 */
+               if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
+                       cpu->name = "ARC750";
+       }
+}
 
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
        struct bcr_identity *core = &cpu->core;
-       const struct cpuinfo_data *tbl;
-       char *isa_nm;
-       int i, be, atomic;
-       int n = 0;
+       int i, n = 0;
 
        FIX_PTR(cpu);
 
-       if (is_isa_arcompact()) {
-               isa_nm = "ARCompact";
-               be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-
-               atomic = cpu->isa.atomic1;
-               if (!cpu->isa.ver)      /* ISA BCR absent, use Kconfig info */
-                       atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-       } else {
-               isa_nm = "ARCv2";
-               be = cpu->isa.be;
-               atomic = cpu->isa.atomic;
-       }
-
        n += scnprintf(buf + n, len - n,
                       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
                       core->family, core->cpu_id, core->chip_id);
 
-       for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
-               if ((core->family >= tbl->info.id) &&
-                   (core->family <= tbl->up_range)) {
-                       n += scnprintf(buf + n, len - n,
-                                      "processor [%d]\t: %s (%s ISA) %s\n",
-                                      cpu_id, tbl->info.str, isa_nm,
-                                      IS_AVAIL1(be, "[Big-Endian]"));
-                       break;
-               }
-       }
-
-       if (tbl->info.id == 0)
-               n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+       n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+                      cpu_id, cpu->name, cpu->details,
+                      is_isa_arcompact() ? "ARCompact" : "ARCv2",
+                      IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
                       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                                 CONFIG_ARC_HAS_RTC));
 
        n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
-                          IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+                          IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
                           IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
                           IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
 
@@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       IS_AVAIL1(cpu->extn.swap, "swap "),
                       IS_AVAIL1(cpu->extn.minmax, "minmax "),
                       IS_AVAIL1(cpu->extn.crc, "crc "),
-                      IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
+                      IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
 
        if (cpu->bpu.ver)
                n += scnprintf(buf + n, len - n,
@@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
        FIX_PTR(cpu);
 
-       n += scnprintf(buf + n, len - n,
-                      "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
-                      cpu->vec_base, perip_base, perip_end);
+       n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
 
        if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
                n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
         * way to pass it w/o having to kmalloc/free a 2 byte string.
         * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
         */
-       return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+       return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 934150e..82f9bc8 100644 (file)
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
        if (!user_mode(regs))
                show_stacktrace(current, regs);
 }
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/debugfs.h>
-
-static struct dentry *test_dentry;
-static struct dentry *test_dir;
-static struct dentry *test_u32_dentry;
-
-static u32 clr_on_read = 1;
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-u32 numitlb, numdtlb, num_pte_not_present;
-
-static int fill_display_data(char *kbuf)
-{
-       size_t num = 0;
-       num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
-       num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
-       num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
-
-       if (clr_on_read)
-               numitlb = numdtlb = num_pte_not_present = 0;
-
-       return num;
-}
-
-static int tlb_stats_open(struct inode *inode, struct file *file)
-{
-       file->private_data = (void *)__get_free_page(GFP_KERNEL);
-       return 0;
-}
-
-/* called on user read(): display the counters */
-static ssize_t tlb_stats_output(struct file *file,     /* file descriptor */
-                               char __user *user_buf,  /* user buffer */
-                               size_t len,             /* length of buffer */
-                               loff_t *offset)         /* offset in the file */
-{
-       size_t num;
-       char *kbuf = (char *)file->private_data;
-
-       /* All of the data can be shoved in one iteration */
-       if (*offset != 0)
-               return 0;
-
-       num = fill_display_data(kbuf);
-
-       /* simple_read_from_buffer() is a helper for copying to user space:
-          it copies up to @2 (num) bytes from the kernel buffer @4 (kbuf) at
-          offset @3 (offset) into the user space address starting at @1
-          (user_buf). @5 (len) is the max size of the user buffer.
-        */
-       return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
-}
-
-/* called on user write : clears the counters */
-static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
-                              size_t length, loff_t *offset)
-{
-       numitlb = numdtlb = num_pte_not_present = 0;
-       return length;
-}
-
-static int tlb_stats_close(struct inode *inode, struct file *file)
-{
-       free_page((unsigned long)(file->private_data));
-       return 0;
-}
-
-static const struct file_operations tlb_stats_file_ops = {
-       .read = tlb_stats_output,
-       .write = tlb_stats_clear,
-       .open = tlb_stats_open,
-       .release = tlb_stats_close
-};
-#endif
-
-static int __init arc_debugfs_init(void)
-{
-       test_dir = debugfs_create_dir("arc", NULL);
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-       test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
-                                         &tlb_stats_file_ops);
-#endif
-
-       test_u32_dentry =
-           debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
-
-       return 0;
-}
-
-module_init(arc_debugfs_init);
-
-static void __exit arc_debugfs_exit(void)
-{
-       debugfs_remove(test_u32_dentry);
-       debugfs_remove(test_dentry);
-       debugfs_remove(test_dir);
-}
-module_exit(arc_debugfs_exit);
-
-#endif
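
A note on the helper the deleted comment documents: simple_read_from_buffer() takes the copy size second and the valid-data size last, so a one-shot debugfs read normally looks like the sketch below (reusing fill_display_data() from the deleted code; the deleted call passed num and len in the opposite order, which only worked while the user buffer was at least num bytes):

#include <linux/fs.h>

static ssize_t tlb_stats_read(struct file *file, char __user *user_buf,
			      size_t len, loff_t *offset)
{
	char kbuf[128];
	size_t num = fill_display_data(kbuf);	/* valid bytes in kbuf */

	/* copies min(len, num - *offset) bytes and advances *offset, so a
	 * second read() returns 0 without the explicit *offset != 0 check */
	return simple_read_from_buffer(user_buf, len, offset, kbuf, num);
}
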
index 97dddbe..2b96cfc 100644 (file)
@@ -22,8 +22,8 @@
 #include <asm/setup.h>
 
 static int l2_line_sz;
-int ioc_exists;
-volatile int slc_enable = 1, ioc_enable = 1;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
        PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
-       if (!is_isa_arcv2())
-                return buf;
-
        p = &cpuinfo_arc700[c].slc;
        if (p->ver)
                n += scnprintf(buf + n, len - n,
                               "SLC\t\t: %uK, %uB Line%s\n",
                               p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
 
-       if (ioc_exists)
-               n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
-                               IS_DISABLED_RUN(ioc_enable));
+       n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+                      perip_base,
+                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
 
        return buf;
 }
@@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
        }
 
        READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
-       if (cbcr.c && ioc_enable)
+       if (cbcr.c)
                ioc_exists = 1;
+       else
+               ioc_enable = 0;
 
        /* HS 2.0 didn't have AUX_VOL */
        if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1002,7 +1001,7 @@ void arc_cache_init(void)
                        read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
        }
 
-       if (is_isa_arcv2() && ioc_exists) {
+       if (is_isa_arcv2() && ioc_enable) {
                /* IO coherency base - 0x8z */
                write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
                /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
index 20afc65..60aab5a 100644 (file)
@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
         *   -For coherent data, Read/Write to buffers terminate early in cache
         *   (vs. always going to memory - thus are faster)
         */
-       if ((is_isa_arcv2() && ioc_exists) ||
+       if ((is_isa_arcv2() && ioc_enable) ||
            (attrs & DMA_ATTR_NON_CONSISTENT))
                need_coh = 0;
 
@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
        int is_non_coh = 1;
 
        is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-                       (is_isa_arcv2() && ioc_exists);
+                       (is_isa_arcv2() && ioc_enable);
 
        if (PageHighMem(page) || !is_non_coh)
                iounmap((void __force __iomem *)vaddr);
index ec868a9..bdb295e 100644 (file)
@@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
        char super_pg[64] = "";
 
        if (p_mmu->s_pg_sz_m)
-               scnprintf(super_pg, 64, "%dM Super Page%s, ",
+               scnprintf(super_pg, 64, "%dM Super Page %s",
                          p_mmu->s_pg_sz_m,
                          IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
 
        n += scnprintf(buf + n, len - n,
-                     "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
+                     "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
                       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
                       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
                       p_mmu->u_dtlb, p_mmu->u_itlb,
-                      IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
+                      IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
 
        return buf;
 }
index f1967ee..b30e4e3 100644 (file)
@@ -237,15 +237,6 @@ ex_saved_reg1:
 
 2:
 
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-       and.f 0, r0, _PAGE_PRESENT
-       bz   1f
-       ld   r3, [num_pte_not_present]
-       add  r3, r3, 1
-       st   r3, [num_pte_not_present]
-1:
-#endif
-
 .endm
 
 ;-----------------------------------------------------------------
@@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
 
        TLBMISS_FREEUP_REGS
 
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-       ld  r0, [@numitlb]
-       add r0, r0, 1
-       st  r0, [@numitlb]
-#endif
-
        ;----------------------------------------------------------------
        ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
        LOAD_FAULT_PTE
@@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
 
        TLBMISS_FREEUP_REGS
 
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-       ld  r0, [@numdtlb]
-       add r0, r0, 1
-       st  r0, [@numdtlb]
-#endif
-
        ;----------------------------------------------------------------
        ; Get the PTE corresponding to V-addr accessed
        ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
index 03e9273..08bb84f 100644 (file)
@@ -1312,6 +1312,13 @@ static int init_hyp_mode(void)
                goto out_err;
        }
 
+       err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+                                 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
+       if (err) {
+               kvm_err("Cannot map bss section\n");
+               goto out_err;
+       }
+
        /*
         * Map the Hyp stack pages
         */
index 30398db..969ef88 100644 (file)
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
 
 config RANDOMIZE_MODULE_REGION_FULL
        bool "Randomize the module region independently from the core kernel"
-       depends on RANDOMIZE_BASE
+       depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
        default y
        help
          Randomizes the location of the module region without considering the
index ab51aed..3635b86 100644 (file)
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS                :=-9
 
 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux                += -pie -Bsymbolic
+LDFLAGS_vmlinux                += -pie -shared -Bsymbolic
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
index 758d74f..a27c324 100644 (file)
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
        u16 capability;
        int def_scope;                  /* default scope */
        bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-       void (*enable)(void *);         /* Called on all active CPUs */
+       int (*enable)(void *);          /* Called on all active CPUs */
        union {
                struct {        /* To be used for erratum handling only */
                        u32 midr_model;
index db0563c..f7865dd 100644 (file)
@@ -18,6 +18,9 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
+#include <linux/sched.h>
+
 extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
 
 #endif /* __ASM_EXEC_H */
index fd9d5fd..f5ea0ba 100644 (file)
@@ -178,11 +178,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
-       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
-}
-
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -203,6 +198,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+       return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+               kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+}
+
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
index ba62df8..b71086d 100644 (file)
@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define _virt_addr_valid(kaddr)        pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr)  (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(page)   (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
 #define page_to_virt(page)     ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)    ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
index e12af67..06ff7fd 100644 (file)
@@ -17,6 +17,7 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC   "aarch64"
 
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
                          Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start            (kimage_vaddr - KIMAGE_VADDR)
+#endif
 extern u64 module_alloc_base;
 #else
 #define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
index 2fee2f5..5394c84 100644 (file)
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr,                        \
                                                                        \
        switch (size) {                                                 \
        case 1:                                                         \
-               do {                                                    \
-                       asm ("//__per_cpu_" #op "_1\n"                  \
-                       "ldxrb    %w[ret], %[ptr]\n"                    \
+               asm ("//__per_cpu_" #op "_1\n"                          \
+               "1:     ldxrb     %w[ret], %[ptr]\n"                    \
                        #asm_op " %w[ret], %w[ret], %w[val]\n"          \
-                       "stxrb    %w[loop], %w[ret], %[ptr]\n"          \
-                       : [loop] "=&r" (loop), [ret] "=&r" (ret),       \
-                         [ptr] "+Q"(*(u8 *)ptr)                        \
-                       : [val] "Ir" (val));                            \
-               } while (loop);                                         \
+               "       stxrb     %w[loop], %w[ret], %[ptr]\n"          \
+               "       cbnz      %w[loop], 1b"                         \
+               : [loop] "=&r" (loop), [ret] "=&r" (ret),               \
+                 [ptr] "+Q"(*(u8 *)ptr)                                \
+               : [val] "Ir" (val));                                    \
                break;                                                  \
        case 2:                                                         \
-               do {                                                    \
-                       asm ("//__per_cpu_" #op "_2\n"                  \
-                       "ldxrh    %w[ret], %[ptr]\n"                    \
+               asm ("//__per_cpu_" #op "_2\n"                          \
+               "1:     ldxrh     %w[ret], %[ptr]\n"                    \
                        #asm_op " %w[ret], %w[ret], %w[val]\n"          \
-                       "stxrh    %w[loop], %w[ret], %[ptr]\n"          \
-                       : [loop] "=&r" (loop), [ret] "=&r" (ret),       \
-                         [ptr]  "+Q"(*(u16 *)ptr)                      \
-                       : [val] "Ir" (val));                            \
-               } while (loop);                                         \
+               "       stxrh     %w[loop], %w[ret], %[ptr]\n"          \
+               "       cbnz      %w[loop], 1b"                         \
+               : [loop] "=&r" (loop), [ret] "=&r" (ret),               \
+                 [ptr]  "+Q"(*(u16 *)ptr)                              \
+               : [val] "Ir" (val));                                    \
                break;                                                  \
        case 4:                                                         \
-               do {                                                    \
-                       asm ("//__per_cpu_" #op "_4\n"                  \
-                       "ldxr     %w[ret], %[ptr]\n"                    \
+               asm ("//__per_cpu_" #op "_4\n"                          \
+               "1:     ldxr      %w[ret], %[ptr]\n"                    \
                        #asm_op " %w[ret], %w[ret], %w[val]\n"          \
-                       "stxr     %w[loop], %w[ret], %[ptr]\n"          \
-                       : [loop] "=&r" (loop), [ret] "=&r" (ret),       \
-                         [ptr] "+Q"(*(u32 *)ptr)                       \
-                       : [val] "Ir" (val));                            \
-               } while (loop);                                         \
+               "       stxr      %w[loop], %w[ret], %[ptr]\n"          \
+               "       cbnz      %w[loop], 1b"                         \
+               : [loop] "=&r" (loop), [ret] "=&r" (ret),               \
+                 [ptr] "+Q"(*(u32 *)ptr)                               \
+               : [val] "Ir" (val));                                    \
                break;                                                  \
        case 8:                                                         \
-               do {                                                    \
-                       asm ("//__per_cpu_" #op "_8\n"                  \
-                       "ldxr     %[ret], %[ptr]\n"                     \
+               asm ("//__per_cpu_" #op "_8\n"                          \
+               "1:     ldxr      %[ret], %[ptr]\n"                     \
                        #asm_op " %[ret], %[ret], %[val]\n"             \
-                       "stxr     %w[loop], %[ret], %[ptr]\n"           \
-                       : [loop] "=&r" (loop), [ret] "=&r" (ret),       \
-                         [ptr] "+Q"(*(u64 *)ptr)                       \
-                       : [val] "Ir" (val));                            \
-               } while (loop);                                         \
+               "       stxr      %w[loop], %[ret], %[ptr]\n"           \
+               "       cbnz      %w[loop], 1b"                         \
+               : [loop] "=&r" (loop), [ret] "=&r" (ret),               \
+                 [ptr] "+Q"(*(u64 *)ptr)                               \
+               : [val] "Ir" (val));                                    \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 
        switch (size) {
        case 1:
-               do {
-                       asm ("//__percpu_xchg_1\n"
-                       "ldxrb %w[ret], %[ptr]\n"
-                       "stxrb %w[loop], %w[val], %[ptr]\n"
-                       : [loop] "=&r"(loop), [ret] "=&r"(ret),
-                         [ptr] "+Q"(*(u8 *)ptr)
-                       : [val] "r" (val));
-               } while (loop);
+               asm ("//__percpu_xchg_1\n"
+               "1:     ldxrb   %w[ret], %[ptr]\n"
+               "       stxrb   %w[loop], %w[val], %[ptr]\n"
+               "       cbnz    %w[loop], 1b"
+               : [loop] "=&r"(loop), [ret] "=&r"(ret),
+                 [ptr] "+Q"(*(u8 *)ptr)
+               : [val] "r" (val));
                break;
        case 2:
-               do {
-                       asm ("//__percpu_xchg_2\n"
-                       "ldxrh %w[ret], %[ptr]\n"
-                       "stxrh %w[loop], %w[val], %[ptr]\n"
-                       : [loop] "=&r"(loop), [ret] "=&r"(ret),
-                         [ptr] "+Q"(*(u16 *)ptr)
-                       : [val] "r" (val));
-               } while (loop);
+               asm ("//__percpu_xchg_2\n"
+               "1:     ldxrh   %w[ret], %[ptr]\n"
+               "       stxrh   %w[loop], %w[val], %[ptr]\n"
+               "       cbnz    %w[loop], 1b"
+               : [loop] "=&r"(loop), [ret] "=&r"(ret),
+                 [ptr] "+Q"(*(u16 *)ptr)
+               : [val] "r" (val));
                break;
        case 4:
-               do {
-                       asm ("//__percpu_xchg_4\n"
-                       "ldxr %w[ret], %[ptr]\n"
-                       "stxr %w[loop], %w[val], %[ptr]\n"
-                       : [loop] "=&r"(loop), [ret] "=&r"(ret),
-                         [ptr] "+Q"(*(u32 *)ptr)
-                       : [val] "r" (val));
-               } while (loop);
+               asm ("//__percpu_xchg_4\n"
+               "1:     ldxr    %w[ret], %[ptr]\n"
+               "       stxr    %w[loop], %w[val], %[ptr]\n"
+               "       cbnz    %w[loop], 1b"
+               : [loop] "=&r"(loop), [ret] "=&r"(ret),
+                 [ptr] "+Q"(*(u32 *)ptr)
+               : [val] "r" (val));
                break;
        case 8:
-               do {
-                       asm ("//__percpu_xchg_8\n"
-                       "ldxr %[ret], %[ptr]\n"
-                       "stxr %w[loop], %[val], %[ptr]\n"
-                       : [loop] "=&r"(loop), [ret] "=&r"(ret),
-                         [ptr] "+Q"(*(u64 *)ptr)
-                       : [val] "r" (val));
-               } while (loop);
+               asm ("//__percpu_xchg_8\n"
+               "1:     ldxr    %[ret], %[ptr]\n"
+               "       stxr    %w[loop], %[val], %[ptr]\n"
+               "       cbnz    %w[loop], 1b"
+               : [loop] "=&r"(loop), [ret] "=&r"(ret),
+                 [ptr] "+Q"(*(u64 *)ptr)
+               : [val] "r" (val));
                break;
        default:
                BUILD_BUG();
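
The point of this rewrite is that the retry branch must live inside the asm block: with the old C-level do/while, the compiler was free to insert loads and stores (register spills, instrumentation) between the ldxr and the stxr, and any such memory access can clear the exclusive monitor, so the stxr might never succeed. Keeping the whole loop in asm with cbnz guarantees nothing intervenes. A minimal standalone sketch of the pattern, not the kernel macro itself (AArch64, GCC/Clang extended asm assumed):

static inline unsigned long ll_sc_add(unsigned long *ptr, unsigned long val)
{
	unsigned long ret, loop;

	asm volatile(
	"1:	ldxr	%[ret], %[p]\n"		  /* load-exclusive */
	"	add	%[ret], %[ret], %[val]\n"
	"	stxr	%w[loop], %[ret], %[p]\n" /* store-exclusive */
	"	cbnz	%w[loop], 1b"		  /* retry inside the asm */
	: [ret] "=&r" (ret), [loop] "=&r" (loop), [p] "+Q" (*ptr)
	: [val] "r" (val));

	return ret;
}
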
index df2e53d..60e3482 100644 (file)
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
index e8d46e8..6c80b36 100644 (file)
@@ -286,7 +286,7 @@ asm(
 
 #define write_sysreg_s(v, r) do {                                      \
        u64 __val = (u64)v;                                             \
-       asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val));  \
+       asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
 
 static inline void config_sctlr_el1(u32 clear, u32 set)
index bcaf6fb..55d0adb 100644 (file)
@@ -21,6 +21,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
        flag;                                                           \
 })
 
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr)            sign_extend64(addr, 55)
+
 #define access_ok(type, addr, size)    __range_ok(addr, size)
 #define user_addr_max                  get_fs
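
untagged_addr() works because on arm64 bit 55 selects between the user (TTBR0) and kernel (TTBR1) address ranges: sign-extending from bit 55 rewrites the top byte to all zeroes for a user pointer, discarding any tag, and to all ones for a kernel pointer. A freestanding sketch of the sign_extend64() helper it builds on (the real one lives in include/linux/bitops.h):

#include <stdint.h>

/* sign-extend 'value' from bit 'index' (0-based) upwards */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

/* e.g. a tag of 0x5a in the top byte is stripped:
 *   sign_extend64(0x5a00007fdeadbeef, 55) == 0x00007fdeadbeef  */
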
 
index 42ffdb5..b0988bb 100644 (file)
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 /*
  * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
  */
-#define __user_swpX_asm(data, addr, res, temp, B)              \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS      4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B)       \
        __asm__ __volatile__(                                   \
+       "       mov             %w3, %w7\n"                     \
        ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,    \
                    CONFIG_ARM64_PAN)                           \
-       "0:     ldxr"B"         %w2, [%3]\n"                    \
-       "1:     stxr"B"         %w0, %w1, [%3]\n"               \
+       "0:     ldxr"B"         %w2, [%4]\n"                    \
+       "1:     stxr"B"         %w0, %w1, [%4]\n"               \
        "       cbz             %w0, 2f\n"                      \
-       "       mov             %w0, %w4\n"                     \
+       "       sub             %w3, %w3, #1\n"                 \
+       "       cbnz            %w3, 0b\n"                      \
+       "       mov             %w0, %w5\n"                     \
        "       b               3f\n"                           \
        "2:\n"                                                  \
        "       mov             %w1, %w2\n"                     \
        "3:\n"                                                  \
        "       .pushsection     .fixup,\"ax\"\n"               \
        "       .align          2\n"                            \
-       "4:     mov             %w0, %w5\n"                     \
+       "4:     mov             %w0, %w6\n"                     \
        "       b               3b\n"                           \
        "       .popsection"                                    \
        _ASM_EXTABLE(0b, 4b)                                    \
        _ASM_EXTABLE(1b, 4b)                                    \
        ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,    \
                CONFIG_ARM64_PAN)                               \
-       : "=&r" (res), "+r" (data), "=&r" (temp)                \
-       : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)              \
+       : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+       : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT),             \
+         "i" (__SWP_LL_SC_LOOPS)                               \
        : "memory")
 
-#define __user_swp_asm(data, addr, res, temp) \
-       __user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
-       __user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+       __user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+       __user_swpX_asm(data, addr, res, temp, temp2, "b")
 
 /*
  * Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
        }
 
        while (1) {
-               unsigned long temp;
+               unsigned long temp, temp2;
 
                if (type == TYPE_SWPB)
-                       __user_swpb_asm(*data, address, res, temp);
+                       __user_swpb_asm(*data, address, res, temp, temp2);
                else
-                       __user_swp_asm(*data, address, res, temp);
+                       __user_swp_asm(*data, address, res, temp, temp2);
 
                if (likely(res != -EAGAIN) || signal_pending(current))
                        break;
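
The new %w3 counter exists to bound the LL/SC sequence: a heavily contended (or maliciously hammered) address could otherwise keep the stxr failing forever inside the asm. After __SWP_LL_SC_LOOPS failed attempts the sequence returns -EAGAIN, and the C loop above re-checks signal_pending() before trying again. The control flow, restated as a plain C sketch with C11 atomics standing in for ldxr/stxr:

#include <errno.h>
#include <stdatomic.h>

#define SWP_LL_SC_LOOPS	4	/* mirrors __SWP_LL_SC_LOOPS above */

/* swap *data with *addr: 0 on success, -EAGAIN if contended */
static int swp_once(_Atomic unsigned int *addr, unsigned int *data)
{
	for (int tries = SWP_LL_SC_LOOPS; tries > 0; tries--) {
		unsigned int old = atomic_load(addr);

		/* _weak may fail spuriously, like a stxr losing the monitor */
		if (atomic_compare_exchange_weak(addr, &old, *data)) {
			*data = old;	/* SWP returns the previous value */
			return 0;
		}
	}
	return -EAGAIN;	/* caller checks for signals, then retries */
}
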
index 0150394..b75e917 100644 (file)
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
                (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 }
 
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
 {
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
+       return 0;
 }
 
 #define MIDR_RANGE(model, min, max) \
index d577f26..c02504e 100644 (file)
@@ -19,7 +19,9 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/sort.h>
+#include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
        for (; caps->matches; caps++)
                if (caps->enable && cpus_have_cap(caps->capability))
-                       on_each_cpu(caps->enable, NULL, true);
+                       /*
+                        * Use stop_machine() as it schedules the work allowing
+                        * us to modify PSTATE, instead of on_each_cpu() which
+                        * uses an IPI, giving us a PSTATE that disappears when
+                        * we return.
+                        */
+                       stop_machine(caps->enable, NULL, cpu_online_mask);
 }
 
 /*
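
This is also why the enable() callbacks in the surrounding hunks change their return type from void to int: stop_machine() takes a cpu_stop_fn_t, which returns int. The shape involved, per include/linux/stop_machine.h of that era:

typedef int (*cpu_stop_fn_t)(void *arg);
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);

/* so a capability hook now looks like (illustrative name): */
static int cpu_enable_example(void *__unused)
{
	/* runs in stopped-machine (process) context on each CPU in the
	 * mask, so PSTATE changes made here survive the return */
	return 0;
}
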
index 427f6d3..332e331 100644 (file)
@@ -586,8 +586,9 @@ CPU_LE(     movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
        b.lt    4f                              // Skip if no PMU present
        mrs     x0, pmcr_el0                    // Disable debug access traps
        ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
-       msr     mdcr_el2, x0                    // all PMU counters from EL1
 4:
+       csel    x0, xzr, x0, lt                 // all PMU counters from EL1
+       msr     mdcr_el2, x0                    // (if they exist)
 
        /* Stage-2 translation */
        msr     vttbr_el2, xzr
index 27b2f13..01753cd 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/alternative.h>
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
+#include <asm/exec.h>
 #include <asm/fpsimd.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
        printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
               regs->pc, lr, regs->pstate);
        printk("sp : %016llx\n", sp);
-       for (i = top_reg; i >= 0; i--) {
+
+       i = top_reg;
+
+       while (i >= 0) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
-               if (i % 2 == 0)
-                       printk("\n");
+               i--;
+
+               if (i % 2 == 0) {
+                       pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+                       i--;
+               }
+
+               pr_cont("\n");
        }
        printk("\n");
 }
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
 }
 
 /* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
 {
        if (IS_ENABLED(CONFIG_ARM64_UAO)) {
                if (task_thread_info(next)->addr_limit == KERNEL_DS)
index b8799e7..1bec41b 100644 (file)
@@ -135,7 +135,7 @@ ENTRY(_cpu_resume)
 
 #ifdef CONFIG_KASAN
        mov     x0, sp
-       bl      kasan_unpoison_remaining_stack
+       bl      kasan_unpoison_task_stack_below
 #endif
 
        ldp     x19, x20, [x29, #16]
index d3f151c..8507703 100644 (file)
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
                        return;
                }
                bootcpu_valid = true;
+               early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
                return;
        }
 
index ad73414..bb0cd78 100644 (file)
@@ -1,8 +1,11 @@
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
+#include <asm/exec.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -49,6 +52,14 @@ void notrace __cpu_suspend_exit(void)
         */
        set_my_cpu_offset(per_cpu_offset(cpu));
 
+       /*
+        * PSTATE was not saved over suspend/resume, re-enable any detected
+        * features that might not have been set correctly.
+        */
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+                       CONFIG_ARM64_PAN));
+       uao_thread_switch(current);
+
        /*
         * Restore HW breakpoint registers to sane values
         * before debug exceptions are possibly reenabled
index 5ff020f..c9986b3 100644 (file)
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
        force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
 {
        config_sctlr_el1(SCTLR_EL1_UCI, 0);
+       return 0;
 }
 
 #define __user_cache_maint(insn, address, res)                 \
-       asm volatile (                                          \
-               "1:     " insn ", %1\n"                         \
-               "       mov     %w0, #0\n"                      \
-               "2:\n"                                          \
-               "       .pushsection .fixup,\"ax\"\n"           \
-               "       .align  2\n"                            \
-               "3:     mov     %w0, %w2\n"                     \
-               "       b       2b\n"                           \
-               "       .popsection\n"                          \
-               _ASM_EXTABLE(1b, 3b)                            \
-               : "=r" (res)                                    \
-               : "r" (address), "i" (-EFAULT) )
+       if (untagged_addr(address) >= user_addr_max())          \
+               res = -EFAULT;                                  \
+       else                                                    \
+               asm volatile (                                  \
+                       "1:     " insn ", %1\n"                 \
+                       "       mov     %w0, #0\n"              \
+                       "2:\n"                                  \
+                       "       .pushsection .fixup,\"ax\"\n"   \
+                       "       .align  2\n"                    \
+                       "3:     mov     %w0, %w2\n"             \
+                       "       b       2b\n"                   \
+                       "       .popsection\n"                  \
+                       _ASM_EXTABLE(1b, 3b)                    \
+                       : "=r" (res)                            \
+                       : "r" (address), "i" (-EFAULT) )
 
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
index 53d9159..0f87883 100644 (file)
@@ -29,7 +29,9 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/preempt.h>
 
+#include <asm/bug.h>
 #include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
 {
+       /*
+        * We modify PSTATE. This won't work from irq context as the PSTATE
+        * is discarded once we return from the exception.
+        */
+       WARN_ON_ONCE(in_interrupt());
+
        config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+       asm(SET_PSTATE_PAN(1));
+       return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
 
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
  * We need to enable the feature at runtime (instead of adding it to
  * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
  */
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
 {
        asm(SET_PSTATE_UAO(1));
+       return 0;
 }
 #endif /* CONFIG_ARM64_UAO */
index 21c489b..212c4d1 100644 (file)
@@ -421,35 +421,35 @@ void __init mem_init(void)
 
        pr_notice("Virtual kernel memory layout:\n");
 #ifdef CONFIG_KASAN
-       pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+       pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
                MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
 #endif
-       pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+       pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                MLM(MODULES_VADDR, MODULES_END));
-       pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+       pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
                MLG(VMALLOC_START, VMALLOC_END));
-       pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+       pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                MLK_ROUNDUP(_text, _etext));
-       pr_cont("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+       pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                MLK_ROUNDUP(__start_rodata, __init_begin));
-       pr_cont("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+       pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                MLK_ROUNDUP(__init_begin, __init_end));
-       pr_cont("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+       pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                MLK_ROUNDUP(_sdata, _edata));
-       pr_cont("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+       pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                MLK_ROUNDUP(__bss_start, __bss_stop));
-       pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
+       pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
                MLK(FIXADDR_START, FIXADDR_TOP));
-       pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+       pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                MLM(PCI_IO_START, PCI_IO_END));
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-       pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
+       pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
                MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
-       pr_cont("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
+       pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
                MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
                    (unsigned long)virt_to_page(high_memory)));
 #endif
-       pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+       pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                MLM(__phys_to_virt(memblock_start_of_DRAM()),
                    (unsigned long)high_memory));
 
index 778a985..4b32168 100644 (file)
@@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       return node_distance(from, to);
+       return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
 }
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
        void *nd;
        int tnid;
 
-       pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
-               nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+       if (start_pfn < end_pfn)
+               pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
+                       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+       else
+               pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
 
        nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        nd = __va(nd_pa);
index 8b8fe67..8d79286 100644 (file)
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        case BFIN_MEM_ACCESS_CORE:
                        case BFIN_MEM_ACCESS_CORE_ONLY:
                                copied = access_process_vm(child, addr, &tmp,
-                                                          to_copy, 0);
+                                                          to_copy, FOLL_FORCE);
                                if (copied)
                                        break;
 
@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
                        case BFIN_MEM_ACCESS_CORE:
                        case BFIN_MEM_ACCESS_CORE_ONLY:
                                copied = access_process_vm(child, addr, &data,
-                                                          to_copy, 1);
+                                                          to_copy,
+                                                          FOLL_FORCE | FOLL_WRITE);
                                break;
                        case BFIN_MEM_ACCESS_DMA:
                                if (safe_dma_memcpy(paddr, &data, to_copy))
index b5698c8..0068fd4 100644 (file)
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
        err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
                             noinpages,
                             0,  /* read access only for in data */
-                            0, /* no force */
                             inpages,
                             NULL);
 
@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
        if (oper.do_cipher){
                err = get_user_pages((unsigned long int)oper.cipher_outdata,
                                     nooutpages,
-                                    1, /* write access for out data */
-                                    0, /* no force */
+                                    FOLL_WRITE, /* write access for out data */
                                     outpages,
                                     NULL);
                up_read(&current->mm->mmap_sem);
@@ -3151,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
        printk("print_dma_descriptors start\n");
 
        printk("iop:\n");
-       printk("\tsid: 0x%lld\n", iop->sid);
+       printk("\tsid: 0x%llx\n", iop->sid);
 
        printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
        printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
index f085229..f0df654 100644 (file)
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
                                /* The trampoline page is globally mapped, no page table to traverse.*/
                                tmp = *(unsigned long*)addr;
                        } else {
-                               copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+                               copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 
                                if (copied != sizeof(tmp))
                                        break;
@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
   int opsize = 0;
 
   /* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
-  copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+  copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
   if (copied != sizeof(opcode))
     return 0;
 
index b408fe6..3cef068 100644 (file)
@@ -31,7 +31,6 @@ struct thread_info {
        int                cpu;                 /* cpu we're on */
        int                preempt_count;       /* 0 => preemptable, <0 => BUG */
        mm_segment_t            addr_limit;
-       struct restart_block restart_block;
 };
 
 /*
@@ -44,9 +43,6 @@ struct thread_info {
        .cpu =          0,                      \
        .preempt_count = INIT_PREEMPT_COUNT,    \
        .addr_limit     = KERNEL_DS,            \
-       .restart_block  = {                     \
-               .fn = do_no_restart_syscall,    \
-       },                                      \
 }
 
 #define init_thread_info       (init_thread_union.thread_info)
index ad1f81f..7138303 100644 (file)
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
        unsigned int er0;
 
        /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+       current->restart_block.fn = do_no_restart_syscall;
 
        /* restore passed registers */
 #define COPY(r)  do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
index 09f8457..5ed0ea9 100644 (file)
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
        u64 virt_addr=simple_strtoull(buf, NULL, 16);
        int ret;
 
-       ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+       ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
        if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
                printk("Virtual address %lx is not existing.\n",virt_addr);
index 6f54d51..31aa8c0 100644 (file)
@@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
                        return 0;
                }
        }
-       copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
+       copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
        if (copied != sizeof(ret))
                return -EIO;
        *val = ret;
@@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
                                *ia64_rse_skip_regs(krbs, regnum) = val;
                        }
                }
-       } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+       } else if (access_process_vm(child, addr, &val, sizeof(val),
+                               FOLL_FORCE | FOLL_WRITE)
                   != sizeof(val))
                return -EIO;
        return 0;
@@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
                ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
                if (ret < 0)
                        return ret;
-               if (access_process_vm(child, addr, &val, sizeof(val), 1)
+               if (access_process_vm(child, addr, &val, sizeof(val),
+                               FOLL_FORCE | FOLL_WRITE)
                    != sizeof(val))
                        return -EIO;
        }
@@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
 
        /* now copy word for word from user rbs to kernel rbs: */
        for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
-               if (access_process_vm(child, addr, &val, sizeof(val), 0)
+               if (access_process_vm(child, addr, &val, sizeof(val),
+                               FOLL_FORCE)
                                != sizeof(val))
                        return -EIO;
 
@@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request,
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                /* read word at location addr */
-               if (access_process_vm(child, addr, &data, sizeof(data), 0)
+               if (access_process_vm(child, addr, &data, sizeof(data),
+                               FOLL_FORCE)
                    != sizeof(data))
                        return -EIO;
                /* ensure return value is not mistaken for error code */
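
The long run of ptrace/gup hunks above and below all apply one API change: the old write/force integer arguments of access_process_vm() and get_user_pages() collapse into a single gup_flags bitmask. A hedged sketch of the resulting convention (peek_word/poke_word are illustrative names, not kernel functions):

#include <linux/mm.h>
#include <linux/sched.h>

/* read: the old trailing 0 becomes FOLL_FORCE, since ptrace has always
 * been allowed to override VM protections */
static int peek_word(struct task_struct *child, unsigned long addr,
		     unsigned long *val)
{
	return access_process_vm(child, addr, val, sizeof(*val),
				 FOLL_FORCE) == sizeof(*val) ? 0 : -EIO;
}

/* write: the old trailing 1 becomes FOLL_FORCE | FOLL_WRITE */
static int poke_word(struct task_struct *child, unsigned long addr,
		     unsigned long val)
{
	return access_process_vm(child, addr, &val, sizeof(val),
				 FOLL_FORCE | FOLL_WRITE) == sizeof(val)
				 ? 0 : -EIO;
}
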
index 51f5e9a..c145605 100644 (file)
@@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child)
        int i;
 
        for (i = 0; i < p->nr_trap; i++)
-               access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+               access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
+                               FOLL_FORCE | FOLL_WRITE);
        p->nr_trap = 0;
 }
 
@@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
        unsigned long next_insn, code;
        unsigned long addr = next_pc & ~3;
 
-       if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+       if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
+                       FOLL_FORCE)
            != sizeof(next_insn)) {
                return -1; /* error */
        }
@@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
        if (register_debug_trap(child, next_pc, next_insn, &code)) {
                return -1; /* error */
        }
-       if (access_process_vm(child, addr, &code, sizeof(code), 1)
+       if (access_process_vm(child, addr, &code, sizeof(code),
+                       FOLL_FORCE | FOLL_WRITE)
            != sizeof(code)) {
                return -1; /* error */
        }
@@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs)
        addr = (regs->bpc - 2) & ~3;
        regs->bpc -= 2;
        if (unregister_debug_trap(current, addr, &code)) {
-           access_process_vm(current, addr, &code, sizeof(code), 1);
+           access_process_vm(current, addr, &code, sizeof(code),
+                   FOLL_FORCE | FOLL_WRITE);
            invalidate_cache();
        }
 }
@@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child)
        /* Compute next pc.  */
        pc = get_stack_long(child, PT_BPC);
 
-       if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+       if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
+                       FOLL_FORCE)
            != sizeof(insn))
                return;
 
index 283b5a1..7e71a4e 100644 (file)
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        break;
 
                copied = access_process_vm(child, (u64)addrOthers, &tmp,
-                               sizeof(tmp), 0);
+                               sizeof(tmp), FOLL_FORCE);
                if (copied != sizeof(tmp))
                        break;
                ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        break;
                ret = 0;
                if (access_process_vm(child, (u64)addrOthers, &data,
-                                       sizeof(data), 1) == sizeof(data))
+                                       sizeof(data),
+                                       FOLL_FORCE | FOLL_WRITE) == sizeof(data))
                        break;
                ret = -EIO;
                break;
index ce96149..622037d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
index 42d124f..d8c3c15 100644 (file)
@@ -287,7 +287,7 @@ slow_irqon:
        pages += nr;
 
        ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-                                     write, 0, pages);
+                                     pages, write ? FOLL_WRITE : 0);
 
        /* Have to be a bit careful with return values */
        if (nr > 0) {
index f7a184b..57d42d1 100644 (file)
@@ -32,9 +32,16 @@ static struct addr_range prep_kernel(void)
        void *addr = 0;
        struct elf_info ei;
        long len;
+       int uncompressed_image = 0;
 
-       partial_decompress(vmlinuz_addr, vmlinuz_size,
+       len = partial_decompress(vmlinuz_addr, vmlinuz_size,
                elfheader, sizeof(elfheader), 0);
+       /* assume uncompressed data if -1 is returned */
+       if (len == -1) {
+               uncompressed_image = 1;
+               memcpy(elfheader, vmlinuz_addr, sizeof(elfheader));
+               printf("No valid compressed data found, assume uncompressed data\n\r");
+       }
 
        if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
                fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
@@ -67,6 +74,13 @@ static struct addr_range prep_kernel(void)
                                        "device tree\n\r");
        }
 
+       if (uncompressed_image) {
+               memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize);
+               printf("0x%lx bytes of uncompressed data copied\n\r",
+                      ei.loadsize);
+               goto out;
+       }
+
        /* Finally, decompress the kernel */
        printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr,
               vmlinuz_addr, vmlinuz_addr+vmlinuz_size);
@@ -82,7 +96,7 @@ static struct addr_range prep_kernel(void)
                         len, ei.loadsize);
 
        printf("Done! Decompressed 0x%lx bytes\n\r", len);
-
+out:
        flush_cache(addr, ei.loadsize);
 
        return (struct addr_range){addr, ei.memsize};
index 01b8a13..3919332 100644 (file)
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
        std     r0,0(r1);                                       \
        ptesync;                                                \
        ld      r0,0(r1);                                       \
-1:     cmp     cr0,r0,r0;                                      \
+1:     cmpd    cr0,r0,r0;                                      \
        bne     1b;                                             \
        IDLE_INST;                                              \
        b       .
index 2e4e7d8..84d49b1 100644 (file)
        ld      reg,PACAKBASE(r13);     /* get high part of &label */   \
        ori     reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
 
+#define __LOAD_HANDLER(reg, label)                                     \
+       ld      reg,PACAKBASE(r13);                                     \
+       ori     reg,reg,(ABS_ADDR(label))@l;
+
 /* Exception register prefixes */
 #define EXC_HV H
 #define EXC_STD
@@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define kvmppc_interrupt kvmppc_interrupt_pr
 #endif
 
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label)                                   \
+       __LOAD_HANDLER(reg, label);                                     \
+       mtctr   reg;                                                    \
+       bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label)                                   \
+       b       label
+
+#endif
+
 #define __KVM_HANDLER_PROLOG(area, n)                                  \
        BEGIN_FTR_SECTION_NESTED(947)                                   \
        ld      r10,area+EX_CFAR(r13);                                  \
index f6f68f7..99e1397 100644 (file)
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
        return cpumask_subset(mm_cpumask(mm),
                              topology_sibling_cpumask(smp_processor_id()));
 }
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+       return cpumask_equal(mm_cpumask(mm),
+                             cpumask_of(smp_processor_id()));
+}
+
 #else
 static inline int mm_is_core_local(struct mm_struct *mm)
 {
        return 1;
 }
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+       return 1;
+}
 #endif
 
 #endif /* __KERNEL__ */
index cf12c58..e8cdfec 100644 (file)
 
 #define __NR__exit __NR_exit
 
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
index f129408..08ba447 100644 (file)
@@ -95,19 +95,35 @@ __start_interrupts:
 /* No virt vectors corresponding with 0x0..0x100 */
 EXC_VIRT_NONE(0x4000, 0x4100)
 
-EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
-       SET_SCRATCH0(r13)
+
 #ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
-       /* Running native on arch 2.06 or later, check if we are
-        * waking up from nap/sleep/winkle.
+       /*
+        * If running native on arch 2.06 or later, check if we are waking up
+        * from nap/sleep/winkle, and branch to idle handler.
         */
-       mfspr   r13,SPRN_SRR1
-       rlwinm. r13,r13,47-31,30,31
-       beq     9f
+#define IDLETEST(n)                                                    \
+       BEGIN_FTR_SECTION ;                                             \
+       mfspr   r10,SPRN_SRR1 ;                                         \
+       rlwinm. r10,r10,47-31,30,31 ;                                   \
+       beq-    1f ;                                                    \
+       cmpwi   cr3,r10,2 ;                                             \
+       BRANCH_TO_COMMON(r10, system_reset_idle_common) ;               \
+1:                                                                     \
+       END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
 
-       cmpwi   cr3,r13,2
-       GET_PACA(r13)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
+       SET_SCRATCH0(r13)
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+                                IDLETEST, 0x100)
+
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
        bl      pnv_restore_hyp_resource
 
        li      r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
        blt     cr3,2f
        b       pnv_wakeup_loss
 2:     b       pnv_wakeup_noloss
+#endif
 
-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
-                                NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
 EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
 
 #ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
 TRAMP_KVM(PACA_EXGEN, 0xb00)
 EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
 
-
-#define LOAD_SYSCALL_HANDLER(reg)                              \
-       ld      reg,PACAKBASE(r13);                             \
-       ori     reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg)                                      \
+       __LOAD_HANDLER(reg, system_call_common)
 
 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
 #define SYSCALL_PSERIES_1                                      \
index 9781c69..03d089b 100644 (file)
@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
        if (!stepped) {
                WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
                        "0x%lx will be disabled.", info->address);
-               perf_event_disable(bp);
+               perf_event_disable_inatomic(bp);
                goto out;
        }
        /*
index bd739fe..72dac0b 100644 (file)
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
  * Threads will spin in HMT_LOW until the lock bit is cleared.
  * r14 - pointer to core_idle_state
  * r15 - used to load contents of core_idle_state
+ * r9  - used as a temporary variable
  */
 
 core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
        bne     3b
        HMT_MEDIUM
        lwarx   r15,0,r14
+       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
+       bne     core_idle_lock_held
        blr
 
 /*
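Editor's note: the two instructions added above close a race in core_idle_lock_held — between the low-priority spin seeing the lock bit clear and the subsequent lwarx, another thread can retake the lock, so the freshly loaded value must be re-checked. A minimal C analogue of the corrected pattern (names and the lock-bit value are illustrative; C11 atomics stand in for the larx sequence):

    #include <stdatomic.h>

    #define CORE_IDLE_LOCK_BIT 0x100        /* illustrative value */

    static unsigned long lock_core_idle_state(_Atomic unsigned long *state)
    {
            unsigned long v;

            do {
                    /* spin at low priority while the lock bit is set */
                    while (atomic_load_explicit(state, memory_order_relaxed) &
                           CORE_IDLE_LOCK_BIT)
                            ;
                    /* reload for the reservation (the lwarx above) ... */
                    v = atomic_load_explicit(state, memory_order_acquire);
                    /* ... and re-check: the bit may have been set again */
            } while (v & CORE_IDLE_LOCK_BIT);

            return v;
    }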
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
        std     r9,_MSR(r1)
        std     r1,PACAR1(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-       /* Tell KVM we're entering idle */
-       li      r4,KVM_HWTHREAD_IN_IDLE
-       stb     r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
        /*
         * Go to real mode to do the nap, as required by the architecture.
         * Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
 
        .globl pnv_enter_arch207_idle_mode
 pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+       /* Tell KVM we're entering idle */
+       li      r4,KVM_HWTHREAD_IN_IDLE
+       /******************************************************/
+       /*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
+       /* The following store to HSTATE_HWTHREAD_STATE(r13)  */
+       /* MUST occur in real mode, i.e. with the MMU off,    */
+       /* and the MMU must stay off until we clear this flag */
+       /* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
+       /* reset interrupt vector in exceptions-64s.S.        */
+       /* The reason is that another thread can switch the   */
+       /* MMU to a guest context whenever this flag is set   */
+       /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
+       /* that would potentially cause this thread to start  */
+       /* executing instructions from guest memory in        */
+       /* hypervisor mode, leading to a host crash or data   */
+       /* corruption, or worse.                              */
+       /******************************************************/
+       stb     r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
        stb     r3,PACA_THREAD_IDLE_STATE(r13)
        cmpwi   cr3,r3,PNV_THREAD_SLEEP
        bge     cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
  * r3 - requested stop state
  */
 power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+       /* Tell KVM we're entering idle */
+       li      r4,KVM_HWTHREAD_IN_IDLE
+       /* DO THIS IN REAL MODE!  See comment above. */
+       stb     r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
 /*
  * Check if the requested state is a deep idle state.
  */
index 9e7c10f..ce6dc61 100644 (file)
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
        /* Ensure that restore_math() will restore */
        if (msr_diff & MSR_FP)
                current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
                current->thread.load_vec = 1;
 #endif
index f52b7db..010b7b3 100644 (file)
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        break;
 
                copied = access_process_vm(child, (u64)addrOthers, &tmp,
-                               sizeof(tmp), 0);
+                               sizeof(tmp), FOLL_FORCE);
                if (copied != sizeof(tmp))
                        break;
                ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        break;
                ret = 0;
                if (access_process_vm(child, (u64)addrOthers, &tmp,
-                                       sizeof(tmp), 1) == sizeof(tmp))
+                                       sizeof(tmp),
+                                       FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
                        break;
                ret = -EIO;
                break;
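Editor's note: these ptrace hunks track an mm API change made in this series — the trailing int write/force arguments of access_process_vm() are replaced by a FOLL_* gup_flags bitmask. A minimal sketch of the new calling convention (the helper names are illustrative, not from the patch):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* read a word from the tracee, forcing access the way ptrace does */
    static int peek_word(struct task_struct *child, unsigned long addr, long *val)
    {
            int copied = access_process_vm(child, addr, val, sizeof(*val),
                                           FOLL_FORCE);

            return copied == sizeof(*val) ? 0 : -EIO;
    }

    /* write a word into the tracee */
    static int poke_word(struct task_struct *child, unsigned long addr, long val)
    {
            int copied = access_process_vm(child, addr, &val, sizeof(val),
                                           FOLL_FORCE | FOLL_WRITE);

            return copied == sizeof(val) ? 0 : -EIO;
    }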
index 82ff5de..a0ea63a 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/ppc-opcode.h>
 #include <asm/pnv-pci.h>
 #include <asm/opal.h>
+#include <asm/smp.h>
 
 #include "book3s_xics.h"
 
index bb03542..362954f 100644 (file)
@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+               if (mm == NULL)
+                       return 1;
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
index 75b9cd6..a51c188 100644 (file)
@@ -845,7 +845,7 @@ void __init dump_numa_cpu_topology(void)
                return;
 
        for_each_online_node(node) {
-               printk(KERN_DEBUG "Node %d CPUs:", node);
+               pr_info("Node %d CPUs:", node);
 
                count = 0;
                /*
@@ -856,52 +856,18 @@ void __init dump_numa_cpu_topology(void)
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
-                                       printk(" %u", cpu);
+                                       pr_cont(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
-                                       printk("-%u", cpu - 1);
+                                       pr_cont("-%u", cpu - 1);
                                count = 0;
                        }
                }
 
                if (count > 1)
-                       printk("-%u", nr_cpu_ids - 1);
-               printk("\n");
-       }
-}
-
-static void __init dump_numa_memory_topology(void)
-{
-       unsigned int node;
-       unsigned int count;
-
-       if (min_common_depth == -1 || !numa_enabled)
-               return;
-
-       for_each_online_node(node) {
-               unsigned long i;
-
-               printk(KERN_DEBUG "Node %d Memory:", node);
-
-               count = 0;
-
-               for (i = 0; i < memblock_end_of_DRAM();
-                    i += (1 << SECTION_SIZE_BITS)) {
-                       if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
-                               if (count == 0)
-                                       printk(" 0x%lx", i);
-                               ++count;
-                       } else {
-                               if (count > 0)
-                                       printk("-0x%lx", i);
-                               count = 0;
-                       }
-               }
-
-               if (count > 0)
-                       printk("-0x%lx", i);
-               printk("\n");
+                       pr_cont("-%u", nr_cpu_ids - 1);
+               pr_cont("\n");
        }
 }
 
@@ -947,8 +913,6 @@ void __init initmem_init(void)
 
        if (parse_numa_properties())
                setup_nonnuma();
-       else
-               dump_numa_memory_topology();
 
        memblock_dump_all();
 
index 0e49ec5..bda8c43 100644 (file)
@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;
 
-       if (!mm_is_core_local(mm)) {
+       if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
                if (lock_tlbie)
@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;
 
-       if (!mm_is_core_local(mm)) {
+       if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
                if (lock_tlbie)
@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
-       if (!mm_is_core_local(mm)) {
+       if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
                if (lock_tlbie)
@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 {
        unsigned long pid;
        unsigned long addr;
-       int local = mm_is_core_local(mm);
+       int local = mm_is_thread_local(mm);
        unsigned long ap = mmu_get_ap(psize);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
        unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
index 64053d9..836c562 100644 (file)
@@ -12,9 +12,7 @@
 
 #ifndef __ASSEMBLY__
 
-unsigned long return_address(int depth);
-
-#define ftrace_return_address(n) return_address(n)
+#define ftrace_return_address(n) __builtin_return_address(n)
 
 void _mcount(void);
 void ftrace_caller(void);
index 0332317..602af69 100644 (file)
@@ -192,7 +192,7 @@ struct task_struct;
 struct mm_struct;
 struct seq_file;
 
-typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
 void dump_trace(dump_trace_func_t func, void *data,
                struct task_struct *task, unsigned long sp);
 
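Editor's note: with the reliable flag added to dump_trace_func_t, every s390 callback in the later hunks grows a third parameter. A hypothetical callback under the new signature, counting only reliably-unwound frames:

    static int count_reliable(void *data, unsigned long address, int reliable)
    {
            unsigned int *n = data;

            if (reliable)
                    (*n)++;
            return 0;       /* returning non-zero would stop the walk */
    }

    static unsigned int count_reliable_frames(void)
    {
            unsigned int n = 0;

            dump_trace(count_reliable, &n, NULL, current_stack_pointer());
            return n;
    }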
index 02613ba..3066031 100644 (file)
@@ -9,6 +9,9 @@
 #include <uapi/asm/unistd.h>
 
 #define __IGNORE_time
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
index 43446fa..c74c592 100644 (file)
@@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
                        *ptr++ = '\t';
                ptr += print_insn(ptr, code + start, addr);
                start += opsize;
-               printk("%s", buffer);
+               pr_cont("%s", buffer);
                ptr = buffer;
                ptr += sprintf(ptr, "\n          ");
                hops++;
        }
-       printk("\n");
+       pr_cont("\n");
 }
 
 void print_fn_code(unsigned char *code, unsigned long len)
index 6693383..55d4fe1 100644 (file)
@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
+               if (func(data, sf->gprs[8], 0))
+                       return sp;
                /* Follow the backchain. */
                while (1) {
-                       if (func(data, sf->gprs[8]))
-                               return sp;
                        low = sp;
                        sp = sf->back_chain;
                        if (!sp)
@@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
+                       if (func(data, sf->gprs[8], 1))
+                               return sp;
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
@@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
                        return sp;
                regs = (struct pt_regs *) sp;
                if (!user_mode(regs)) {
-                       if (func(data, regs->psw.addr))
+                       if (func(data, regs->psw.addr, 1))
                                return sp;
                }
                low = sp;
@@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
 }
 EXPORT_SYMBOL_GPL(dump_trace);
 
-struct return_address_data {
-       unsigned long address;
-       int depth;
-};
-
-static int __return_address(void *data, unsigned long address)
-{
-       struct return_address_data *rd = data;
-
-       if (rd->depth--)
-               return 0;
-       rd->address = address;
-       return 1;
-}
-
-unsigned long return_address(int depth)
-{
-       struct return_address_data rd = { .depth = depth + 2 };
-
-       dump_trace(__return_address, &rd, NULL, current_stack_pointer());
-       return rd.address;
-}
-EXPORT_SYMBOL_GPL(return_address);
-
-static int show_address(void *data, unsigned long address)
+static int show_address(void *data, unsigned long address, int reliable)
 {
-       printk("([<%016lx>] %pSR)\n", address, (void *)address);
+       if (reliable)
+               printk(" [<%016lx>] %pSR \n", address, (void *)address);

+       else
+               printk("([<%016lx>] %pSR)\n", address, (void *)address);
        return 0;
 }
 
@@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                else
                        stack = (unsigned long *)task->thread.ksp;
        }
+       printk(KERN_DEFAULT "Stack:\n");
        for (i = 0; i < 20; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
-               if ((i * sizeof(long) % 32) == 0)
-                       printk("%s       ", i == 0 ? "" : "\n");
-               printk("%016lx ", *stack++);
+               if (i % 4 == 0)
+                       printk(KERN_DEFAULT "       ");
+               pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
        }
-       printk("\n");
        show_trace(task, (unsigned long)sp);
 }
 
@@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
        mode = user_mode(regs) ? "User" : "Krnl";
        printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
        if (!user_mode(regs))
-               printk(" (%pSR)", (void *)regs->psw.addr);
-       printk("\n");
+               pr_cont(" (%pSR)", (void *)regs->psw.addr);
+       pr_cont("\n");
        printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
               psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
-       printk(" RI:%x EA:%x", psw->ri, psw->eaba);
-       printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
+       pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
+       printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
        printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
               regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
+       pr_cont("PREEMPT ");
 #endif
 #ifdef CONFIG_SMP
-       printk("SMP ");
+       pr_cont("SMP ");
 #endif
        if (debug_pagealloc_enabled())
-               printk("DEBUG_PAGEALLOC");
-       printk("\n");
+               pr_cont("DEBUG_PAGEALLOC");
+       pr_cont("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        print_modules();
        show_regs(regs);
index 17431f6..955a7b6 100644 (file)
@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
 }
 arch_initcall(service_level_perf_register);
 
-static int __perf_callchain_kernel(void *data, unsigned long address)
+static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
 {
        struct perf_callchain_entry_ctx *entry = data;
 
index 44f84b2..355db9d 100644 (file)
@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
        return 1;
 }
 
-static int save_address(void *data, unsigned long address)
+static int save_address(void *data, unsigned long address, int reliable)
 {
        return __save_address(data, address, 0);
 }
 
-static int save_address_nosched(void *data, unsigned long address)
+static int save_address_nosched(void *data, unsigned long address, int reliable)
 {
        return __save_address(data, address, 1);
 }
index 1cab8a1..7a27eeb 100644 (file)
@@ -119,8 +119,13 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 
        vcpu->stat.exit_validity++;
        trace_kvm_s390_intercept_validity(vcpu, viwhy);
-       WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
-       return -EOPNOTSUPP;
+       KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+                 current->pid, vcpu->kvm);
+
+       /* do not warn on invalid runtime instrumentation mode */
+       WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
+                 viwhy);
+       return -EINVAL;
 }
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
index adb0c34..18d4107 100644 (file)
@@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
        /* Try to get the remaining pages with get_user_pages */
        start += nr << PAGE_SHIFT;
        pages += nr;
-       ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+       ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+                                     write ? FOLL_WRITE : 0);
        /* Have to be a bit careful with return values */
        if (nr > 0)
                ret = (ret < 0) ? nr : ret + nr;
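Editor's note: the gup fast paths across arch/ all follow the same pattern after this change — get_user_pages_unlocked() drops its write/force pair and takes a gup_flags argument after pages. A minimal sketch of a caller (the wrapper name is illustrative):

    #include <linux/mm.h>

    /* pin a user range, falling back the same way as the slow paths above */
    static long pin_user_range(unsigned long start, unsigned long nr_pages,
                               bool write, struct page **pages)
    {
            return get_user_pages_unlocked(start, nr_pages, pages,
                                           write ? FOLL_WRITE : 0);
    }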
index cd404aa..4a0c5bc 100644 (file)
@@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
        } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
+               hugetlb_bad_size();
                pr_err("hugepagesz= specifies an unsupported page size %s\n",
                        string);
                return 0;
index f56a39b..b3e9d18 100644 (file)
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
-       unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
-       unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+       unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
-       unsigned long nr_pages;
-       int rc, zone_enum;
+       pg_data_t *pgdat = NODE_DATA(nid);
+       struct zone *zone;
+       int rc, i;
 
        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;
 
-       while (size_pages > 0) {
-               if (start_pfn < dma_end_pfn) {
-                       nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
-                                  dma_end_pfn - start_pfn : size_pages;
-                       zone_enum = ZONE_DMA;
-               } else if (start_pfn < normal_end_pfn) {
-                       nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
-                                  normal_end_pfn - start_pfn : size_pages;
-                       zone_enum = ZONE_NORMAL;
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               zone = pgdat->node_zones + i;
+               if (zone_idx(zone) != ZONE_MOVABLE) {
+                       /* Add range within existing zone limits, if possible */
+                       zone_start_pfn = zone->zone_start_pfn;
+                       zone_end_pfn = zone->zone_start_pfn +
+                                      zone->spanned_pages;
                } else {
-                       nr_pages = size_pages;
-                       zone_enum = ZONE_MOVABLE;
+                       /* Add remaining range to ZONE_MOVABLE */
+                       zone_start_pfn = start_pfn;
+                       zone_end_pfn = start_pfn + size_pages;
                }
-               rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
-                                start_pfn, size_pages);
+               if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+                       continue;
+               nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+                          zone_end_pfn - start_pfn : size_pages;
+               rc = __add_pages(nid, zone, start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
+               if (!size_pages)
+                       break;
        }
        if (rc)
                vmem_remove_mapping(start, size);
index 16f4c39..9a4de45 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <asm/processor.h>
 
-static int __s390_backtrace(void *data, unsigned long address)
+static int __s390_backtrace(void *data, unsigned long address, int reliable)
 {
        unsigned int *depth = data;
 
index 5583618..4f7314d 100644 (file)
@@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child,
 {
        int copied;
 
-       copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+       copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
 
        return copied != sizeof(*res) ? -EIO : 0;
 }
@@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child,
 {
        int copied;
 
-       copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+       copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
 
        return copied != sizeof(*res) ? -EIO : 0;
 }
@@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child,
 {
        int copied;
 
-       copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+       copied = access_process_vm(child, addr, &val, sizeof(val),
+                       FOLL_FORCE | FOLL_WRITE);
 
        return copied != sizeof(val) ? -EIO : 0;
 }
@@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child,
 {
        int copied;
 
-       copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+       copied = access_process_vm(child, addr, &val, sizeof(val),
+                       FOLL_FORCE | FOLL_WRITE);
 
        return copied != sizeof(val) ? -EIO : 0;
 }
index 0047666..336f33a 100644 (file)
@@ -31,7 +31,7 @@ isa-y                                 := $(isa-y)-up
 endif
 
 cflags-$(CONFIG_CPU_SH2)               := $(call cc-option,-m2,)
-cflags-$(CONFIG_CPU_J2)                        := $(call cc-option,-mj2,)
+cflags-$(CONFIG_CPU_J2)                        += $(call cc-option,-mj2,)
 cflags-$(CONFIG_CPU_SH2A)              += $(call cc-option,-m2a,) \
                                           $(call cc-option,-m2a-nofpu,) \
                                           $(call cc-option,-m4-nofpu,)
index e9c2c42..4e21949 100644 (file)
@@ -22,6 +22,16 @@ config SH_DEVICE_TREE
          have sufficient driver coverage to use this option; do not
          select it if you are using original SuperH hardware.
 
+config SH_JCORE_SOC
+       bool "J-Core SoC"
+       depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+       select CLKSRC_JCORE_PIT
+       select JCORE_AIC
+       default y if CPU_J2
+       help
+         Select this option to include drivers for the core components of
+         the J-Core SoC, including interrupt controllers and timers.
+
 config SH_SOLUTION_ENGINE
        bool "SolutionEngine"
        select SOLUTION_ENGINE
index 94d1eca..2eb81eb 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_MEMORY_START=0x10000000
 CONFIG_MEMORY_SIZE=0x04000000
 CONFIG_CPU_BIG_ENDIAN=y
 CONFIG_SH_DEVICE_TREE=y
+CONFIG_SH_JCORE_SOC=y
 CONFIG_HZ_100=y
 CONFIG_CMDLINE_OVERWRITE=y
 CONFIG_CMDLINE="console=ttyUL0 earlycon"
@@ -20,6 +21,7 @@ CONFIG_INET=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_NETDEVICES=y
+CONFIG_SERIAL_EARLYCON=y
 CONFIG_SERIAL_UARTLITE=y
 CONFIG_SERIAL_UARTLITE_CONSOLE=y
 CONFIG_I2C=y
index 40fa6c8..063c298 100644 (file)
@@ -258,7 +258,8 @@ slow_irqon:
                pages += nr;
 
                ret = get_user_pages_unlocked(start,
-                       (end - start) >> PAGE_SHIFT, write, 0, pages);
+                       (end - start) >> PAGE_SHIFT, pages,
+                       write ? FOLL_WRITE : 0);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
index 9ddc492..ac082dd 100644 (file)
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
                if (copy_from_user(kbuf, (void __user *) uaddr, len))
                        return -EFAULT;
        } else {
-               int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+               int len2 = access_process_vm(target, uaddr, kbuf, len,
+                               FOLL_FORCE);
                if (len2 != len)
                        return -EFAULT;
        }
@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
                if (copy_to_user((void __user *) uaddr, kbuf, len))
                        return -EFAULT;
        } else {
-               int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+               int len2 = access_process_vm(target, uaddr, kbuf, len,
+                               FOLL_FORCE | FOLL_WRITE);
                if (len2 != len)
                        return -EFAULT;
        }
@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
                                if (access_process_vm(target,
                                                      (unsigned long)
                                                      &reg_window[pos],
-                                                     k, sizeof(*k), 0)
+                                                     k, sizeof(*k),
+                                                     FOLL_FORCE)
                                    != sizeof(*k))
                                        return -EFAULT;
                                k++;
@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
                                if (access_process_vm(target,
                                                      (unsigned long)
                                                      &reg_window[pos],
-                                                     &reg, sizeof(reg), 0)
+                                                     &reg, sizeof(reg),
+                                                     FOLL_FORCE)
                                    != sizeof(reg))
                                        return -EFAULT;
                                if (access_process_vm(target,
                                                      (unsigned long) u,
-                                                     &reg, sizeof(reg), 1)
+                                                     &reg, sizeof(reg),
+                                                     FOLL_FORCE | FOLL_WRITE)
                                    != sizeof(reg))
                                        return -EFAULT;
                                pos++;
@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
                                                      (unsigned long)
                                                      &reg_window[pos],
                                                      (void *) k,
-                                                     sizeof(*k), 1)
+                                                     sizeof(*k),
+                                                     FOLL_FORCE | FOLL_WRITE)
                                    != sizeof(*k))
                                        return -EFAULT;
                                k++;
@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
                                if (access_process_vm(target,
                                                      (unsigned long)
                                                      u,
-                                                     &reg, sizeof(reg), 0)
+                                                     &reg, sizeof(reg),
+                                                     FOLL_FORCE)
                                    != sizeof(reg))
                                        return -EFAULT;
                                if (access_process_vm(target,
                                                      (unsigned long)
                                                      &reg_window[pos],
-                                                     &reg, sizeof(reg), 1)
+                                                     &reg, sizeof(reg),
+                                                     FOLL_FORCE | FOLL_WRITE)
                                    != sizeof(reg))
                                        return -EFAULT;
                                pos++;
index 4e06750..cd0e32b 100644 (file)
@@ -238,7 +238,8 @@ slow:
                pages += nr;
 
                ret = get_user_pages_unlocked(start,
-                       (end - start) >> PAGE_SHIFT, write, 0, pages);
+                       (end - start) >> PAGE_SHIFT, pages,
+                       write ? FOLL_WRITE : 0);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
index 77f28ce..9976fce 100644 (file)
@@ -5,8 +5,8 @@
 OBJECT_FILES_NON_STANDARD_entry_$(BITS).o   := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
 
-CFLAGS_syscall_64.o            += -Wno-override-init
-CFLAGS_syscall_32.o            += -Wno-override-init
+CFLAGS_syscall_64.o            += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_32.o            += $(call cc-option,-Wno-override-init,)
 obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y                          += common.o
 
index ff6ef7b..2b36185 100644 (file)
 380    i386    pkey_mprotect           sys_pkey_mprotect
 381    i386    pkey_alloc              sys_pkey_alloc
 382    i386    pkey_free               sys_pkey_free
-#383   i386    pkey_get                sys_pkey_get
-#384   i386    pkey_set                sys_pkey_set
index 2f024d0..e93ef0b 100644 (file)
 329    common  pkey_mprotect           sys_pkey_mprotect
 330    common  pkey_alloc              sys_pkey_alloc
 331    common  pkey_free               sys_pkey_free
-#332   common  pkey_get                sys_pkey_get
-#333   common  pkey_set                sys_pkey_set
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index a3a9eb8..a74a2db 100644 (file)
@@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
 
        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
-        * assume at least 3 events:
+        * assume at least 3 events, when not running in a hypervisor:
         */
-       if (version > 1)
-               x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+       if (version > 1) {
+               int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
+
+               x86_pmu.num_counters_fixed =
+                       max((int)edx.split.num_counters_fixed, assume);
+       }
 
        if (boot_cpu_has(X86_FEATURE_PDCM)) {
                u64 capabilities;
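Editor's note: a worked instance of the new guard, assuming CPUID under-reports num_counters_fixed as 0 (as many hypervisors do): on bare metal the result is max(0, 3) = 3 assumed fixed counters, under a hypervisor it is max(0, 0) = 0, i.e. CPUID is trusted. A sketch:

    static int fixed_counters(int reported)
    {
            /* assume the 3 architectural fixed counters only on bare metal */
            int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);

            return max(reported, assume);
    }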
@@ -3898,6 +3902,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_XEON_PHI_KNL:
+       case INTEL_FAM6_XEON_PHI_KNM:
                memcpy(hw_cache_event_ids,
                       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs,
@@ -3912,7 +3917,7 @@ __init int intel_pmu_init(void)
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
-               pr_cont("Knights Landing events, ");
+               pr_cont("Knights Landing/Mill events, ");
                break;
 
        case INTEL_FAM6_SKYLAKE_MOBILE:
index 3ca87b5..4f5ac72 100644 (file)
@@ -48,7 +48,8 @@
  *                            Scope: Core
  *     MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *                            perf code: 0x02
- *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ *                                             SKL,KNL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
- *                            Available model: SNB,IVB,HSW,BDW,SKL
+ *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
- *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
- *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ *                                             SKL,KNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
@@ -118,6 +120,7 @@ struct cstate_model {
 
 /* Quirk flags */
 #define SLM_PKG_C6_USE_C7_MSR  (1UL << 0)
+#define KNL_CORE_C6_MSR                (1UL << 1)
 
 struct perf_cstate_msr {
        u64     msr;
@@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
        .quirks                 = SLM_PKG_C6_USE_C7_MSR,
 };
 
+
+static const struct cstate_model knl_cstates __initconst = {
+       .core_events            = BIT(PERF_CSTATE_CORE_C6_RES),
+
+       .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
+                                 BIT(PERF_CSTATE_PKG_C3_RES) |
+                                 BIT(PERF_CSTATE_PKG_C6_RES),
+       .quirks                 = KNL_CORE_C6_MSR,
+};
+
+
+
 #define X86_CSTATES_MODEL(model, states)                               \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
 
@@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+
+       X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
        if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
 
+       /* KNL has different MSR for CORE C6 */
+       if (cm->quirks & KNL_CORE_C6_MSR)
+               pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
+
+
        has_cstate_core = cstate_probe_msr(cm->core_events,
                                           PERF_CSTATE_CORE_EVENT_MAX,
                                           core_msr, core_events_attrs);
index fc6cf21..81b321a 100644 (file)
@@ -458,8 +458,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
        if (!x86_pmu.lbr_nr)
                return;
 
-       if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
-                                       event->ctx->task_ctx_data) {
+       if (branch_user_callstack(cpuc->br_sel) &&
+           event->ctx->task_ctx_data) {
                task_ctx = event->ctx->task_ctx_data;
                task_ctx->lbr_callstack_users--;
        }
index b0f0e83..0a535ce 100644 (file)
@@ -763,6 +763,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
index d9844cc..efca268 100644 (file)
@@ -1349,6 +1349,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,   knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
index 1188bc8..a396292 100644 (file)
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
index 9ae5ab8..34a46dc 100644 (file)
@@ -64,5 +64,6 @@
 /* Xeon Phi */
 
 #define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
+#define INTEL_FAM6_XEON_PHI_KNM                0x85 /* Knights Mill */
 
 #endif /* _ASM_X86_INTEL_FAMILY_H */
index de25aad..d34bd37 100644 (file)
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
 #define arch_phys_wc_add arch_phys_wc_add
 #endif
 
+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
 #endif /* _ASM_X86_IO_H */
index 56f4c66..78f3760 100644 (file)
@@ -88,7 +88,6 @@
 
 #define MSR_IA32_RTIT_CTL              0x00000570
 #define MSR_IA32_RTIT_STATUS           0x00000571
-#define MSR_IA32_RTIT_STATUS           0x00000571
 #define MSR_IA32_RTIT_ADDR0_A          0x00000580
 #define MSR_IA32_RTIT_ADDR0_B          0x00000581
 #define MSR_IA32_RTIT_ADDR1_A          0x00000582
index 3d33a71..a34e0d4 100644 (file)
@@ -103,8 +103,10 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
 ({                                                     \
        long tmp;                                       \
        struct rw_semaphore* ret;                       \
+       register void *__sp asm(_ASM_SP);               \
+                                                       \
        asm volatile("# beginning down_write\n\t"       \
-                    LOCK_PREFIX "  xadd      %1,(%3)\n\t"      \
+                    LOCK_PREFIX "  xadd      %1,(%4)\n\t"      \
                     /* adds 0xffff0001, returns the old value */ \
                     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
                     /* was the active mask 0 before? */\
@@ -112,7 +114,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
                     "  call " slow_path "\n"           \
                     "1:\n"                             \
                     "# ending down_write"              \
-                    : "+m" (sem->count), "=d" (tmp), "=a" (ret)        \
+                    : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
                     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
                     : "memory", "cc");                 \
        ret;                                            \
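Editor's note: the added register variable is the x86 idiom for telling GCC that an inline asm performs a call, so the compiler must set up a proper stack frame first. A stand-alone sketch of the pattern (the helper name is hypothetical; _ASM_SP expands to "rsp" on 64-bit and "esp" on 32-bit):

    void my_helper(void);

    static inline void call_from_asm(void)
    {
            register void *__sp asm("rsp");

            asm volatile("call my_helper"
                         : "+r" (__sp)      /* sp is read and may change */
                         :
                         : "memory");
    }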
index 2aaca53..ad6f5eb 100644 (file)
@@ -52,6 +52,15 @@ struct task_struct;
 #include <asm/cpufeature.h>
 #include <linux/atomic.h>
 
+struct thread_info {
+       unsigned long           flags;          /* low level flags */
+};
+
+#define INIT_THREAD_INFO(tsk)                  \
+{                                              \
+       .flags          = 0,                    \
+}
+
 #define init_stack             (init_thread_union.stack)
 
 #else /* !__ASSEMBLY__ */
index 8a5abaa..931ced8 100644 (file)
@@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
        mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+       acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
        /*
         * stash over-ride to indicate we've been here
index 620ab06..017bda1 100644 (file)
@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
         * We need the physical address of the container for both bitness since
         * boot_params.hdr.ramdisk_image is a physical address.
         */
-       cont    = __pa(container);
+       cont    = __pa_nodebug(container);
        cont_va = container;
 #endif
 
index 8cb57df..1db8dc4 100644 (file)
@@ -32,6 +32,8 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
        static const struct cpuid_bit cpuid_bits[] = {
                { X86_FEATURE_INTEL_PT,         CR_EBX,25, 0x00000007, 0 },
+               { X86_FEATURE_AVX512_4VNNIW,    CR_EDX, 2, 0x00000007, 0 },
+               { X86_FEATURE_AVX512_4FMAPS,    CR_EDX, 3, 0x00000007, 0 },
                { X86_FEATURE_APERFMPERF,       CR_ECX, 0, 0x00000006, 0 },
                { X86_FEATURE_EPB,              CR_ECX, 3, 0x00000006, 0 },
                { X86_FEATURE_HW_PSTATE,        CR_EDX, 7, 0x80000007, 0 },
index 8116057..5130985 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
+#include <asm/timer.h>
 #include <asm/apic.h>
 
 #define CPUID_VMWARE_INFO_LEAF 0x40000000
@@ -94,6 +95,10 @@ static void __init vmware_platform_setup(void)
        } else {
                pr_warn("Failed to get TSC freq from the hypervisor\n");
        }
+
+#ifdef CONFIG_X86_IO_APIC
+       no_timer_check = 1;
+#endif
 }
 
 /*
index b85fe5f..90e8dde 100644 (file)
@@ -350,7 +350,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
                 * continue building up new bios map based on this
                 * information
                 */
-               if (current_type != last_type) {
+               if (current_type != last_type || current_type == E820_PRAM) {
                        if (last_type != 0)      {
                                new_bios[new_bios_entry].size =
                                        change_point[chgidx]->addr - last_addr;
index 124aa5c..095ef7d 100644 (file)
@@ -74,6 +74,8 @@ void fpu__xstate_clear_all_cpu_caps(void)
        setup_clear_cpu_cap(X86_FEATURE_MPX);
        setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
        setup_clear_cpu_cap(X86_FEATURE_PKU);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
 }
 
 /*
index 28cee01..d9d8d16 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
 #include <linux/frame.h>
+#include <linux/kasan.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -1057,9 +1058,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         * tailcall optimization. So, to be absolutely safe
         * we also save and restore enough stack bytes to cover
         * the argument area.
+        * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+        * the raw stack chunk together with its redzones:

         */
-       memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
-              MIN_STACK_SIZE(addr));
+       __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
        regs->flags &= ~X86_EFLAGS_IF;
        trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
@@ -1080,6 +1082,9 @@ void jprobe_return(void)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
+       /* Unpoison stack redzones in the frames we are going to jump over. */
+       kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
+
        asm volatile (
 #ifdef CONFIG_X86_64
                        "       xchg   %%rbx,%%rsp      \n"
@@ -1118,7 +1123,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
                /* It's OK to start function graph tracing again */
                unpause_graph_tracing();
                *regs = kcb->jprobe_saved_regs;
-               memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+               __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
                preempt_enable_no_resched();
                return 1;
        }
index efe73aa..7b0d3da 100644 (file)
 
 #ifdef CC_USING_FENTRY
 # define function_hook __fentry__
+EXPORT_SYMBOL(__fentry__)
 #else
 # define function_hook mcount
+EXPORT_SYMBOL(mcount)
 #endif
 
 /* All cases save the original rbp (8 bytes) */
@@ -295,7 +297,6 @@ trace:
        jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(function_hook)
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
index 51402a7..0bee04d 100644 (file)
@@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
                        amd_disable_seq_and_redirect_scrub);
 
-#endif
-
 #if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
 #include <linux/jump_label.h>
 #include <asm/string_64.h>
@@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
 #endif
+#endif
index 40df337..ec1f756 100644 (file)
@@ -105,9 +105,6 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
        /* Don't let flags to be set from userspace */
        act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
 
-       if (user_64bit_mode(current_pt_regs()))
-               return;
-
        if (in_ia32_syscall())
                act->sa.sa_flags |= SA_IA32_ABI;
        if (in_x32_syscall())
index 68f8cc2..c00cb64 100644 (file)
@@ -261,8 +261,10 @@ static inline void __smp_reschedule_interrupt(void)
 
 __visible void smp_reschedule_interrupt(struct pt_regs *regs)
 {
+       irq_enter();
        ack_APIC_irq();
        __smp_reschedule_interrupt();
+       irq_exit();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
index 951f093..42f5eb7 100644 (file)
@@ -1409,15 +1409,17 @@ __init void prefill_possible_map(void)
 
        /* No boot processor was found in mptable or ACPI MADT */
        if (!num_processors) {
-               int apicid = boot_cpu_physical_apicid;
-               int cpu = hard_smp_processor_id();
+               if (boot_cpu_has(X86_FEATURE_APIC)) {
+                       int apicid = boot_cpu_physical_apicid;
+                       int cpu = hard_smp_processor_id();
 
-               pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+                       pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
 
-               /* Make sure boot cpu is enumerated */
-               if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
-                   apic->apic_id_valid(apicid))
-                       generic_processor_info(apicid, boot_cpu_apic_version);
+                       /* Make sure boot cpu is enumerated */
+                       if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+                           apic->apic_id_valid(apicid))
+                               generic_processor_info(apicid, boot_cpu_apic_version);
+               }
 
                if (!num_processors)
                        num_processors = 1;
index c9a0738..a23ce84 100644 (file)
@@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
        unsigned char opcode[15];
        unsigned long addr = convert_ip_to_linear(child, regs);
 
-       copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+       copied = access_process_vm(child, addr, opcode, sizeof(opcode),
+                       FOLL_FORCE);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
                /* popf and iret */
index 9298993..2d721e5 100644 (file)
@@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        get_stack_info(first_frame, state->task, &state->stack_info,
                       &state->stack_mask);
 
-       if (!__kernel_text_address(*first_frame))
+       /*
+        * The caller can provide the address of the first frame directly
+        * (first_frame) or indirectly (regs->sp) to indicate which stack frame
+        * to start unwinding at.  Skip ahead until we reach it.
+        */
+       if (!unwind_done(state) &&
+           (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+           !__kernel_text_address(*first_frame)))
                unwind_next_frame(state);
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
index c7220ba..1a22de7 100644 (file)
@@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        ioapic->id = 0;
-       memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
+       memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
        rtc_irq_eoi_tracking_reset(ioapic);
 }
 
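Editor's note: the ioapic fix above is the classic byte-count confusion — memset() takes a length in bytes, so clearing an array of IOAPIC_NUM_PINS ints with the pin count only wipes a quarter of it. In miniature (element type and sizes are illustrative):

    #include <string.h>

    #define NUM_PINS 24

    static int irq_eoi[NUM_PINS];

    static void reset_eoi(void)
    {
            memset(irq_eoi, 0, NUM_PINS);           /* wrong: 24 bytes, 6 ints */
            memset(irq_eoi, 0, sizeof(irq_eoi));    /* right: all 96 bytes */
    }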
index 6c633de..e375235 100644 (file)
@@ -5733,13 +5733,13 @@ static int kvmclock_cpu_online(unsigned int cpu)
 
 static void kvm_timer_init(void)
 {
-       int cpu;
-
        max_tsc_khz = tsc_khz;
 
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
                struct cpufreq_policy policy;
+               int cpu;
+
                memset(&policy, 0, sizeof(policy));
                cpu = get_cpu();
                cpufreq_get_policy(&policy, cpu);
index b8b6a60..0d4fb3e 100644 (file)
@@ -435,7 +435,7 @@ slow_irqon:
 
                ret = get_user_pages_unlocked(start,
                                              (end - start) >> PAGE_SHIFT,
-                                             write, 0, pages);
+                                             pages, write ? FOLL_WRITE : 0);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
index ddd2661..887e571 100644 (file)
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
         * consistent with the vaddr_start/vaddr_end variables.
         */
        BUILD_BUG_ON(vaddr_start >= vaddr_end);
-       BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
                     vaddr_end >= EFI_VA_START);
-       BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
-                     config_enabled(CONFIG_EFI)) &&
+       BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+                     IS_ENABLED(CONFIG_EFI)) &&
                     vaddr_end >= __START_KERNEL_map);
        BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
 
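Editor's note: config_enabled() was an internal kconfig.h helper being phased out; IS_ENABLED() is the supported macro and likewise evaluates to a compile-time 0/1, so it can sit inside BUILD_BUG_ON(). A small sketch (the CONFIG symbol is real, the constants are illustrative):

    #include <linux/bug.h>
    #include <linux/kconfig.h>

    #define VADDR_END       0xffffff0000000000UL    /* illustrative */
    #define EFI_VA_LIMIT    0xffffffef00000000UL    /* illustrative */

    static inline void layout_sanity_checks(void)
    {
            BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
                         VADDR_END >= EFI_VA_LIMIT);
    }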
index 8047687..e4f8009 100644 (file)
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
 {
        long gup_ret;
        int nr_pages = 1;
-       int force = 0;
 
-       gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
-                       force, NULL, NULL);
+       gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+                       write ? FOLL_WRITE : 0, NULL, NULL);
        /*
         * get_user_pages() returns number of pages gotten.
         * 0 means we failed to fault in and get anything,
index 170cc4f..83e701f 100644 (file)
@@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
        free_memtype(start, end);
 }
 
+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+       enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+       return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+       io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
 {
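Editor's note: the two new exports let a driver claim a write-combining memtype for a region (typically a framebuffer BAR) before mapping it, and release it afterwards. A hypothetical caller:

    #include <linux/io.h>

    static void __iomem *map_fb_wc(resource_size_t base, resource_size_t size)
    {
            void __iomem *fb;

            if (arch_io_reserve_memtype_wc(base, size))
                    return NULL;

            fb = ioremap_wc(base, size);
            if (!fb)
                    arch_io_free_memtype_wc(base, size);
            return fb;
    }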
index b4d5e95..4a6a5a2 100644 (file)
@@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
                 */
                return BIOS_STATUS_UNIMPLEMENTED;
 
-       ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+       /*
+        * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
+        * callback method, which uses efi_call() directly, with the kernel page tables:
+        */
+       if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
+               ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
+       else
+               ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(uv_bios_call);
index 5766ead..60a5a5a 100644 (file)
@@ -36,7 +36,8 @@ int is_syscall(unsigned long addr)
                 * slow, but that doesn't matter, since it will be called only
                 * in case of singlestepping, if copy_from_user failed.
                 */
-               n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+               n = access_process_vm(current, addr, &instr, sizeof(instr),
+                               FOLL_FORCE);
                if (n != sizeof(instr)) {
                        printk(KERN_ERR "is_syscall : failed to read "
                               "instruction from 0x%lx\n", addr);
index 0b5c184..e30202b 100644 (file)
@@ -212,7 +212,8 @@ int is_syscall(unsigned long addr)
                 * slow, but that doesn't matter, since it will be called only
                 * in case of singlestepping, if copy_from_user failed.
                 */
-               n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+               n = access_process_vm(current, addr, &instr, sizeof(instr),
+                               FOLL_FORCE);
                if (n != sizeof(instr)) {
                        printk("is_syscall : failed to read instruction from "
                               "0x%lx\n", addr);
index c0fdd57..bdd8556 100644 (file)
@@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
 
        xen_domain_type = XEN_HVM_DOMAIN;
 }
+#endif
 
 static int xen_cpu_up_prepare(unsigned int cpu)
 {
@@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
        return 0;
 }
 
+#ifdef CONFIG_XEN_PVHVM
 #ifdef CONFIG_KEXEC_CORE
 static void xen_hvm_shutdown(void)
 {
index 7be53cb..6ebcef2 100644 (file)
@@ -133,6 +133,26 @@ retry:
 }
 EXPORT_SYMBOL_GPL(badblocks_check);
 
+static void badblocks_update_acked(struct badblocks *bb)
+{
+       u64 *p = bb->page;
+       int i;
+       bool unacked = false;
+
+       if (!bb->unacked_exist)
+               return;
+
+       for (i = 0; i < bb->count ; i++) {
+               if (!BB_ACK(p[i])) {
+                       unacked = true;
+                       break;
+               }
+       }
+
+       if (!unacked)
+               bb->unacked_exist = 0;
+}
+
 /**
  * badblocks_set() - Add a range of bad blocks to the table.
  * @bb:                the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
        bb->changed = 1;
        if (!acknowledged)
                bb->unacked_exist = 1;
+       else
+               badblocks_update_acked(bb);
        write_sequnlock_irqrestore(&bb->lock, flags);
 
        return rv;
@@ -354,7 +376,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
                 * current range.  Earlier ranges could also overlap,
                 * but only this one can overlap the end of the range.
                 */
-               if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+               if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
+                   (BB_OFFSET(p[lo]) < target)) {
                        /* Partial overlap, leave the tail of this range */
                        int ack = BB_ACK(p[lo]);
                        sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +400,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
                        lo--;
                }
                while (lo >= 0 &&
-                      BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+                      (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
+                      (BB_OFFSET(p[lo]) < target)) {
                        /* This range does overlap */
                        if (BB_OFFSET(p[lo]) < s) {
                                /* Keep the early parts of this range. */
@@ -399,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
                }
        }
 
+       badblocks_update_acked(bb);
        bb->changed = 1;
 out:
        write_sequnlock_irq(&bb->lock);
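
The badblocks_update_acked() helper added above only makes sense given how each table entry packs the start sector, length, and "acknowledged" bit into a single u64. The sketch below mirrors the BB_* macros from include/linux/badblocks.h; the exact shifts and masks are quoted from memory and should be treated as assumptions.

#include <linux/types.h>

/* Assumed layout: bit 63 = ack, bits 62..9 = start sector, bits 8..0 = len - 1. */
#define SK_BB_MAKE(a, l, ack)	(((u64)(a) << 9) | ((l) - 1) | ((u64)(!!(ack)) << 63))
#define SK_BB_OFFSET(x)		(((x) & ~((1ULL << 63) | 0x1ffULL)) >> 9)
#define SK_BB_LEN(x)		(((x) & 0x1ffULL) + 1)
#define SK_BB_ACK(x)		(!!((x) & (1ULL << 63)))

/* The same scan badblocks_update_acked() performs: unacked_exist can be
 * cleared only once every entry carries the ack bit. */
static bool sk_all_acked(const u64 *p, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (!SK_BB_ACK(p[i]))
			return false;
	return true;
}
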
index 6a14b68..3c882cb 100644 (file)
@@ -342,6 +342,34 @@ static void flush_data_end_io(struct request *rq, int error)
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+       /*
+        * Update q->in_flight[] here to make this tag usable early,
+        * because blk_queue_start_tag() uses q->in_flight[BLK_RW_ASYNC]
+        * to limit async I/O and to reserve tags for sync I/O.
+        *
+        * More importantly, this avoids the following I/O deadlock:
+        *
+        * - suppose 40 FUA requests are coming to the flush queue
+        *   and the queue depth is 31
+        * - 30 rqs are scheduled, then blk_queue_start_tag() can't
+        *   allocate a tag for async I/O any more
+        * - all 30 rqs complete before FLUSH_PENDING_TIMEOUT
+        *   and flush_data_end_io() is called
+        * - the remaining rqs still can't go ahead unless
+        *   q->in_flight[BLK_RW_ASYNC] is updated here; meanwhile
+        *   they are held in the flush data queue and the post-flush
+        *   rq makes no progress
+        * - only after the post-flush rq is handled can all these
+        *   rqs complete
+        */
+
+       elv_completed_request(q, rq);
+
+       /* avoid double accounting */
+       rq->cmd_flags &= ~REQ_STARTED;
+
        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
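
The deadlock narrated in the comment above can be reproduced with a toy counter model. The program below is not kernel code; it only mimics the sync-reservation rule of blk_queue_start_tag() (async allocation refused near the depth limit) to show why q->in_flight must be dropped when the flush data part completes.

#include <stdio.h>

int main(void)
{
	int depth = 31, reserved = 1;	/* tags kept back for sync I/O */
	int in_flight_async = 0, issued = 0, total = 40;

	/* 30 of the 40 FUA requests get tags, then allocation stalls. */
	while (issued < total && in_flight_async < depth - reserved) {
		in_flight_async++;
		issued++;
	}

	/* Their data parts complete, but without the elv_completed_request()
	 * and in_flight decrement above, the async count never drops, so the
	 * remaining requests can never take a tag and the post-flush request
	 * is never reached: deadlock. */
	printf("issued %d of %d, async in flight stuck at %d\n",
	       issued, total, in_flight_async);
	return 0;
}
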
index ddc2eed..f3d27a6 100644 (file)
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 
-       hctx->queued++;
-       data->hctx = hctx;
-       data->ctx = ctx;
+       data->hctx = alloc_data.hctx;
+       data->ctx = alloc_data.ctx;
+       data->hctx->queued++;
        return rq;
 }
 
index f0afdfb..194d20b 100644 (file)
@@ -21,7 +21,7 @@ obj-y                         += video/
 obj-y                          += idle/
 
 # IPMI must come before ACPI in order to provide IPMI opregion support
-obj-$(CONFIG_IPMI_HANDLER)     += char/ipmi/
+obj-y                          += char/ipmi/
 
 obj-$(CONFIG_ACPI)             += acpi/
 obj-$(CONFIG_SFI)              += sfi/
index f1e6dcc..54d48b9 100644 (file)
@@ -46,6 +46,7 @@
 #include "acdispat.h"
 #include "acnamesp.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
 
        /* Walk entire namespace from the supplied root */
 
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
        /*
         * We don't use acpi_walk_namespace since we do not want to acquire
         * the namespace reader lock.
         */
        status =
            acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
-                                  ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
-                                  NULL, &info, NULL);
+                                  0, acpi_ds_init_one_object, NULL, &info,
+                                  NULL);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
        }
-       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
        status = acpi_get_table_by_index(table_index, &table);
        if (ACPI_FAILURE(status)) {
index 32e9ddc..2b3210f 100644 (file)
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
                          "Method auto-serialization parse [%4.4s] %p\n",
                          acpi_ut_get_node_name(node), node));
 
-       acpi_ex_enter_interpreter();
-
        /* Create/Init a root op for the method parse tree */
 
        op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
        if (!op) {
-               status = AE_NO_MEMORY;
-               goto unlock;
+               return_ACPI_STATUS(AE_NO_MEMORY);
        }
 
        acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
            acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
        if (!walk_state) {
                acpi_ps_free_op(op);
-               status = AE_NO_MEMORY;
-               goto unlock;
+               return_ACPI_STATUS(AE_NO_MEMORY);
        }
 
        status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
        status = acpi_ps_parse_aml(walk_state);
 
        acpi_ps_delete_parse_tree(op);
-unlock:
-       acpi_ex_exit_interpreter();
        return_ACPI_STATUS(status);
 }
 
@@ -730,26 +724,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 
                acpi_ds_method_data_delete_all(walk_state);
 
-               /*
-                * If method is serialized, release the mutex and restore the
-                * current sync level for this thread
-                */
-               if (method_desc->method.mutex) {
-
-                       /* Acquisition Depth handles recursive calls */
-
-                       method_desc->method.mutex->mutex.acquisition_depth--;
-                       if (!method_desc->method.mutex->mutex.acquisition_depth) {
-                               walk_state->thread->current_sync_level =
-                                   method_desc->method.mutex->mutex.
-                                   original_sync_level;
-
-                               acpi_os_release_mutex(method_desc->method.
-                                                     mutex->mutex.os_mutex);
-                               method_desc->method.mutex->mutex.thread_id = 0;
-                       }
-               }
-
                /*
                 * Delete any namespace objects created anywhere within the
                 * namespace by the execution of this method. Unless:
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
                                    ~ACPI_METHOD_MODIFIED_NAMESPACE;
                        }
                }
+
+               /*
+                * If method is serialized, release the mutex and restore the
+                * current sync level for this thread
+                */
+               if (method_desc->method.mutex) {
+
+                       /* Acquisition Depth handles recursive calls */
+
+                       method_desc->method.mutex->mutex.acquisition_depth--;
+                       if (!method_desc->method.mutex->mutex.acquisition_depth) {
+                               walk_state->thread->current_sync_level =
+                                   method_desc->method.mutex->mutex.
+                                   original_sync_level;
+
+                               acpi_os_release_mutex(method_desc->method.
+                                                     mutex->mutex.os_mutex);
+                               method_desc->method.mutex->mutex.thread_id = 0;
+                       }
+               }
        }
 
        /* Decrement the thread count on the method */
index 028b22a..e362182 100644 (file)
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
                                }
                        }
 
-                       acpi_ex_exit_interpreter();
                        status =
                            acpi_ev_initialize_region
                            (acpi_ns_get_attached_object(node), FALSE);
-                       acpi_ex_enter_interpreter();
 
                        if (ACPI_FAILURE(status)) {
                                /*
index 3843f1f..75ddd16 100644 (file)
@@ -45,6 +45,7 @@
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
                                        }
                                }
 
+                               acpi_ex_exit_interpreter();
                                status =
                                    acpi_ev_execute_reg_method(region_obj,
                                                               ACPI_REG_CONNECT);
+                               acpi_ex_enter_interpreter();
 
                                if (acpi_ns_locked) {
                                        status =
index 334d3c5..d1f2014 100644 (file)
@@ -137,7 +137,9 @@ unlock:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "**** Begin Table Object Initialization\n"));
 
+       acpi_ex_enter_interpreter();
        status = acpi_ds_initialize_objects(table_index, node);
+       acpi_ex_exit_interpreter();
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "**** Completed Table Object Initialization\n"));
index f0a029e..0d099a2 100644 (file)
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
        ghes_do_proc(ghes, ghes->estatus);
 out:
        ghes_clear_estatus(ghes);
-       return 0;
+       return rc;
 }
 
 static void ghes_add_timer(struct ghes *ghes)
index c983bf7..bc3d914 100644 (file)
@@ -87,6 +87,7 @@ struct acpi_pci_link {
 
 static LIST_HEAD(acpi_link_list);
 static DEFINE_MUTEX(acpi_link_lock);
+static int sci_irq = -1, sci_penalty;
 
 /* --------------------------------------------------------------------------
                             PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
 {
        int penalty = 0;
 
-       /*
-       * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
-       * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
-       * use for PCI IRQs.
-       */
-       if (irq == acpi_gbl_FADT.sci_interrupt) {
-               u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
-
-               if (type != IRQ_TYPE_LEVEL_LOW)
-                       penalty += PIRQ_PENALTY_ISA_ALWAYS;
-               else
-                       penalty += PIRQ_PENALTY_PCI_USING;
-       }
+       if (irq == sci_irq)
+               penalty += sci_penalty;
 
        if (irq < ACPI_MAX_ISA_IRQS)
                return penalty + acpi_isa_irq_penalty[irq];
 
-       penalty += acpi_irq_pci_sharing_penalty(irq);
-       return penalty;
+       return penalty + acpi_irq_pci_sharing_penalty(irq);
 }
 
 int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                            acpi_device_bid(link->device));
                return -ENODEV;
        } else {
+               if (link->irq.active < ACPI_MAX_ISA_IRQS)
+                       acpi_isa_irq_penalty[link->irq.active] +=
+                               PIRQ_PENALTY_PCI_USING;
+
                printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
                       acpi_device_name(link->device),
                       acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
                        continue;
 
                if (used)
-                       new_penalty = acpi_irq_get_penalty(irq) +
+                       new_penalty = acpi_isa_irq_penalty[irq] +
                                        PIRQ_PENALTY_ISA_USED;
                else
                        new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
 void acpi_penalize_isa_irq(int irq, int active)
 {
        if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
-               acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+               acpi_isa_irq_penalty[irq] +=
                  (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
 }
 
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
                    acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
 }
 
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+       sci_irq = irq;
+
+       if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+           polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+               sci_penalty = PIRQ_PENALTY_PCI_USING;
+       else
+               sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+}
+
 /*
  * Over-ride default table to reserve additional IRQs for use by ISA
  * e.g. acpi_irq_isa=5
index 562af94..3c71b98 100644 (file)
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 
 
 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-                                        uint32_t desc)
+                                        u32 desc, bool need_strong_ref)
 {
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;
@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
 
-               if (desc < ref->desc)
+               if (desc < ref->desc) {
                        n = n->rb_left;
-               else if (desc > ref->desc)
+               } else if (desc > ref->desc) {
                        n = n->rb_right;
-               else
+               } else if (need_strong_ref && !ref->strong) {
+                       binder_user_error("tried to use weak ref as strong ref\n");
+                       return NULL;
+               } else {
                        return ref;
+               }
        }
        return NULL;
 }
@@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+                       struct binder_ref *ref;
+
+                       ref = binder_get_ref(proc, fp->handle,
+                                            fp->type == BINDER_TYPE_HANDLE);
 
                        if (ref == NULL) {
                                pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
                if (tr->target.handle) {
                        struct binder_ref *ref;
 
-                       ref = binder_get_ref(proc, tr->target.handle);
+                       ref = binder_get_ref(proc, tr->target.handle, true);
                        if (ref == NULL) {
                                binder_user_error("%d:%d got transaction to invalid handle\n",
                                        proc->pid, thread->pid);
@@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
                                fp->type = BINDER_TYPE_HANDLE;
                        else
                                fp->type = BINDER_TYPE_WEAK_HANDLE;
+                       fp->binder = 0;
                        fp->handle = ref->desc;
+                       fp->cookie = 0;
                        binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                                       &thread->todo);
 
@@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+                       struct binder_ref *ref;
+
+                       ref = binder_get_ref(proc, fp->handle,
+                                            fp->type == BINDER_TYPE_HANDLE);
 
                        if (ref == NULL) {
                                binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
                                        return_error = BR_FAILED_REPLY;
                                        goto err_binder_get_ref_for_node_failed;
                                }
+                               fp->binder = 0;
                                fp->handle = new_ref->desc;
+                               fp->cookie = 0;
                                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
                                trace_binder_transaction_ref_to_ref(t, ref,
                                                                    new_ref);
@@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        fd %d -> %d\n", fp->handle, target_fd);
                        /* TODO: fput? */
+                       fp->binder = 0;
                        fp->handle = target_fd;
                } break;
 
@@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
                                                ref->desc);
                                }
                        } else
-                               ref = binder_get_ref(proc, target);
+                               ref = binder_get_ref(proc, target,
+                                                    cmd == BC_ACQUIRE ||
+                                                    cmd == BC_RELEASE);
                        if (ref == NULL) {
                                binder_user_error("%d:%d refcount change on invalid ref %d\n",
                                        proc->pid, thread->pid, target);
@@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
-                       ref = binder_get_ref(proc, target);
+                       ref = binder_get_ref(proc, target, false);
                        if (ref == NULL) {
                                binder_user_error("%d:%d %s invalid ref %d\n",
                                        proc->pid, thread->pid,
index ba5f11c..9669fc7 100644 (file)
@@ -1418,30 +1418,33 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
         * Message mode could be enforced. In this case assume that advantage
         * of multiple MSIs is negated and use single MSI mode instead.
         */
-       nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
-                       PCI_IRQ_MSIX | PCI_IRQ_MSI);
-       if (nvec > 0) {
-               if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
-                       hpriv->get_irq_vector = ahci_get_irq_vector;
-                       hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-                       return nvec;
+       if (n_ports > 1) {
+               nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+                               PCI_IRQ_MSIX | PCI_IRQ_MSI);
+               if (nvec > 0) {
+                       if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+                               hpriv->get_irq_vector = ahci_get_irq_vector;
+                               hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+                               return nvec;
+                       }
+
+                       /*
+                        * Fall back to single MSI mode if the controller
+                        * enforced MRSM mode.
+                        */
+                       printk(KERN_INFO
+                               "ahci: MRSM is on, fallback to single MSI\n");
+                       pci_free_irq_vectors(pdev);
                }
 
                /*
-                * Fallback to single MSI mode if the controller enforced MRSM
-                * mode.
+                * -ENOSPC indicates we don't have enough vectors. Don't
+                * bother trying a single vector for any other error:
                 */
-               printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
-               pci_free_irq_vectors(pdev);
+               if (nvec < 0 && nvec != -ENOSPC)
+                       return nvec;
        }
 
-       /*
-        * -ENOSPC indicated we don't have enough vectors.  Don't bother trying
-        * a single vectors for any other error:
-        */
-       if (nvec < 0 && nvec != -ENOSPC)
-               return nvec;
-
        /*
         * If the host is not capable of supporting per-port vectors, fall
         * back to single MSI before finally attempting single MSI-X.
@@ -1617,7 +1620,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                /* legacy intx interrupts */
                pci_intx(pdev, 1);
        }
-       hpriv->irq = pdev->irq;
+       hpriv->irq = pci_irq_vector(pdev, 0);
 
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
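
The restructured ahci_init_msi() leans on the then-new pci_alloc_irq_vectors()/pci_irq_vector() API, including the final hunk that reads the IRQ via pci_irq_vector(pdev, 0) instead of pdev->irq. A generic sketch of that fallback ladder, stripped of the ahci specifics (the function name and vector counts are illustrative):

#include <linux/pci.h>

/* Try multi-vector MSI-X/MSI first, fall back to a single vector of any
 * kind, and always fetch the Linux IRQ number through pci_irq_vector(). */
static int sk_setup_irqs(struct pci_dev *pdev, unsigned int want)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, want, want,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)	/* e.g. -ENOSPC: not enough vectors available */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSIX | PCI_IRQ_MSI |
					     PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* Vector 0 resolves to the correct IRQ even for legacy INTx. */
	return pci_irq_vector(pdev, 0);
}
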
index fdf44ca..d02e7c0 100644 (file)
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
          If you are unsure about this, Say N here.
 
 config DEBUG_TEST_DRIVER_REMOVE
-       bool "Test driver remove calls during probe"
+       bool "Test driver remove calls during probe (UNSTABLE)"
        depends on DEBUG_KERNEL
        help
          Say Y here if you want the Driver core to test driver remove functions
          by calling probe, remove, probe. This tests the remove path without
          having to unbind the driver or unload the driver module.
 
-         If you are unsure about this, say N here.
+         This option is expected to find errors and may render your system
+         unusable. You should say N here unless you are explicitly looking to
+         test this functionality.
 
 config SYS_HYPERVISOR
        bool
index 811e11c..0809cda 100644 (file)
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
        case DAC960_PD_Controller:
          if (!request_region(Controller->IO_Address, 0x80,
                              Controller->FullModelName)) {
-               DAC960_Error("IO port 0x%d busy for Controller at\n",
+               DAC960_Error("IO port 0x%lx busy for Controller at\n",
                             Controller, Controller->IO_Address);
                goto Failure;
          }
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
        case DAC960_P_Controller:
          if (!request_region(Controller->IO_Address, 0x80,
                              Controller->FullModelName)){
-               DAC960_Error("IO port 0x%d busy for Controller at\n",
+               DAC960_Error("IO port 0x%lx busy for Controller at\n",
                             Controller, Controller->IO_Address);
                goto Failure;
          }
index ba405b5..19a16b2 100644 (file)
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
        spin_lock(&nbd->sock_lock);
 
        if (!nbd->sock) {
-               spin_unlock_irq(&nbd->sock_lock);
+               spin_unlock(&nbd->sock_lock);
                return;
        }
 
index abb7162..7b274ff 100644 (file)
@@ -415,15 +415,15 @@ struct rbd_device {
 };
 
 /*
- * Flag bits for rbd_dev->flags.  If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ *   by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
  */
 enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
+       RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
 };
 
 static DEFINE_MUTEX(client_mutex);     /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
        struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
                                            struct rbd_device, watch_dwork);
        bool was_lock_owner = false;
+       bool need_to_wake = false;
        int ret;
 
        dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
                was_lock_owner = rbd_release_lock(rbd_dev);
 
        mutex_lock(&rbd_dev->watch_mutex);
-       if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
-               goto fail_unlock;
+       if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+               mutex_unlock(&rbd_dev->watch_mutex);
+               goto out;
+       }
 
        ret = __rbd_register_watch(rbd_dev);
        if (ret) {
                rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
-               if (ret != -EBLACKLISTED)
+               if (ret == -EBLACKLISTED || ret == -ENOENT) {
+                       set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+                       need_to_wake = true;
+               } else {
                        queue_delayed_work(rbd_dev->task_wq,
                                           &rbd_dev->watch_dwork,
                                           RBD_RETRY_DELAY);
-               goto fail_unlock;
+               }
+               mutex_unlock(&rbd_dev->watch_mutex);
+               goto out;
        }
 
+       need_to_wake = true;
        rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
        rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
        mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
                                 ret);
        }
 
+out:
        up_write(&rbd_dev->lock_rwsem);
-       wake_requests(rbd_dev, true);
-       return;
-
-fail_unlock:
-       mutex_unlock(&rbd_dev->watch_mutex);
-       up_write(&rbd_dev->lock_rwsem);
+       if (need_to_wake)
+               wake_requests(rbd_dev, true);
 }
 
 /*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
                up_read(&rbd_dev->lock_rwsem);
                schedule();
                down_read(&rbd_dev->lock_rwsem);
-       } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+       } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+                !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
        finish_wait(&rbd_dev->lock_waitq, &wait);
 }
 
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
 
        if (must_be_locked) {
                down_read(&rbd_dev->lock_rwsem);
-               if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+               if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+                   !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
                        rbd_wait_state_locked(rbd_dev);
+
+               WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+                       !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+               if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+                       result = -EBLACKLISTED;
+                       goto err_unlock;
+               }
        }
 
        img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
index 4827945..d2d2c89 100644 (file)
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
 
 static void add_early_randomness(struct hwrng *rng)
 {
-       unsigned char bytes[16];
        int bytes_read;
+       size_t size = min_t(size_t, 16, rng_buffer_size());
 
        mutex_lock(&reading_mutex);
-       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+       bytes_read = rng_get_data(rng, rng_buffer, size, 1);
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
-               add_device_randomness(bytes, bytes_read);
+               add_device_randomness(rng_buffer, bytes_read);
 }
 
 static inline void cleanup_rng(struct kref *kref)
index 5a9350b..7f81665 100644 (file)
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
         the IPMI management controller is capable of this.
 
 endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+       depends on ARCH_ASPEED
+       tristate "BT IPMI bmc driver"
+       help
+         Provides a driver for the BT (Block Transfer) IPMI interface
+         found on Aspeed SOCs (AST2400 and AST2500). The driver
+         implements the BMC side of the BT interface.
index f3ffde1..0d98cd9 100644 (file)
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
 obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
 obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
 obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644 (file)
index 0000000..b49e613
--- /dev/null
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate to the host
+ */
+#define DEVICE_NAME    "ipmi-bt-host"
+
+#define BT_IO_BASE     0xe4
+#define BT_IRQ         10
+
+#define BT_CR0         0x0
+#define   BT_CR0_IO_BASE               16
+#define   BT_CR0_IRQ                   12
+#define   BT_CR0_EN_CLR_SLV_RDP                0x8
+#define   BT_CR0_EN_CLR_SLV_WRP                0x4
+#define   BT_CR0_ENABLE_IBT            0x1
+#define BT_CR1         0x4
+#define   BT_CR1_IRQ_H2B       0x01
+#define   BT_CR1_IRQ_HBUSY     0x40
+#define BT_CR2         0x8
+#define   BT_CR2_IRQ_H2B       0x01
+#define   BT_CR2_IRQ_HBUSY     0x40
+#define BT_CR3         0xc
+#define BT_CTRL                0x10
+#define   BT_CTRL_B_BUSY               0x80
+#define   BT_CTRL_H_BUSY               0x40
+#define   BT_CTRL_OEM0                 0x20
+#define   BT_CTRL_SMS_ATN              0x10
+#define   BT_CTRL_B2H_ATN              0x08
+#define   BT_CTRL_H2B_ATN              0x04
+#define   BT_CTRL_CLR_RD_PTR           0x02
+#define   BT_CTRL_CLR_WR_PTR           0x01
+#define BT_BMC2HOST    0x14
+#define BT_INTMASK     0x18
+#define   BT_INTMASK_B2H_IRQEN         0x01
+#define   BT_INTMASK_B2H_IRQ           0x02
+#define   BT_INTMASK_BMC_HWRST         0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+       struct device           dev;
+       struct miscdevice       miscdev;
+       void __iomem            *base;
+       int                     irq;
+       wait_queue_head_t       queue;
+       struct timer_list       poll_timer;
+       struct mutex            mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+       return ioread8(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+       iowrite8(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+       bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+       bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+       bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+       if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+               bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+       if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+               bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+       bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+       return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               buf[i] = bt_read(bt_bmc);
+       return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+       bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               bt_write(bt_bmc, buf[i]);
+       return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+       bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+       return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+       struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+       if (atomic_inc_return(&open_count) == 1) {
+               clr_b_busy(bt_bmc);
+               return 0;
+       }
+
+       atomic_dec(&open_count);
+       return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ *    Byte 1  Byte 2     Byte 3  Byte 4  Byte 5:N
+ *    Length  NetFn/LUN  Seq     Cmd     Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+                          size_t count, loff_t *ppos)
+{
+       struct bt_bmc *bt_bmc = file_bt_bmc(file);
+       u8 len;
+       int len_byte = 1;
+       u8 kbuffer[BT_BMC_BUFFER_SIZE];
+       ssize_t ret = 0;
+       ssize_t nread;
+
+       if (!access_ok(VERIFY_WRITE, buf, count))
+               return -EFAULT;
+
+       WARN_ON(*ppos);
+
+       if (wait_event_interruptible(bt_bmc->queue,
+                                    bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+               return -ERESTARTSYS;
+
+       mutex_lock(&bt_bmc->mutex);
+
+       if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+               ret = -EIO;
+               goto out_unlock;
+       }
+
+       set_b_busy(bt_bmc);
+       clr_h2b_atn(bt_bmc);
+       clr_rd_ptr(bt_bmc);
+
+       /*
+        * The BT frames start with the message length, which does not
+        * include the length byte.
+        */
+       kbuffer[0] = bt_read(bt_bmc);
+       len = kbuffer[0];
+
+       /* We pass the length back to userspace as well */
+       if (len + 1 > count)
+               len = count - 1;
+
+       while (len) {
+               nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+               bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+               if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+                       ret = -EFAULT;
+                       break;
+               }
+               len -= nread;
+               buf += nread + len_byte;
+               ret += nread + len_byte;
+               len_byte = 0;
+       }
+
+       clr_b_busy(bt_bmc);
+
+out_unlock:
+       mutex_unlock(&bt_bmc->mutex);
+       return ret;
+}
+
+/*
+ * BT Message response format :
+ *
+ *    Byte 1  Byte 2     Byte 3  Byte 4  Byte 5  Byte 6:N
+ *    Length  NetFn/LUN  Seq     Cmd     Code    Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+                           size_t count, loff_t *ppos)
+{
+       struct bt_bmc *bt_bmc = file_bt_bmc(file);
+       u8 kbuffer[BT_BMC_BUFFER_SIZE];
+       ssize_t ret = 0;
+       ssize_t nwritten;
+
+       /*
+        * require a minimum response size
+        */
+       if (count < 5)
+               return -EINVAL;
+
+       if (!access_ok(VERIFY_READ, buf, count))
+               return -EFAULT;
+
+       WARN_ON(*ppos);
+
+       /*
+        * There's no interrupt for clearing bmc busy, so we have to
+        * poll.
+        */
+       if (wait_event_interruptible(bt_bmc->queue,
+                                    !(bt_inb(bt_bmc, BT_CTRL) &
+                                      (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+               return -ERESTARTSYS;
+
+       mutex_lock(&bt_bmc->mutex);
+
+       if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+                    (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+               ret = -EIO;
+               goto out_unlock;
+       }
+
+       clr_wr_ptr(bt_bmc);
+
+       while (count) {
+               nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+               if (copy_from_user(&kbuffer, buf, nwritten)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               bt_writen(bt_bmc, kbuffer, nwritten);
+
+               count -= nwritten;
+               buf += nwritten;
+               ret += nwritten;
+       }
+
+       set_b2h_atn(bt_bmc);
+
+out_unlock:
+       mutex_unlock(&bt_bmc->mutex);
+       return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+                        unsigned long param)
+{
+       struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+       switch (cmd) {
+       case BT_BMC_IOCTL_SMS_ATN:
+               set_sms_atn(bt_bmc);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+       struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+       atomic_dec(&open_count);
+       set_b_busy(bt_bmc);
+       return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+       struct bt_bmc *bt_bmc = file_bt_bmc(file);
+       unsigned int mask = 0;
+       u8 ctrl;
+
+       poll_wait(file, &bt_bmc->queue, wait);
+
+       ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+       if (ctrl & BT_CTRL_H2B_ATN)
+               mask |= POLLIN;
+
+       if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+               mask |= POLLOUT;
+
+       return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = bt_bmc_open,
+       .read           = bt_bmc_read,
+       .write          = bt_bmc_write,
+       .release        = bt_bmc_release,
+       .poll           = bt_bmc_poll,
+       .unlocked_ioctl = bt_bmc_ioctl,
+};
+
+static void poll_timer(unsigned long data)
+{
+       struct bt_bmc *bt_bmc = (void *)data;
+
+       bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+       wake_up(&bt_bmc->queue);
+       add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+       struct bt_bmc *bt_bmc = arg;
+       u32 reg;
+
+       reg = ioread32(bt_bmc->base + BT_CR2);
+       reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+       if (!reg)
+               return IRQ_NONE;
+
+       /* ack pending IRQs */
+       iowrite32(reg, bt_bmc->base + BT_CR2);
+
+       wake_up(&bt_bmc->queue);
+       return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+                            struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       u32 reg;
+       int rc;
+
+       bt_bmc->irq = platform_get_irq(pdev, 0);
+       if (!bt_bmc->irq)
+               return -ENODEV;
+
+       rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+                             DEVICE_NAME, bt_bmc);
+       if (rc < 0) {
+               dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+               bt_bmc->irq = 0;
+               return rc;
+       }
+
+       /*
+        * Configure IRQs on the bmc, clearing the H2B and HBUSY bits;
+        * H2B will be asserted when the bmc has data for us; HBUSY
+        * will be cleared (along with B2H) when we can write the next
+        * message to the BT buffer
+        */
+       reg = ioread32(bt_bmc->base + BT_CR1);
+       reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+       iowrite32(reg, bt_bmc->base + BT_CR1);
+
+       return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+       struct bt_bmc *bt_bmc;
+       struct device *dev;
+       struct resource *res;
+       int rc;
+
+       if (!pdev || !pdev->dev.of_node)
+               return -ENODEV;
+
+       dev = &pdev->dev;
+       dev_info(dev, "Found bt bmc device\n");
+
+       bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+       if (!bt_bmc)
+               return -ENOMEM;
+
+       dev_set_drvdata(&pdev->dev, bt_bmc);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(bt_bmc->base))
+               return PTR_ERR(bt_bmc->base);
+
+       mutex_init(&bt_bmc->mutex);
+       init_waitqueue_head(&bt_bmc->queue);
+
+       bt_bmc->miscdev.minor   = MISC_DYNAMIC_MINOR;
+       bt_bmc->miscdev.name    = DEVICE_NAME;
+       bt_bmc->miscdev.fops    = &bt_bmc_fops;
+       bt_bmc->miscdev.parent  = dev;
+       rc = misc_register(&bt_bmc->miscdev);
+       if (rc) {
+               dev_err(dev, "Unable to register misc device\n");
+               return rc;
+       }
+
+       bt_bmc_config_irq(bt_bmc, pdev);
+
+       if (bt_bmc->irq) {
+               dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+       } else {
+               dev_info(dev, "No IRQ; using timer\n");
+               setup_timer(&bt_bmc->poll_timer, poll_timer,
+                           (unsigned long)bt_bmc);
+               bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+               add_timer(&bt_bmc->poll_timer);
+       }
+
+       iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
+                 (BT_IRQ << BT_CR0_IRQ) |
+                 BT_CR0_EN_CLR_SLV_RDP |
+                 BT_CR0_EN_CLR_SLV_WRP |
+                 BT_CR0_ENABLE_IBT,
+                 bt_bmc->base + BT_CR0);
+
+       clr_b_busy(bt_bmc);
+
+       return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+       struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+       misc_deregister(&bt_bmc->miscdev);
+       if (!bt_bmc->irq)
+               del_timer_sync(&bt_bmc->poll_timer);
+       return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+       { .compatible = "aspeed,ast2400-bt-bmc" },
+       { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+       .driver = {
+               .name           = DEVICE_NAME,
+               .of_match_table = bt_bmc_match,
+       },
+       .probe = bt_bmc_probe,
+       .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the BT interface");
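
The read/write paths above define a simple userspace contract: read() returns one length-prefixed BT request, write() queues one length-prefixed response (minimum five bytes), and the ioctl raises SMS_ATN towards the host. A hedged userspace sketch of one round trip; the device node name is taken from DEVICE_NAME, the response NetFn convention is standard IPMI, and error handling is trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bt-bmc.h>

int main(void)
{
	uint8_t req[256], rsp[5];
	int fd = open("/dev/ipmi-bt-host", O_RDWR);

	if (fd < 0)
		return 1;

	/* Blocks until H2B_ATN; req[0] = length, then NetFn/LUN, Seq, Cmd. */
	if (read(fd, req, sizeof(req)) < 4)
		return 1;

	rsp[0] = 4;		/* length, excluding the length byte itself */
	rsp[1] = req[1] | 0x04;	/* response NetFn for an even request NetFn */
	rsp[2] = req[2];	/* echo Seq */
	rsp[3] = req[3];	/* echo Cmd */
	rsp[4] = 0x00;		/* completion code: success */
	if (write(fd, rsp, sizeof(rsp)) != sizeof(rsp))
		return 1;

	ioctl(fd, BT_BMC_IOCTL_SMS_ATN);	/* optional: poke the host */
	close(fd);
	return 0;
}
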
index d861999..fcdd886 100644 (file)
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
                intf->curr_channel = IPMI_MAX_CHANNELS;
        }
 
+       rv = ipmi_bmc_register(intf, i);
+
        if (rv == 0)
                rv = add_proc_entries(intf, i);
 
-       rv = ipmi_bmc_register(intf, i);
-
  out:
        if (rv) {
                if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
        int intf_num = intf->intf_num;
        ipmi_user_t user;
 
-       ipmi_bmc_unregister(intf);
-
        mutex_lock(&smi_watchers_mutex);
        mutex_lock(&ipmi_interfaces_mutex);
        intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
        mutex_unlock(&ipmi_interfaces_mutex);
 
        remove_proc_entries(intf);
+       ipmi_bmc_unregister(intf);
 
        /*
         * Call all the watcher interfaces to tell them that
index 190122e..85a449c 100644 (file)
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
        ret = clk_hw_register(NULL, &prog->hw);
        if (ret) {
                kfree(prog);
-               hw = &prog->hw;
+               hw = ERR_PTR(ret);
        }
 
        return hw;
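
The at91 fix restores the standard ERR_PTR idiom: on failure a registration helper must hand back an encoded errno, never a pointer into memory it has just freed. A self-contained sketch of the pattern (sk_register_thing() and sk_do_setup() are made-up stand-ins):

#include <linux/err.h>
#include <linux/slab.h>

struct sk_thing { int hw; };

static int sk_do_setup(struct sk_thing *t)
{
	return 0;	/* stand-in for a clk_hw_register()-style call */
}

static struct sk_thing *sk_register_thing(void)
{
	struct sk_thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
	int ret;

	if (!t)
		return ERR_PTR(-ENOMEM);

	ret = sk_do_setup(t);
	if (ret) {
		kfree(t);
		return ERR_PTR(ret);	/* not &t->hw: that memory is gone */
	}
	return t;
}

/* Callers test the result with IS_ERR()/PTR_ERR(), not a NULL check. */
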
index b68bf57..8c7763f 100644 (file)
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
 static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long *parent_rate)
 {
+       struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+       const struct bcm2835_pll_data *data = pll->data;
        u32 ndiv, fdiv;
 
+       rate = clamp(rate, data->min_rate, data->max_rate);
+
        bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
 
        return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
        u32 ana[4];
        int i;
 
-       if (rate < data->min_rate || rate > data->max_rate) {
-               dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
-                       clk_hw_get_name(hw), rate,
-                       data->min_rate, data->max_rate);
-               return -EINVAL;
-       }
-
        if (rate > data->max_fb_rate) {
                use_fb_prediv = true;
                rate /= 2;
index b637f59..eb953d3 100644 (file)
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       drv_data->num_clks = num_clks;
        drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
                                              sizeof(*drv_data->max_clk_data),
                                              GFP_KERNEL);
index fe364e6..c0e8e1f 100644 (file)
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
        hi6220_clk_register_divider(hi6220_div_clks_sys,
                        ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
 }
-CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
 
 
 /* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
        hi6220_clk_register_divider(hi6220_div_clks_media,
                                ARRAY_SIZE(hi6220_div_clks_media), clk_data);
 }
-CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
 
 
 /* clocks in pmctrl */
index 380c372..f042bd2 100644 (file)
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
 
 config COMMON_CLK_MT8135
        bool "Clock driver for Mediatek MT8135"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
        select COMMON_CLK_MEDIATEK
        default ARCH_MEDIATEK
        ---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
 
 config COMMON_CLK_MT8173
        bool "Clock driver for Mediatek MT8173"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
        select COMMON_CLK_MEDIATEK
        default ARCH_MEDIATEK
        ---help---
index 45905fc..cecb0fd 100644 (file)
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
 };
 static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
                                         void __iomem *reg, spinlock_t *lock,
-                                        struct device *dev, struct clk_hw *hw)
+                                        struct device *dev, struct clk_hw **hw)
 {
        const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
                *rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
                gate->lock = lock;
                gate_ops = gate_hw->init->ops;
                gate->reg = reg + (u64)gate->reg;
+               gate->flags = CLK_GATE_SET_TO_DISABLE;
        }
 
        if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
                }
        }
 
-       hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+       *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
                                       data->num_parents, mux_hw,
                                       mux_ops, rate_hw, rate_ops,
                                       gate_hw, gate_ops, CLK_IGNORE_UNUSED);
 
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
+       if (IS_ERR(*hw))
+               return PTR_ERR(*hw);
 
        return 0;
 }
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
        spin_lock_init(&driver_data->lock);
 
        for (i = 0; i < num_periph; i++) {
-               struct clk_hw *hw = driver_data->hw_data->hws[i];
+               struct clk_hw **hw = &driver_data->hw_data->hws[i];
 
                if (armada_3700_add_composite_clk(&data[i], reg,
                                                  &driver_data->lock, dev, hw))
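
The armada-3700 change fixes a classic out-parameter bug: the helper assigned its freshly registered clk_hw to a pointer passed by value, so the caller's hws[] slot was never filled. A minimal, non-driver illustration of why the extra level of indirection is required:

#include <stdio.h>

struct hw { int id; };
static struct hw registered = { 42 };

/* Assigning to a by-value pointer parameter never reaches the caller. */
static void register_broken(struct hw *slot)
{
	slot = &registered;
	(void)slot;
}

/* A pointer-to-pointer lets the callee fill the caller's slot. */
static void register_fixed(struct hw **slot)
{
	*slot = &registered;
}

int main(void)
{
	struct hw *a = NULL, *b = NULL;

	register_broken(a);	/* a stays NULL, like the old hws[i] */
	register_fixed(&b);	/* b now points at the registered hw */
	printf("broken=%p, fixed id=%d\n", (void *)a, b->id);
	return 0;
}
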
index 51d152f..17e68a7 100644 (file)
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
        },
        { },
 };
+MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
 
 static void exynos_audss_clk_teardown(void)
 {
index 5ffb898..26c53f7 100644 (file)
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
        hw_data->num = clk_num;
 
        /* avoid returning NULL for unused idx */
-       for (; clk_num >= 0; clk_num--)
+       while (--clk_num >= 0)
                hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
 
        for (p = data; p->name; p++) {
@@ -110,6 +110,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
 
 static const struct of_device_id uniphier_clk_match[] = {
        /* System clock */
+       {
+               .compatible = "socionext,uniphier-sld3-clock",
+               .data = uniphier_sld3_sys_clk_data,
+       },
        {
                .compatible = "socionext,uniphier-ld4-clock",
                .data = uniphier_ld4_sys_clk_data,
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
                .compatible = "socionext,uniphier-ld20-clock",
                .data = uniphier_ld20_sys_clk_data,
        },
-       /* Media I/O clock */
+       /* Media I/O clock, SD clock */
        {
                .compatible = "socionext,uniphier-sld3-mio-clock",
                .data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
                .data = uniphier_sld3_mio_clk_data,
        },
        {
-               .compatible = "socionext,uniphier-pro5-mio-clock",
-               .data = uniphier_pro5_mio_clk_data,
+               .compatible = "socionext,uniphier-pro5-sd-clock",
+               .data = uniphier_pro5_sd_clk_data,
        },
        {
-               .compatible = "socionext,uniphier-pxs2-mio-clock",
-               .data = uniphier_pro5_mio_clk_data,
+               .compatible = "socionext,uniphier-pxs2-sd-clock",
+               .data = uniphier_pro5_sd_clk_data,
        },
        {
                .compatible = "socionext,uniphier-ld11-mio-clock",
                .data = uniphier_sld3_mio_clk_data,
        },
        {
-               .compatible = "socionext,uniphier-ld20-mio-clock",
-               .data = uniphier_pro5_mio_clk_data,
+               .compatible = "socionext,uniphier-ld20-sd-clock",
+               .data = uniphier_pro5_sd_clk_data,
        },
        /* Peripheral clock */
        {
index 6aa7ec7..218d20f 100644 (file)
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
        { /* sentinel */ }
 };
 
-const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
        UNIPHIER_MIO_CLK_SD_FIXED,
        UNIPHIER_MIO_CLK_SD(0, 0),
        UNIPHIER_MIO_CLK_SD(1, 1),
index 15a2f2c..2c243a8 100644 (file)
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
        struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
        int num_parents = clk_hw_get_num_parents(hw);
        int ret;
-       u32 val;
+       unsigned int val;
        u8 i;
 
        ret = regmap_read(mux->regmap, mux->reg, &val);
index 3ae1840..0244dba 100644 (file)
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
 extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
 extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
 extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
-extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
 extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
 extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
 
index 2451908..e2c6e43 100644 (file)
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
 config SYS_SUPPORTS_EM_STI
         bool
 
+config CLKSRC_JCORE_PIT
+       bool "J-Core PIT timer driver" if COMPILE_TEST
+       depends on OF
+       depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
+       select CLKSRC_MMIO
+       help
+         This enables the clocksource and clockevent driver for the
+         integrated PIT in the J-Core synthesizable, open source SoC.
+
 config SH_TIMER_CMT
        bool "Renesas CMT timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
index fd9d6df..cf87f40 100644 (file)
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC)  += tcb_clksrc.o
 obj-$(CONFIG_X86_PM_TIMER)     += acpi_pm.o
 obj-$(CONFIG_SCx200HR_TIMER)   += scx200_hrt.o
 obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)   += cs5535-clockevt.o
+obj-$(CONFIG_CLKSRC_JCORE_PIT)         += jcore-pit.o
 obj-$(CONFIG_SH_TIMER_CMT)     += sh_cmt.o
 obj-$(CONFIG_SH_TIMER_MTU2)    += sh_mtu2.o
 obj-$(CONFIG_SH_TIMER_TMU)     += sh_tmu.o
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644 (file)
index 0000000..54e1665
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * J-Core SoC PIT/clocksource driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PIT_IRQ_SHIFT          12
+#define PIT_PRIO_SHIFT         20
+#define PIT_ENABLE_SHIFT       26
+#define PIT_PRIO_MASK          0xf
+
+#define REG_PITEN              0x00
+#define REG_THROT              0x10
+#define REG_COUNT              0x14
+#define REG_BUSPD              0x18
+#define REG_SECHI              0x20
+#define REG_SECLO              0x24
+#define REG_NSEC               0x28
+
+struct jcore_pit {
+       struct clock_event_device       ced;
+       void __iomem                    *base;
+       unsigned long                   periodic_delta;
+       u32                             enable_val;
+};
+
+static void __iomem *jcore_pit_base;
+static struct jcore_pit __percpu *jcore_pit_percpu;
+
+static notrace u64 jcore_sched_clock_read(void)
+{
+       u32 seclo, nsec, seclo0;
+       __iomem void *base = jcore_pit_base;
+
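+       /*
+        * SECLO and NSEC cannot be read atomically, so re-read SECLO
+        * until it is stable around the NSEC read; this guarantees the
+        * (seconds, nanoseconds) pair is consistent across a rollover.
+        */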
+       seclo = readl(base + REG_SECLO);
+       do {
+               seclo0 = seclo;
+               nsec  = readl(base + REG_NSEC);
+               seclo = readl(base + REG_SECLO);
+       } while (seclo0 != seclo);
+
+       return seclo * NSEC_PER_SEC + nsec;
+}
+
+static cycle_t jcore_clocksource_read(struct clocksource *cs)
+{
+       return jcore_sched_clock_read();
+}
+
+static int jcore_pit_disable(struct jcore_pit *pit)
+{
+       writel(0, pit->base + REG_PITEN);
+       return 0;
+}
+
+static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
+{
+       jcore_pit_disable(pit);
+       writel(delta, pit->base + REG_THROT);
+       writel(pit->enable_val, pit->base + REG_PITEN);
+       return 0;
+}
+
+static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
+{
+       struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+       return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
+{
+       struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+       return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
+{
+       struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+       return jcore_pit_set(pit->periodic_delta, pit);
+}
+
+static int jcore_pit_set_next_event(unsigned long delta,
+                                   struct clock_event_device *ced)
+{
+       struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+       return jcore_pit_set(delta, pit);
+}
+
+static int jcore_pit_local_init(unsigned cpu)
+{
+       struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+       unsigned buspd, freq;
+
+       pr_info("Local J-Core PIT init on cpu %u\n", cpu);
+
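+       /*
+        * REG_BUSPD is the bus period in ns; e.g. buspd = 40 gives
+        * freq = 10^9 / 40 = 25 MHz and, with HZ=100, a periodic
+        * delta of 10^9 / (100 * 40) = 250000 timer ticks.
+        */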
+       buspd = readl(pit->base + REG_BUSPD);
+       freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
+       pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+
+       clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+
+       return 0;
+}
+
+static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
+{
+       struct jcore_pit *pit = this_cpu_ptr(dev_id);
+
+       if (clockevent_state_oneshot(&pit->ced))
+               jcore_pit_disable(pit);
+
+       pit->ced.event_handler(&pit->ced);
+
+       return IRQ_HANDLED;
+}
+
+static int __init jcore_pit_init(struct device_node *node)
+{
+       int err;
+       unsigned pit_irq, cpu;
+       unsigned long hwirq;
+       u32 irqprio, enable_val;
+
+       jcore_pit_base = of_iomap(node, 0);
+       if (!jcore_pit_base) {
+               pr_err("Error: Cannot map base address for J-Core PIT\n");
+               return -ENXIO;
+       }
+
+       pit_irq = irq_of_parse_and_map(node, 0);
+       if (!pit_irq) {
+               pr_err("Error: J-Core PIT has no IRQ\n");
+               return -ENXIO;
+       }
+
+       pr_info("Initializing J-Core PIT at %p IRQ %d\n",
+               jcore_pit_base, pit_irq);
+
+       err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
+                                   NSEC_PER_SEC, 400, 32,
+                                   jcore_clocksource_read);
+       if (err) {
+               pr_err("Error registering clocksource device: %d\n", err);
+               return err;
+       }
+
+       sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
+
+       jcore_pit_percpu = alloc_percpu(struct jcore_pit);
+       if (!jcore_pit_percpu) {
+               pr_err("Failed to allocate memory for clock event device\n");
+               return -ENOMEM;
+       }
+
+       err = request_irq(pit_irq, jcore_timer_interrupt,
+                         IRQF_TIMER | IRQF_PERCPU,
+                         "jcore_pit", jcore_pit_percpu);
+       if (err) {
+               pr_err("pit irq request failed: %d\n", err);
+               free_percpu(jcore_pit_percpu);
+               return err;
+       }
+
+       /*
+        * The J-Core PIT is not hard-wired to a particular IRQ, but
+        * integrated with the interrupt controller such that the IRQ it
+        * generates is programmable, as follows:
+        *
+        * The bit layout of the PIT enable register is:
+        *
+        *      .....e..ppppiiiiiiii............
+        *
+        * where the .'s indicate unrelated/unused bits, e is enable,
+        * p is priority, and i is hard irq number.
+        *
+        * For the PIT included in AIC1 (obsolete but still in use),
+        * any hard irq (trap number) can be programmed via the 8
+        * iiiiiiii bits, and a priority (0-15) is programmable
+        * separately in the pppp bits.
+        *
+        * For the PIT included in AIC2 (current), the programming
+        * interface is equivalent modulo interrupt mapping. This is
+        * why a different compatible tag was not used. However, only
+        * traps 64-127 (the ones actually intended to be used for
+        * interrupts, rather than syscalls/exceptions/etc.) can be
+        * programmed (the high 2 bits of i are ignored) and the
+        * priority pppp is shifted left by 2 and OR'd onto the irq number. This
+        * choice seems to have been made on the hardware engineering
+        * side under an assumption that preserving old AIC1 priority
+        * mappings was important. Future models will likely ignore
+        * the pppp field.
+        */
+       hwirq = irq_get_irq_data(pit_irq)->hwirq;
+       irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
+       enable_val = (1U << PIT_ENABLE_SHIFT)
+                  | (hwirq << PIT_IRQ_SHIFT)
+                  | (irqprio << PIT_PRIO_SHIFT);
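+       /*
+        * Worked example with a hypothetical hwirq of 0x4e: irqprio is
+        * (0x4e >> 2) & 0xf = 0x3, so enable_val packs to
+        * (1 << 26) | (0x4e << 12) | (0x3 << 20).
+        */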
+
+       for_each_present_cpu(cpu) {
+               struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
+
+               pit->base = of_iomap(node, cpu);
+               if (!pit->base) {
+                       pr_err("Unable to map PIT for cpu %u\n", cpu);
+                       continue;
+               }
+
+               pit->ced.name = "jcore_pit";
+               pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
+                                 | CLOCK_EVT_FEAT_ONESHOT
+                                 | CLOCK_EVT_FEAT_PERCPU;
+               pit->ced.cpumask = cpumask_of(cpu);
+               pit->ced.rating = 400;
+               pit->ced.irq = pit_irq;
+               pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
+               pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
+               pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
+               pit->ced.set_next_event = jcore_pit_set_next_event;
+
+               pit->enable_val = enable_val;
+       }
+
+       cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+                         "AP_JCORE_TIMER_STARTING",
+                         jcore_pit_local_init, NULL);
+
+       return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
index c184eb8..4f87f3e 100644 (file)
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+{
+       struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+
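+       /* The hardware counter counts down; invert it so the clocksource
+        * counts up, as clocksource_mmio_readl_down() did before.
+        */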
+       return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+}
+
 static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
                                unsigned long event, void *data)
 {
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
        writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
               base + TIMER_CTL_REG(1));
 
-       ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
-                                   rate, 340, 32, clocksource_mmio_readl_down);
+       cs->clksrc.name = node->name;
+       cs->clksrc.rating = 340;
+       cs->clksrc.read = sun5i_clksrc_read;
+       cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+       cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+       ret = clocksource_register_hz(&cs->clksrc, rate);
        if (ret) {
                pr_err("Couldn't register clock source.\n");
                goto err_remove_notifier;
index f535f81..4737520 100644 (file)
@@ -179,6 +179,7 @@ struct _pid {
 /**
  * struct cpudata -    Per CPU instance data storage
  * @cpu:               CPU number for this instance data
+ * @policy:            CPUFreq policy value (performance or powersave)
  * @update_util:       CPUFreq utility callback information
  * @update_util_set:   CPUFreq utility callback is set
  * @iowait_boost:      iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
 struct cpudata {
        int cpu;
 
+       unsigned int policy;
        struct update_util_data update_util;
        bool   update_util_set;
 
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
-       int pstate = cpu->pstate.min_pstate;
-
        trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        cpu->pstate.current_pstate = pstate;
        /*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
                      pstate_funcs.get_val(cpu, pstate));
 }
 
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+       int min_pstate, max_pstate;
+
+       update_turbo_state();
+       intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+       intel_pstate_set_pstate(cpu, max_pstate);
+}
+
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
        cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1325,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
        from = cpu->pstate.current_pstate;
 
-       target_pstate = pstate_funcs.get_target_pstate(cpu);
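+       /* In performance mode, pin the request to the max (turbo) P-state. */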
+       target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+               cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
 
        intel_pstate_update_pstate(cpu, target_pstate);
 
@@ -1491,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
                 policy->cpuinfo.max_freq, policy->max);
 
-       cpu = all_cpu_data[0];
+       cpu = all_cpu_data[policy->cpu];
+       cpu->policy = policy->policy;
+
        if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
            policy->max < policy->cpuinfo.max_freq &&
            policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1499,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                policy->max = policy->cpuinfo.max_freq;
        }
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+       if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
                        pr_debug("set performance\n");
@@ -1535,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
+       if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+               /*
+                * NOHZ_FULL CPUs need this as the governor callback may not
+                * be invoked on them.
+                */
+               intel_pstate_clear_update_util_hook(policy->cpu);
+               intel_pstate_max_within_limits(cpu);
+       }
+
        intel_pstate_set_update_util_hook(policy->cpu);
 
        intel_pstate_hwp_set_policy(policy);
index daadd20..3e2ab3b 100644 (file)
@@ -14,7 +14,7 @@ if DEV_DAX
 
 config DEV_DAX_PMEM
        tristate "PMEM DAX: direct access to persistent memory"
-       depends on NVDIMM_DAX
+       depends on LIBNVDIMM && NVDIMM_DAX
        default DEV_DAX
        help
          Support raw access to persistent memory.  Note that this
index 9630d88..4a15fa5 100644 (file)
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
 
        dev_dbg(dax_pmem->dev, "%s\n", __func__);
        percpu_ref_exit(ref);
-       wait_for_completion(&dax_pmem->cmp);
 }
 
 static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
 
        dev_dbg(dax_pmem->dev, "%s\n", __func__);
        percpu_ref_kill(ref);
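+       /* Block until the last outstanding reference has been dropped. */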
+       wait_for_completion(&dax_pmem->cmp);
 }
 
 static int dax_pmem_probe(struct device *dev)
index ca957a5..b8cde09 100644 (file)
@@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work)
        if (ret)
                return;
 
-       extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+       extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
 }
 
 static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
index 631c977..180f0a9 100644 (file)
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
 
        lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
                                          PCILYNX_MAX_REGISTER);
+       if (lynx->registers == NULL) {
+               dev_err(&dev->dev, "Failed to map registers\n");
+               ret = -ENOMEM;
+               goto fail_deallocate_lynx;
+       }
 
        lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
                                sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
            lynx->rcv_buffer == NULL) {
                dev_err(&dev->dev, "Failed to allocate receive buffer\n");
                ret = -ENOMEM;
-               goto fail_deallocate;
+               goto fail_deallocate_buffers;
        }
        lynx->rcv_start_pcl->next       = cpu_to_le32(lynx->rcv_pcl_bus);
        lynx->rcv_pcl->next             = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
                dev_err(&dev->dev,
                        "Failed to allocate shared interrupt %d\n", dev->irq);
                ret = -EIO;
-               goto fail_deallocate;
+               goto fail_deallocate_buffers;
        }
 
        lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);
 
-fail_deallocate:
+fail_deallocate_buffers:
        if (lynx->rcv_start_pcl)
                pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                                lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
                pci_free_consistent(lynx->pci_device, PAGE_SIZE,
                                lynx->rcv_buffer, lynx->rcv_buffer_bus);
        iounmap(lynx->registers);
+
+fail_deallocate_lynx:
        kfree(lynx);
 
 fail_disable:
index c069451..5e23e2d 100644 (file)
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
                                   -mno-mmx -mno-sse
 
 cflags-$(CONFIG_ARM64)         := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
                                   -fno-builtin -fpic -mno-single-pic-base
 
 cflags-$(CONFIG_EFI_ARMSTUB)   += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
 # decompressor. So move our .data to .data.efistub, which is preserved
 # explicitly by the decompressor linker script.
 #
-STUBCOPY_FLAGS-$(CONFIG_ARM)   += --rename-section .data=.data.efistub
+STUBCOPY_FLAGS-$(CONFIG_ARM)   += --rename-section .data=.data.efistub \
+                                  -R ___ksymtab+sort -R ___kcrctab+sort
 STUBCOPY_RELOC-$(CONFIG_ARM)   := R_ARM_ABS
index 26ee00f..d011cb8 100644 (file)
@@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ
 
 config GPIO_MOCKUP
        tristate "GPIO Testing Driver"
-       depends on GPIOLIB
+       depends on GPIOLIB && SYSFS
        select GPIO_SYSFS
        help
          This enables GPIO Testing driver, which provides a way to test GPIO
index 9457e20..dc37dbe 100644 (file)
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
        { .compatible = "qca,ar9340-gpio" },
        {},
 };
+MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
 
 static int ath79_gpio_probe(struct platform_device *pdev)
 {
index 425501c..793518a 100644 (file)
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
                                irq_hw_number_t hwirq)
 {
        irq_set_chip_data(irq, h->host_data);
-       irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+       irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
 
        return 0;
 }
index b9daa0b..ee17248 100644 (file)
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
        writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
 
        irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
-       if (irq_base < 0)
-               return irq_base;
+       if (irq_base < 0) {
+               err = irq_base;
+               goto out_iounmap;
+       }
 
        port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
                                             &irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
        irq_domain_remove(port->domain);
 out_irqdesc_free:
        irq_free_descs(irq_base, 32);
+out_iounmap:
+       iounmap(port->base);
        return err;
 }
 
index e7d422a..5b00427 100644 (file)
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
                 * 801/1801/1600, bits are cleared when read.
                 * Edge detect register is not present on 801/1600/1801
                 */
-               if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+               if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
                    stmpe->partnum != STMPE1801) {
                        stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
                        stmpe_reg_write(stmpe,
index 9925611..c2a80b4 100644 (file)
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
        { .compatible = "technologic,ts4800-gpio", },
        {},
 };
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
 
 static struct platform_driver ts4800_gpio_driver = {
        .driver = {
index 58ece20..72a4b32 100644 (file)
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
 {
        int idx, i;
        unsigned int irq_flags;
+       int ret = -ENOENT;
 
        for (i = 0, idx = 0; idx <= index; i++) {
                struct acpi_gpio_info info;
                struct gpio_desc *desc;
 
                desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
-               if (IS_ERR(desc))
+               if (IS_ERR(desc)) {
+                       ret = PTR_ERR(desc);
                        break;
+               }
                if (info.gpioint && idx++ == index) {
                        int irq = gpiod_to_irq(desc);
 
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
                }
 
        }
-       return -ENOENT;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
 
index f0fc3a0..20e09b7 100644 (file)
@@ -333,6 +333,13 @@ struct linehandle_state {
        u32 numdescs;
 };
 
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+       (GPIOHANDLE_REQUEST_INPUT | \
+       GPIOHANDLE_REQUEST_OUTPUT | \
+       GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+       GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+       GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
 static long linehandle_ioctl(struct file *filep, unsigned int cmd,
                             unsigned long arg)
 {
@@ -344,6 +351,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
        if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
                int val;
 
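+               /* Zero-fill so uninitialized stack bytes never reach userspace. */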
+               memset(&ghd, 0, sizeof(ghd));
+
                /* TODO: check if descriptors are really input */
                for (i = 0; i < lh->numdescs; i++) {
                        val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -444,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                u32 lflags = handlereq.flags;
                struct gpio_desc *desc;
 
+               if (offset >= gdev->ngpio) {
+                       ret = -EINVAL;
+                       goto out_free_descs;
+               }
+
+               /* Return an error if an unknown flag is set */
+               if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+                       ret = -EINVAL;
+                       goto out_free_descs;
+               }
+
                desc = &gdev->descs[offset];
                ret = gpiod_request(desc, lh->label);
                if (ret)
@@ -536,6 +556,10 @@ struct lineevent_state {
        struct mutex read_lock;
 };
 
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+       (GPIOEVENT_REQUEST_RISING_EDGE | \
+       GPIOEVENT_REQUEST_FALLING_EDGE)
+
 static unsigned int lineevent_poll(struct file *filep,
                                   struct poll_table_struct *wait)
 {
@@ -623,6 +647,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
        if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
                int val;
 
+               memset(&ghd, 0, sizeof(ghd));
+
                val = gpiod_get_value_cansleep(le->desc);
                if (val < 0)
                        return val;
@@ -726,6 +752,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        lflags = eventreq.handleflags;
        eflags = eventreq.eventflags;
 
+       if (offset >= gdev->ngpio) {
+               ret = -EINVAL;
+               goto out_free_label;
+       }
+
+       /* Return an error if an unknown flag is set */
+       if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+           (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+               ret = -EINVAL;
+               goto out_free_label;
+       }
+
        /* This is just wrong: we don't look for events on output lines */
        if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
                ret = -EINVAL;
@@ -823,6 +861,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
                struct gpiochip_info chipinfo;
 
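+               /* Clear padding and any fields not explicitly set below. */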
+               memset(&chipinfo, 0, sizeof(chipinfo));
+
                strncpy(chipinfo.name, dev_name(&gdev->dev),
                        sizeof(chipinfo.name));
                chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +879,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
                if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
                        return -EFAULT;
-               if (lineinfo.line_offset > gdev->ngpio)
+               if (lineinfo.line_offset >= gdev->ngpio)
                        return -EINVAL;
 
                desc = &gdev->descs[lineinfo.line_offset];
index 2e3a054..e3281d4 100644 (file)
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
        return ret;
 }
 
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
 {
        struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
                drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
                amdgpu_connector->ddc_bus->has_aux = false;
        }
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
        amdgpu_connector_free_edid(connector);
        kfree(amdgpu_connector->con_priv);
        drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = amdgpu_connector_lvds_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .early_unregister = amdgpu_connector_unregister,
        .destroy = amdgpu_connector_destroy,
        .set_property = amdgpu_connector_set_lcd_property,
 };
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = amdgpu_connector_vga_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .early_unregister = amdgpu_connector_unregister,
        .destroy = amdgpu_connector_destroy,
        .set_property = amdgpu_connector_set_property,
 };
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
        .detect = amdgpu_connector_dvi_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = amdgpu_connector_set_property,
+       .early_unregister = amdgpu_connector_unregister,
        .destroy = amdgpu_connector_destroy,
        .force = amdgpu_connector_dvi_force,
 };
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
        .detect = amdgpu_connector_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = amdgpu_connector_set_property,
+       .early_unregister = amdgpu_connector_unregister,
        .destroy = amdgpu_connector_destroy,
        .force = amdgpu_connector_dvi_force,
 };
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
        .detect = amdgpu_connector_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = amdgpu_connector_set_lcd_property,
+       .early_unregister = amdgpu_connector_unregister,
        .destroy = amdgpu_connector_destroy,
        .force = amdgpu_connector_dvi_force,
 };
index e203e55..a5e2fcb 100644 (file)
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
                ctx->rings[i].sequence = 1;
                ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
+
+       ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
        /* create context entity for each ring */
        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
index 7dbe85d..b4f4a92 100644 (file)
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
-               if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
-                       adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
-                       continue;
-               /* enable clockgating to save power */
-               r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-                                                                   AMD_CG_STATE_GATE);
-               if (r) {
-                       DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
-                       return r;
-               }
                if (adev->ip_blocks[i].funcs->late_init) {
                        r = adev->ip_blocks[i].funcs->late_init((void *)adev);
                        if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
                        }
                        adev->ip_block_status[i].late_initialized = true;
                }
+               /* skip CG for VCE/UVD, it's handled specially */
+               if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+                   adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+                       /* enable clockgating to save power */
+                       r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+                                                                           AMD_CG_STATE_GATE);
+                       if (r) {
+                               DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+                                         adev->ip_blocks[i].funcs->name, r);
+                               return r;
+                       }
+               }
        }
 
        return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 {
        int i, r;
 
+       /* need to disable SMC first */
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].hw)
+                       continue;
+               if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+                       /* ungate blocks before hw fini so that we can shut down the blocks safely */
+                       r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+                                                                           AMD_CG_STATE_UNGATE);
+                       if (r) {
+                               DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+                                         adev->ip_blocks[i].funcs->name, r);
+                               return r;
+                       }
+                       r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+                       /* XXX handle errors */
+                       if (r) {
+                               DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+                                         adev->ip_blocks[i].funcs->name, r);
+                       }
+                       adev->ip_block_status[i].hw = false;
+                       break;
+               }
+       }
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_block_status[i].hw)
                        continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
                if (!adev->ip_block_status[i].valid)
                        continue;
                if (adev->ip_blocks[i].funcs->check_soft_reset)
-                       adev->ip_blocks[i].funcs->check_soft_reset(adev);
+                       adev->ip_block_status[i].hang =
+                               adev->ip_blocks[i].funcs->check_soft_reset(adev);
                if (adev->ip_block_status[i].hang) {
                        DRM_INFO("IP block:%d is hang!\n", i);
                        asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 
 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 {
-       if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
-           adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
-           adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
-           adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
-               DRM_INFO("Some block need full reset!\n");
-               return true;
+       int i;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+                   (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+                   (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+                   (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+                       if (adev->ip_block_status[i].hang) {
+                               DRM_INFO("Some block need full reset!\n");
+                               return true;
+                       }
+               }
        }
        return false;
 }
index fe36caf..14f57d9 100644 (file)
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
        printk("\n");
 }
 
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
-       u32 line_time_us, vblank_lines;
+       u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-                               line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-                                       amdgpu_crtc->hw_mode.clock;
-                               vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+                               vblank_in_pixels =
+                                       amdgpu_crtc->hw_mode.crtc_htotal *
+                                       (amdgpu_crtc->hw_mode.crtc_vblank_end -
                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
-                                       (amdgpu_crtc->v_border * 2);
-                               vblank_time_us = vblank_lines * line_time_us;
+                                       (amdgpu_crtc->v_border * 2));
+
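+                               /* Multiply before dividing so sub-line precision is not lost. */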
+                               vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
                                break;
                        }
                }
index aa074fa..f3efb1c 100644 (file)
@@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = {
 
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
+       /* reserve PAT memory space to WC for VRAM */
+       arch_io_reserve_memtype_wc(adev->mc.aper_base,
+                                  adev->mc.aper_size);
+
        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
@@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
+       arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
 }
 
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
index e1fa873..3cb5e90 100644 (file)
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
        ent = debugfs_create_file(name,
                                  S_IFREG | S_IRUGO, root,
                                  ring, &amdgpu_debugfs_ring_fops);
-       if (IS_ERR(ent))
-               return PTR_ERR(ent);
+       if (!ent)
+               return -ENOMEM;
 
        i_size_write(ent->d_inode, ring->ring_size + 12);
        ring->ent = ent;
index 887483b..dcaf691 100644 (file)
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+       unsigned int flags = 0;
        unsigned pinned = 0;
        int r;
 
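+       /* Request writable pages (FOLL_WRITE) unless the userptr is read-only. */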
+       if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+               flags |= FOLL_WRITE;
+
        if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
                /* check that we only use anonymous memory
                   to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
                list_add(&guptask.list, &gtt->guptasks);
                spin_unlock(&gtt->guptasklock);
 
-               r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+               r = get_user_pages(userptr, num_pages, flags, p, NULL);
 
                spin_lock(&gtt->guptasklock);
                list_del(&guptask.list);
index f80a083..3c082e1 100644 (file)
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
        return 0;
 }
 
-/* borrowed from KV, need future unify */
 static int cz_dpm_get_temperature(struct amdgpu_device *adev)
 {
        int actual_temp = 0;
-       uint32_t temp = RREG32_SMC(0xC0300E0C);
+       uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+       uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
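+       /*
+        * CUR_TEMP is in 1/8 degree C steps; when CUR_TEMP_RANGE_SEL is
+        * set the scaled value carries a 49 degree C offset. The result
+        * is returned in millidegrees.
+        */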
 
-       if (temp)
+       if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
                actual_temp = 1000 * ((temp / 8) - 49);
+       else
+               actual_temp = 1000 * (temp / 8);
 
        return actual_temp;
 }
index 613ebb7..4108c68 100644 (file)
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
        return 0;
 }
 
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (dce_v10_0_is_display_hung(adev))
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
-       else
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
-       return 0;
+       return dce_v10_0_is_display_hung(adev);
 }
 
 static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
        u32 srbm_soft_reset = 0, tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
-               return 0;
-
        if (dce_v10_0_is_display_hung(adev))
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
index 6c6ff57..ee6a48a 100644 (file)
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
        int r;
+       u32 tmp;
 
        gfx_v8_0_rlc_stop(adev);
 
        /* disable CG */
-       WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+       tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+       tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+                RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+       WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
        if (adev->asic_type == CHIP_POLARIS11 ||
-           adev->asic_type == CHIP_POLARIS10)
-               WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+           adev->asic_type == CHIP_POLARIS10) {
+               tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+               tmp &= ~0x3;
+               WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+       }
 
        /* disable PG */
        WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
                                                SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
        if (grbm_soft_reset || srbm_soft_reset) {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
                adev->gfx.grbm_soft_reset = grbm_soft_reset;
                adev->gfx.srbm_soft_reset = srbm_soft_reset;
+               return true;
        } else {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
                adev->gfx.grbm_soft_reset = 0;
                adev->gfx.srbm_soft_reset = 0;
+               return false;
        }
-
-       return 0;
 }
 
 static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+       if ((!adev->gfx.grbm_soft_reset) &&
+           (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+       if ((!adev->gfx.grbm_soft_reset) &&
+           (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+       if ((!adev->gfx.grbm_soft_reset) &&
+           (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
index 1b319f5..c22ef14 100644 (file)
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
 
 }
 
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
 {
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
                                                        SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }
        if (srbm_soft_reset) {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
                adev->mc.srbm_soft_reset = srbm_soft_reset;
+               return true;
        } else {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
                adev->mc.srbm_soft_reset = 0;
+               return false;
        }
-       return 0;
 }
 
 static int gmc_v8_0_pre_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+       if (!adev->mc.srbm_soft_reset)
                return 0;
 
        gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+       if (!adev->mc.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->mc.srbm_soft_reset;
 
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+       if (!adev->mc.srbm_soft_reset)
                return 0;
 
        gmc_v8_0_mc_resume(adev, &adev->mc.save);
index f325fd8..a9d1094 100644 (file)
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
        }
 
        if (srbm_soft_reset) {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
                adev->sdma.srbm_soft_reset = srbm_soft_reset;
+               return true;
        } else {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
                adev->sdma.srbm_soft_reset = 0;
+               return false;
        }
-
-       return 0;
 }
 
 static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+       if (!adev->sdma.srbm_soft_reset)
                return 0;
 
        srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+       if (!adev->sdma.srbm_soft_reset)
                return 0;
 
        srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
        u32 srbm_soft_reset = 0;
        u32 tmp;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+       if (!adev->sdma.srbm_soft_reset)
                return 0;
 
        srbm_soft_reset = adev->sdma.srbm_soft_reset;
index 8bd0892..3de7bca 100644 (file)
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                max_sclk = 75000;
                max_mclk = 80000;
        }
+       /* Limit clocks for some HD8600 parts */
+       if (adev->pdev->device == 0x6660 &&
+           adev->pdev->revision == 0x83) {
+               max_sclk = 75000;
+               max_mclk = 80000;
+       }
 
        if (rps->vce_active) {
                rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
index d127d59..b4ea229 100644 (file)
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
                                                SOFT_RESET_IH, 1);
 
        if (srbm_soft_reset) {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
                adev->irq.srbm_soft_reset = srbm_soft_reset;
+               return true;
        } else {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
                adev->irq.srbm_soft_reset = 0;
+               return false;
        }
-
-       return 0;
 }
 
 static int tonga_ih_pre_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+       if (!adev->irq.srbm_soft_reset)
                return 0;
 
        return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+       if (!adev->irq.srbm_soft_reset)
                return 0;
 
        return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+       if (!adev->irq.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->irq.srbm_soft_reset;
 
index e0fd9f2..ab3df6d 100644 (file)
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
 }
 
 #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
        if (srbm_soft_reset) {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
                adev->uvd.srbm_soft_reset = srbm_soft_reset;
+               return true;
        } else {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
                adev->uvd.srbm_soft_reset = 0;
+               return false;
        }
-       return 0;
 }
+
 static int uvd_v6_0_pre_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+       if (!adev->uvd.srbm_soft_reset)
                return 0;
 
        uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+       if (!adev->uvd.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.srbm_soft_reset;
 
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+       if (!adev->uvd.srbm_soft_reset)
                return 0;
 
        mdelay(5);
index 3f6db4e..8533269 100644 (file)
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
 #define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
                                      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
 
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
        WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       mutex_unlock(&adev->grbm_idx_mutex);
 
        if (srbm_soft_reset) {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
                adev->vce.srbm_soft_reset = srbm_soft_reset;
+               return true;
        } else {
-               adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
                adev->vce.srbm_soft_reset = 0;
+               return false;
        }
-       mutex_unlock(&adev->grbm_idx_mutex);
-       return 0;
 }
 
 static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+       if (!adev->vce.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->vce.srbm_soft_reset;
 
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+       if (!adev->vce.srbm_soft_reset)
                return 0;
 
        mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+       if (!adev->vce.srbm_soft_reset)
                return 0;
 
        mdelay(5);
index c934b78..bec8125 100644 (file)
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
        /* poll for idle */
        int (*wait_for_idle)(void *handle);
        /* check soft reset the IP block */
-       int (*check_soft_reset)(void *handle);
+       bool (*check_soft_reset)(void *handle);
        /* pre soft reset the IP block */
        int (*pre_soft_reset)(void *handle);
        /* soft reset the IP block */
index 92b1178..8cee4e0 100644 (file)
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
        uninitialize_display_phy_access_tasks,
        disable_gfx_voltage_island_power_gating_tasks,
        disable_gfx_clock_gating_tasks,
+       uninitialize_thermal_controller_tasks,
        set_boot_state_tasks,
        adjust_power_state_tasks,
        disable_dynamic_state_management_tasks,
index 7e4fcbb..9604249 100644 (file)
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
        return 0;
 }
 
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+       int actual_temp = 0;
+       uint32_t val = cgs_read_ind_register(hwmgr->device,
+                                            CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+       uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+       if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+               actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       else
+               actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+       return actual_temp;
+}
+
 static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
 {
        struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
        case AMDGPU_PP_SENSOR_VCE_POWER:
                *value = cz_hwmgr->vce_power_gated ? 0 : 1;
                return 0;
+       case AMDGPU_PP_SENSOR_GPU_TEMP:
+               *value = cz_thermal_get_temperature(hwmgr);
+               return 0;
        default:
                return -EINVAL;
        }
index 508245d..609996c 100644 (file)
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        /* disable SCLK dpm */
-       if (!data->sclk_dpm_key_disabled)
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_DPM_Disable) == 0),
-                               "Failed to disable SCLK DPM!",
-                               return -EINVAL);
+       if (!data->sclk_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                               "Trying to disable SCLK DPM when DPM is disabled",
+                               return 0);
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+       }
 
        /* disable MCLK dpm */
        if (!data->mclk_dpm_key_disabled) {
-               PP_ASSERT_WITH_CODE(
-                               (smum_send_msg_to_smc(hwmgr->smumgr,
-                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
-                               "Failed to disable MCLK DPM!",
-                               return -EINVAL);
+               PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                               "Trying to disable MCLK DPM when DPM is disabled",
+                               return 0);
+               smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
        }
 
        return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
                                return -EINVAL);
        }
 
-       if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
-               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
-               return -EINVAL;
-       }
+       smu7_disable_sclk_mclk_dpm(hwmgr);
+
+       PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+                       "Trying to disable voltage DPM when DPM is disabled",
+                       return 0);
+
+       smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
 
        return 0;
 }
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
-       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
 
        tmp_result = smu7_enable_sclk_control(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to disable thermal auto throttle!", result = tmp_result);
 
+       if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+               PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+                                       "Failed to disable AVFS!",
+                                       return -EINVAL);
+       }
+
        tmp_result = smu7_stop_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
 
 
-       if (table_info != NULL)
-               sclk_table = table_info->vdd_dep_on_sclk;
+       if (table_info == NULL)
+               return -EINVAL;
+
+       sclk_table = table_info->vdd_dep_on_sclk;
 
        for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
                vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
 
 int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
 {
-       const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
-       const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+       const struct smu7_power_state *psa;
+       const struct smu7_power_state *psb;
        int i;
 
        if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
                return -EINVAL;
 
+       psa = cast_const_phw_smu7_power_state(pstate1);
+       psb = cast_const_phw_smu7_power_state(pstate2);
        /* If the two states don't even have the same number of performance levels they cannot be the same state. */
        if (psa->performance_level_count != psb->performance_level_count) {
                *equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
        .set_mclk_od = smu7_set_mclk_od,
        .get_clock_by_type = smu7_get_clock_by_type,
        .read_sensor = smu7_read_sensor,
+       .dynamic_state_management_disable = smu7_disable_dpm_tasks,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
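
The smu7 hunks above hinge on PP_ASSERT_WITH_CODE(), whose third argument is a statement run when the condition fails; the patch swaps a hard -EINVAL for a benign `return 0` when DPM is already stopped, and only then sends the SMC disable message. A minimal userspace sketch of that control flow (the macro body and the dpm_running flag are assumptions, not the driver's actual definitions):

#include <stdbool.h>
#include <stdio.h>

/* Assumed shape of the powerplay assert macro: log the message and run
 * the recovery statement when the condition is false. */
#define PP_ASSERT_WITH_CODE(cond, msg, action)          \
        do {                                            \
                if (!(cond)) {                          \
                        fprintf(stderr, "%s\n", msg);   \
                        action;                         \
                }                                       \
        } while (0)

static bool dpm_running; /* hypothetical stand-in for smum_is_dpm_running() */

static int disable_sclk_dpm(void)
{
        /* As in the patch: "already disabled" is success, not -EINVAL. */
        PP_ASSERT_WITH_CODE(dpm_running,
                        "Trying to disable SCLK DPM when DPM is disabled",
                        return 0);
        puts("sending PPSMC_MSG_DPM_Disable");
        return 0;
}

int main(void)
{
        return disable_sclk_dpm();
}
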
index eda802b..8c889ca 100644 (file)
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
                        PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
                                "Invalid VramInfo table.", return -EINVAL);
 
-                       if (!data->is_memory_gddr5) {
+                       if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
                                table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
                                table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
                                for (k = 0; k < table->num_entries; k++) {
index 2f58e9e..a51f8cb 100644 (file)
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
-       if (dcrtc->dpms != dpms) {
-               dcrtc->dpms = dpms;
-               if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
-                       WARN_ON(clk_prepare_enable(dcrtc->clk));
-               armada_drm_crtc_update(dcrtc);
-               if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
-                       clk_disable_unprepare(dcrtc->clk);
+       if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
                if (dpms_blanked(dpms))
                        armada_drm_vblank_off(dcrtc);
-               else
+               else if (!IS_ERR(dcrtc->clk))
+                       WARN_ON(clk_prepare_enable(dcrtc->clk));
+               dcrtc->dpms = dpms;
+               armada_drm_crtc_update(dcrtc);
+               if (!dpms_blanked(dpms))
                        drm_crtc_vblank_on(&dcrtc->crtc);
+               else if (!IS_ERR(dcrtc->clk))
+                       clk_disable_unprepare(dcrtc->clk);
+       } else if (dcrtc->dpms != dpms) {
+               dcrtc->dpms = dpms;
        }
 }
 
index 608df4c..0743e65 100644 (file)
@@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast)
                return ret;
        }
 
+       arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+                                  pci_resource_len(dev->pdev, 0));
        ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                        pci_resource_len(dev->pdev, 0));
 
@@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast)
 
 void ast_mm_fini(struct ast_private *ast)
 {
+       struct drm_device *dev = ast->dev;
+
        ttm_bo_device_release(&ast->ttm.bdev);
 
        ast_ttm_global_release(ast);
 
        arch_phys_wc_del(ast->fb_mtrr);
+       arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+                               pci_resource_len(dev->pdev, 0));
 }
 
 void ast_ttm_placement(struct ast_bo *bo, int domain)
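
This ast hunk, and the matching cirrus, mgag200, nouveau and radeon hunks further down, all add the same pairing: reserve the VRAM aperture's memtype as write-combined before adding the MTRR, and release it after the MTRR is deleted, in reverse order. A sketch of the ordering; every function below is a hypothetical stand-in (reserve_wc/free_wc for arch_io_reserve_memtype_wc()/arch_io_free_memtype_wc(), mtrr_add/mtrr_del for arch_phys_wc_add()/arch_phys_wc_del()):

#include <stdio.h>

static void reserve_wc(unsigned long base, unsigned long size)
{
        printf("reserve WC memtype %#lx+%#lx\n", base, size);
}

static void free_wc(unsigned long base, unsigned long size)
{
        printf("free WC memtype %#lx+%#lx\n", base, size);
}

static int mtrr_add(unsigned long base, unsigned long size)
{
        printf("add MTRR %#lx+%#lx\n", base, size);
        return 1;
}

static void mtrr_del(int handle)
{
        printf("del MTRR %d\n", handle);
}

int main(void)
{
        unsigned long base = 0xe0000000UL, size = 0x10000000UL;

        reserve_wc(base, size);         /* new: before the MTRR is added  */
        int mtrr = mtrr_add(base, size);

        /* ... device lifetime ... */

        mtrr_del(mtrr);
        free_wc(base, size);            /* new: after the MTRR is removed */
        return 0;
}
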
index bb2438d..5e7e63c 100644 (file)
@@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
                return ret;
        }
 
+       arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+                                  pci_resource_len(dev->pdev, 0));
+
        cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                           pci_resource_len(dev->pdev, 0));
 
@@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
+       struct drm_device *dev = cirrus->dev;
+
        if (!cirrus->mm_inited)
                return;
 
@@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 
        arch_phys_wc_del(cirrus->fb_mtrr);
        cirrus->fb_mtrr = 0;
+       arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+                               pci_resource_len(dev->pdev, 0));
 }
 
 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
index 1df2d33..ffb2ab3 100644 (file)
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
 
        mutex_lock(&dev->master_mutex);
        master = dev->master;
-       if (!master)
-               goto out_unlock;
-
        seq_printf(m, "%s", dev->driver->name);
        if (dev->dev)
                seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
        if (dev->unique)
                seq_printf(m, " unique=%s", dev->unique);
        seq_printf(m, "\n");
-out_unlock:
        mutex_unlock(&dev->master_mutex);
 
        return 0;
index cb86c7e..d923013 100644 (file)
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
-        * We need three dwords: event, wait, link.
+        * We need at most 7 dwords in the return target: 2 cache flush +
+        * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
-       return_dwords = 3;
+       return_dwords = 7;
        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);
 
        /*
-        * Append event, wait and link pointing back to the wait
-        * command to the ring buffer.
+        * Append a cache flush, stall, event, wait and link pointing back to
+        * the wait command to the ring buffer.
         */
+       if (gpu->exec_state == ETNA_PIPE_2D) {
+               CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+                                      VIVS_GL_FLUSH_CACHE_PE2D);
+       } else {
+               CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+                                      VIVS_GL_FLUSH_CACHE_DEPTH |
+                                      VIVS_GL_FLUSH_CACHE_COLOR);
+               CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+                                      VIVS_TS_FLUSH_CACHE_FLUSH);
+       }
+       CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+       CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
-       CMD_LINK(buffer, 2, return_target + 8);
+       CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+                           buffer->user_size - 4);
 
        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
index 5ce3603..0370b84 100644 (file)
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
        int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        uintptr_t ptr;
+       unsigned int flags = 0;
 
        pvec = drm_malloc_ab(npages, sizeof(struct page *));
        if (!pvec)
                return ERR_PTR(-ENOMEM);
 
+       if (!etnaviv_obj->userptr.ro)
+               flags |= FOLL_WRITE;
+
        pinned = 0;
        ptr = etnaviv_obj->userptr.ptr;
 
        down_read(&mm->mmap_sem);
        while (pinned < npages) {
                ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-                                           !etnaviv_obj->userptr.ro, 0,
-                                           pvec + pinned, NULL);
+                                           flags, pvec + pinned, NULL);
                if (ret < 0)
                        break;
 
index d3796ed..169ac96 100644 (file)
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
                        return (u32)buf->vram_node.start;
 
                mutex_lock(&mmu->lock);
-               ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+               ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+                                             buf->size + SZ_64K);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return 0;
index aa92dec..fbd13fa 100644 (file)
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                goto err_free;
        }
 
-       ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+       ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+               g2d_userptr->vec);
        if (ret != npages) {
                DRM_ERROR("failed to get user pages from userptr.\n");
                if (ret < 0)
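
The etnaviv and exynos userptr hunks above, like the i915 and via hunks below, adapt to a get_user_pages() interface change where the separate write/force booleans were folded into a single gup_flags word. A sketch of building that mask (the FOLL_* values mirror the kernel's at the time, but treat them as illustrative):

#include <stdio.h>

#define FOLL_WRITE 0x01
#define FOLL_FORCE 0x10

static void pin_pages(unsigned int flags)
{
        printf("pinning with flags %#x (%s)\n", flags,
               flags & FOLL_WRITE ? "writable" : "read-only");
}

int main(void)
{
        int read_only = 0;
        unsigned int flags = 0;

        if (!read_only)
                flags |= FOLL_WRITE;    /* was: a bare "write" argument */

        pin_pages(flags);
        return 0;
}
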
index 3371635..b2d5e18 100644 (file)
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
                           DCU_MODE_DCU_MODE(DCU_MODE_OFF));
        regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
                     DCU_UPDATE_MODE_READREG);
+       clk_disable_unprepare(fsl_dev->pix_clk);
 }
 
 static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+       clk_prepare_enable(fsl_dev->pix_clk);
        regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
                           DCU_MODE_DCU_MODE_MASK,
                           DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
                     DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
                     DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
                     DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
-       regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-                    DCU_UPDATE_MODE_READREG);
        return;
 }
 
index 0884c45..e04efbe 100644 (file)
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
                return ret;
        }
 
-       ret = clk_prepare_enable(fsl_dev->pix_clk);
-       if (ret < 0) {
-               dev_err(dev, "failed to enable pix clk\n");
-               goto disable_dcu_clk;
-       }
-
+       if (fsl_dev->tcon)
+               fsl_tcon_bypass_enable(fsl_dev->tcon);
        fsl_dcu_drm_init_planes(fsl_dev->drm);
        drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
 
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        enable_irq(fsl_dev->irq);
 
        return 0;
-
-disable_dcu_clk:
-       clk_disable_unprepare(fsl_dev->clk);
-       return ret;
 }
 #endif
 
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
                goto disable_clk;
        }
 
-       ret = clk_prepare_enable(fsl_dev->pix_clk);
-       if (ret < 0) {
-               dev_err(dev, "failed to enable pix clk\n");
-               goto unregister_pix_clk;
-       }
-
        fsl_dev->tcon = fsl_tcon_init(dev);
 
        drm = drm_dev_alloc(driver, dev);
        if (IS_ERR(drm)) {
                ret = PTR_ERR(drm);
-               goto disable_pix_clk;
+               goto unregister_pix_clk;
        }
 
        fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 
 unref:
        drm_dev_unref(drm);
-disable_pix_clk:
-       clk_disable_unprepare(fsl_dev->pix_clk);
 unregister_pix_clk:
        clk_unregister(fsl_dev->pix_clk);
 disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
        struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
 
        clk_disable_unprepare(fsl_dev->clk);
-       clk_disable_unprepare(fsl_dev->pix_clk);
        clk_unregister(fsl_dev->pix_clk);
        drm_put_dev(fsl_dev->drm);
 
index a7e5486..9e6f7d8 100644 (file)
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
                for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
                        regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
        }
-       regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-                          DCU_MODE_DCU_MODE_MASK,
-                          DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-       regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-                    DCU_UPDATE_MODE_READREG);
 }
 
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
index 26edcc8..e1dd75b 100644 (file)
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
 
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
-                                struct drm_crtc_state *crtc_state,
-                                struct drm_connector_state *conn_state)
-{
-       return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-       struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-       if (fsl_dev->tcon)
-               fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-       struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-       if (fsl_dev->tcon)
-               fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-       .atomic_check = fsl_dcu_drm_encoder_atomic_check,
-       .disable = fsl_dcu_drm_encoder_disable,
-       .enable = fsl_dcu_drm_encoder_enable,
-};
-
 static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
 {
        drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
        int ret;
 
        encoder->possible_crtcs = 1;
+
+       /* Use bypass mode for parallel RGB/LVDS encoder */
+       if (fsl_dev->tcon)
+               fsl_tcon_bypass_enable(fsl_dev->tcon);
+
        ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
                               DRM_MODE_ENCODER_LVDS, NULL);
        if (ret < 0)
                return ret;
 
-       drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
        return 0;
 }
 
index e537930..c6f780f 100644 (file)
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
+               unsigned int flags = 0;
+
+               if (!obj->userptr.read_only)
+                       flags |= FOLL_WRITE;
 
                ret = -EFAULT;
                if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
-                                        !obj->userptr.read_only, 0,
+                                        flags,
                                         pvec + pinned, NULL);
                                if (ret < 0)
                                        break;
index 919b35f..dcf7d11 100644 (file)
@@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev)
                return ret;
        }
 
+       arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+                                  pci_resource_len(dev->pdev, 0));
+
        mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
 
@@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev)
 
 void mgag200_mm_fini(struct mga_device *mdev)
 {
+       struct drm_device *dev = mdev->dev;
+
        ttm_bo_device_release(&mdev->ttm.bdev);
 
        mgag200_ttm_global_release(mdev);
 
+       arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+                               pci_resource_len(dev->pdev, 0));
        arch_phys_wc_del(mdev->fb_mtrr);
        mdev->fb_mtrr = 0;
 }
index 1825dbc..a6dbe82 100644 (file)
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
        /* VRAM init */
        drm->gem.vram_available = drm->device.info.ram_user;
 
+       arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+                                  device->func->resource_size(device, 1));
+
        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                              drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
+       struct nvkm_device *device = nvxx_device(&drm->device);
+
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
 
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 
        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
+       arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+                               device->func->resource_size(device, 1));
+
 }
index 6a4b020..5a26eb4 100644 (file)
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
        struct drm_device *dev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
-       u32 line_time_us, vblank_lines;
+       u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
        if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        radeon_crtc = to_radeon_crtc(crtc);
                        if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                               line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
-                                       radeon_crtc->hw_mode.clock;
-                               vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
-                                       radeon_crtc->hw_mode.crtc_vdisplay +
-                                       (radeon_crtc->v_border * 2);
-                               vblank_time_us = vblank_lines * line_time_us;
+                               vblank_in_pixels =
+                                       radeon_crtc->hw_mode.crtc_htotal *
+                                       (radeon_crtc->hw_mode.crtc_vblank_end -
+                                        radeon_crtc->hw_mode.crtc_vdisplay +
+                                        (radeon_crtc->v_border * 2));
+
+                               vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
                                break;
                        }
                }
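
The r600_dpm_get_vblank_time() rewrite above is about integer-division order: dividing per line first truncates to whole microseconds, and that error is then multiplied by the line count. Multiplying first and dividing once keeps the precision. A worked example with hypothetical 1080p60 timings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical 1080p60 timings: htotal 2200 px, pixel clock
         * 148500 kHz, 45 blank lines, no borders. */
        uint32_t htotal = 2200, clock_khz = 148500, vblank_lines = 45;

        /* Old order: the per-line time truncates to 14 us, and the error
         * is then multiplied by every blank line. */
        uint32_t line_time_us = htotal * 1000 / clock_khz;     /* 14 */
        uint32_t old_us = vblank_lines * line_time_us;         /* 630 */

        /* New order: multiply first, divide once at the end. */
        uint32_t vblank_in_pixels = htotal * vblank_lines;     /* 99000 */
        uint32_t new_us = vblank_in_pixels * 1000 / clock_khz; /* 666 */

        printf("old=%u us, new=%u us (exact: 666.67 us)\n", old_us, new_us);
        return 0;
}
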
index 50e96d2..e18839d 100644 (file)
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
        return ret;
 }
 
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+       if (radeon_connector->ddc_bus->has_aux) {
+               drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+               radeon_connector->ddc_bus->has_aux = false;
+       }
+}
+
 static void radeon_connector_destroy(struct drm_connector *connector)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = radeon_lvds_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .set_property = radeon_lvds_set_property,
 };
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = radeon_vga_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .set_property = radeon_connector_set_property,
 };
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = radeon_tv_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .set_property = radeon_connector_set_property,
 };
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
        .detect = radeon_dvi_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = radeon_connector_set_property,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .force = radeon_dvi_force,
 };
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
        .detect = radeon_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = radeon_connector_set_property,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .force = radeon_dvi_force,
 };
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
        .detect = radeon_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = radeon_lvds_set_property,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .force = radeon_dvi_force,
 };
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
        .detect = radeon_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = radeon_lvds_set_property,
+       .early_unregister = radeon_connector_unregister,
        .destroy = radeon_connector_destroy,
        .force = radeon_dvi_force,
 };
index b8ab30a..cdb8cb5 100644 (file)
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
 
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
-       radeon_fbdev_fini(rdev);
-       kfree(rdev->mode_info.bios_hardcoded_edid);
-
-       /* free i2c buses */
-       radeon_i2c_fini(rdev);
-
        if (rdev->mode_info.mode_config_initialized) {
-               radeon_afmt_fini(rdev);
                drm_kms_helper_poll_fini(rdev->ddev);
                radeon_hpd_fini(rdev);
                drm_crtc_force_disable_all(rdev->ddev);
+               radeon_fbdev_fini(rdev);
+               radeon_afmt_fini(rdev);
                drm_mode_config_cleanup(rdev->ddev);
                rdev->mode_info.mode_config_initialized = false;
        }
+
+       kfree(rdev->mode_info.bios_hardcoded_edid);
+
+       /* free i2c buses */
+       radeon_i2c_fini(rdev);
 }
 
 static bool is_hdtv_mode(const struct drm_display_mode *mode)
index 91c8f43..00ea000 100644 (file)
  *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
+ *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       47
+#define KMS_DRIVER_MINOR       48
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 021aa00..29f7817 100644 (file)
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 {
        if (!i2c)
                return;
+       WARN_ON(i2c->has_aux);
        i2c_del_adapter(&i2c->adapter);
-       if (i2c->has_aux)
-               drm_dp_aux_unregister(&i2c->aux);
        kfree(i2c);
 }
 
index be30861..41b72ce 100644 (file)
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 
 int radeon_bo_init(struct radeon_device *rdev)
 {
+       /* reserve PAT memory space to WC for VRAM */
+       arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+                                  rdev->mc.aper_size);
+
        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
 {
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
+       arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
 }
 
 /* Returns how many bytes TTM can move per IB.
index 4552682..3de5e6e 100644 (file)
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **pages = ttm->pages + pinned;
 
-               r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+               r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+                                  pages, NULL);
                if (r < 0)
                        goto release_pages;
 
index 7ee9aaf..e402be8 100644 (file)
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
        case SPI_CONFIG_CNTL:
        case SPI_CONFIG_CNTL_1:
        case TA_CNTL_AUX:
+       case TA_CS_BC_BASE_ADDR:
                return true;
        default:
                DRM_ERROR("Invalid register 0x%x in CS\n", reg);
index eb220ee..65a911d 100644 (file)
 #define        SPI_LB_CU_MASK                                  0x9354
 
 #define        TA_CNTL_AUX                                     0x9508
+#define        TA_CS_BC_BASE_ADDR                              0x950C
 
 #define CC_RB_BACKEND_DISABLE                          0x98F4
 #define                BACKEND_DISABLE(x)                      ((x) << 16)
index 7e2a12c..1a3ad76 100644 (file)
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
        down_read(&current->mm->mmap_sem);
        ret = get_user_pages((unsigned long)xfer->mem_addr,
                             vsg->num_pages,
-                            (vsg->direction == DMA_FROM_DEVICE),
-                            0, vsg->pages, NULL);
+                            (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+                            vsg->pages, NULL);
 
        up_read(&current->mm->mmap_sem);
        if (ret != vsg->num_pages) {
index e8ae3dc..18061a4 100644 (file)
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
 
index 070d750..1e59a48 100644 (file)
@@ -43,7 +43,7 @@
 
 #define VMWGFX_DRIVER_DATE "20160210"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
index dc5beff..c7b53d9 100644 (file)
 
 #define VMW_RES_HT_ORDER 12
 
+/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+       vmw_res_rel_normal,
+       vmw_res_rel_nop,
+       vmw_res_rel_cond_nop,
+       vmw_res_rel_max
+};
+
 /**
  * struct vmw_resource_relocation - Relocation info for resources
  *
  * @head: List head for the software context's relocation list.
  * @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
  * id that needs fixup is located.
+ * @rel_type: Type of relocation.
  */
 struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
-       unsigned long offset;
+       u32 offset:29;
+       enum vmw_resource_relocation_type rel_type:3;
 };
 
 /**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+       return (unsigned long) b - (unsigned long) a;
+}
 
 /**
  * vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
  * @list: Pointer to head of relocation list.
  * @res: The resource.
  * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
  */
 static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
-                                      unsigned long offset)
+                                      unsigned long offset,
+                                      enum vmw_resource_relocation_type
+                                      rel_type)
 {
        struct vmw_resource_relocation *rel;
 
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 
        rel->res = res;
        rel->offset = offset;
+       rel->rel_type = rel_type;
        list_add_tail(&rel->head, list);
 
        return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
        struct vmw_resource_relocation *rel;
 
+       /* Validate the struct vmw_resource_relocation member size */
+       BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+       BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
        list_for_each_entry(rel, list, head) {
-               if (likely(rel->res != NULL))
-                       cb[rel->offset] = rel->res->id;
-               else
-                       cb[rel->offset] = SVGA_3D_CMD_NOP;
+               u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+               switch (rel->rel_type) {
+               case vmw_res_rel_normal:
+                       *addr = rel->res->id;
+                       break;
+               case vmw_res_rel_nop:
+                       *addr = SVGA_3D_CMD_NOP;
+                       break;
+               default:
+                       if (rel->res->id == -1)
+                               *addr = SVGA_3D_CMD_NOP;
+                       break;
+               }
        }
 }
 
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
-                                         id_loc - sw_context->buf_start);
+                                         vmw_ptr_diff(sw_context->buf_start,
+                                                      id_loc),
+                                         vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;
 
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 
                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
-                        id_loc - sw_context->buf_start);
+                        vmw_ptr_diff(sw_context->buf_start, id_loc),
+                        vmw_res_rel_normal);
        }
 
        ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
                return ret;
 
        return vmw_resource_relocation_add(&sw_context->res_relocations,
-                                          NULL, &cmd->header.id -
-                                          sw_context->buf_start);
-
-       return 0;
+                                          NULL,
+                                          vmw_ptr_diff(sw_context->buf_start,
+                                                       &cmd->header.id),
+                                          vmw_res_rel_nop);
 }
 
 /**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
                return ret;
 
        return vmw_resource_relocation_add(&sw_context->res_relocations,
-                                          NULL, &cmd->header.id -
-                                          sw_context->buf_start);
-
-       return 0;
+                                          NULL,
+                                          vmw_ptr_diff(sw_context->buf_start,
+                                                       &cmd->header.id),
+                                          vmw_res_rel_nop);
 }
 
 /**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
  * @header: Pointer to the command header in the command stream.
  *
  * Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
  */
 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
                return ret;
 
        /*
-        * Add view to the validate list iff it was not created using this
-        * command batch.
+        * If the view wasn't created during this command batch, it might
+        * have been removed due to a context swapout, so add a
+        * relocation to conditionally make this command a NOP to avoid
+        * device errors.
         */
-       return vmw_view_res_val_add(sw_context, view);
+       return vmw_resource_relocation_add(&sw_context->res_relocations,
+                                          view,
+                                          vmw_ptr_diff(sw_context->buf_start,
+                                                       &cmd->header.id),
+                                          vmw_res_rel_cond_nop);
 }
 
 /**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
                                   cmd->body.shaderResourceViewId);
 }
 
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+                                          struct vmw_sw_context *sw_context,
+                                          SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDXTransferFromBuffer body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+       int ret;
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.srcSid, NULL);
+       if (ret != 0)
+               return ret;
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.destSid, NULL);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                    &vmw_cmd_buffer_copy_check, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
                    &vmw_cmd_pred_copy_check, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+                   &vmw_cmd_dx_transfer_from_buffer,
+                   true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
        int ret;
 
        *header = NULL;
-       if (!dev_priv->cman || kernel_commands)
-               return kernel_commands;
-
        if (command_size > SVGA_CB_MAX_SIZE) {
                DRM_ERROR("Command buffer is too large.\n");
                return ERR_PTR(-EINVAL);
        }
 
+       if (!dev_priv->cman || kernel_commands)
+               return kernel_commands;
+
        /* If possible, add a little space for fencing. */
        cmdbuf_size = command_size + 512;
        cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
-       DRM_INFO("Dummy query bo pin count: %d\n",
-                dev_priv->dummy_query_bo->pin_count);
-
 out_unlock:
        return;
 
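
Two building blocks of the vmwgfx relocation rework above are easy to demonstrate standalone: vmw_ptr_diff() turns a pointer pair into a byte offset small enough to pack into a 29-bit field next to a 3-bit relocation type, with compile-time checks that the fields are wide enough. A C11 sketch (buffer sizes and command values are stand-ins):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum rel_type { rel_normal, rel_nop, rel_cond_nop, rel_max };

struct relocation {
        uint32_t offset:29;     /* byte offset into the command buffer */
        uint32_t type:3;
};

#define CB_MAX_SIZE (512 * 1024)        /* stand-in for SVGA_CB_MAX_SIZE */
#define CMD_NOP     0                   /* stand-in for SVGA_3D_CMD_NOP  */

/* Same computation as vmw_ptr_diff(): offset from a to b in bytes. */
static size_t ptr_diff(void *a, void *b)
{
        return (char *)b - (char *)a;
}

int main(void)
{
        /* The invariants the patch asserts with BUILD_BUG_ON. */
        static_assert(CB_MAX_SIZE < (1u << 29), "offset field too narrow");
        static_assert(rel_max < (1 << 3), "type field too narrow");

        uint32_t cb[4] = { 0x11, 0x22, 0x33, 0x44 };
        struct relocation rel = {
                .offset = ptr_diff(cb, &cb[2]),
                .type   = rel_nop,
        };

        /* Byte-granularity apply, as in vmw_resource_relocations_apply(). */
        uint32_t *addr = (uint32_t *)((char *)cb + rel.offset);
        if (rel.type == rel_nop)
                *addr = CMD_NOP;

        printf("cb[2] = %#x\n", cb[2]);         /* now 0 */
        return 0;
}
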
index 6a328d5..52ca1c9 100644 (file)
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;
 
-               if (nonblock)
-                       return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
-               lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+               lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+                                       nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
index c2a721a..b445ce9 100644 (file)
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
        if (res->id != -1) {
 
                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-               if (unlikely(cmd == NULL)) {
+               if (unlikely(!cmd)) {
                        DRM_ERROR("Failed reserving FIFO space for surface "
                                  "destruction.\n");
                        return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
 
        submit_size = vmw_surface_define_size(srf);
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
+       if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
                ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
        uint8_t *cmd;
        struct vmw_private *dev_priv = res->dev_priv;
 
-       BUG_ON(val_buf->bo == NULL);
-
+       BUG_ON(!val_buf->bo);
        submit_size = vmw_surface_dma_size(srf);
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
+       if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "DMA.\n");
                return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
 
        submit_size = vmw_surface_destroy_size();
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
+       if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "eviction.\n");
                return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
        int ret;
        struct vmw_resource *res = &srf->res;
 
-       BUG_ON(res_free == NULL);
+       BUG_ON(!res_free);
        if (!dev_priv->has_mob)
                vmw_fifo_resource_inc(dev_priv);
        ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct drm_vmw_size __user *user_sizes;
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        }
 
        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-       if (unlikely(user_srf == NULL)) {
+       if (unlikely(!user_srf)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = num_sizes;
        user_srf->size = size;
-
-       srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-       if (unlikely(srf->sizes == NULL)) {
-               ret = -ENOMEM;
+       srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+                                req->size_addr,
+                                sizeof(*srf->sizes) * srf->num_sizes);
+       if (IS_ERR(srf->sizes)) {
+               ret = PTR_ERR(srf->sizes);
                goto out_no_sizes;
        }
-       srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-                              GFP_KERNEL);
-       if (unlikely(srf->offsets == NULL)) {
+       srf->offsets = kmalloc_array(srf->num_sizes,
+                                    sizeof(*srf->offsets),
+                                    GFP_KERNEL);
+       if (unlikely(!srf->offsets)) {
                ret = -ENOMEM;
                goto out_no_offsets;
        }
 
-       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-           req->size_addr;
-
-       ret = copy_from_user(srf->sizes, user_sizes,
-                            srf->num_sizes * sizeof(*srf->sizes));
-       if (unlikely(ret != 0)) {
-               ret = -EFAULT;
-               goto out_no_copy;
-       }
-
        srf->base_size = *srf->sizes;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
        ret = -EINVAL;
        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
-       if (unlikely(base == NULL)) {
+       if (unlikely(!base)) {
                DRM_ERROR("Could not find surface to reference.\n");
                goto out_no_lookup;
        }
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 
        cmd = vmw_fifo_reserve(dev_priv, submit_len);
        cmd2 = (typeof(cmd2))cmd;
-       if (unlikely(cmd == NULL)) {
+       if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
                ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
        submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
        cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd1 == NULL)) {
+       if (unlikely(!cmd1)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "binding.\n");
                return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 
        submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
+       if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "unbinding.\n");
                return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
        vmw_binding_res_list_scrub(&res->binding_head);
 
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
+       if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 
        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;
-       if (srf->res.backup == NULL) {
+       if (!srf->res.backup) {
                DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
                goto out_bad_resource;
        }
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        }
 
        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-       if (unlikely(user_srf == NULL)) {
+       if (unlikely(!user_srf)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }
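
The surface-define hunk above replaces an open-coded kmalloc() + copy_from_user() pair with memdup_user(), and a bare kmalloc(n * size) with the overflow-checking kmalloc_array(). A userspace sketch of the memdup shape (malloc/memcpy stand in for the allocation and user-copy; error-pointer details omitted):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace sketch of memdup_user(): allocate and copy in one step so
 * callers cannot get the error handling between the two wrong. */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (!p)
                return NULL;    /* the kernel returns ERR_PTR(-ENOMEM) */
        memcpy(p, src, len);
        return p;
}

int main(void)
{
        int sizes[3] = { 640, 480, 32 };
        int *copy = memdup(sizes, sizeof(sizes));

        if (!copy)
                return ENOMEM;
        printf("%d x %d x %d\n", copy[0], copy[1], copy[2]);
        free(copy);
        return 0;
}
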
index 8fd4bf7..818ea7d 100644 (file)
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
        0xC0                /*  End Collection                  */
 };
 
-static __u8 pid0006_rdesc_fixed[] = {
-       0x05, 0x01,        /* Usage Page (Generic Desktop)      */
-       0x09, 0x04,        /* Usage (Joystick)                  */
-       0xA1, 0x01,        /* Collection (Application)          */
-       0xA1, 0x02,        /*   Collection (Logical)            */
-       0x75, 0x08,        /*     Report Size (8)               */
-       0x95, 0x05,        /*     Report Count (5)              */
-       0x15, 0x00,        /*     Logical Minimum (0)           */
-       0x26, 0xFF, 0x00,  /*     Logical Maximum (255)         */
-       0x35, 0x00,        /*     Physical Minimum (0)          */
-       0x46, 0xFF, 0x00,  /*     Physical Maximum (255)        */
-       0x09, 0x30,        /*     Usage (X)                     */
-       0x09, 0x33,        /*     Usage (Ry)                    */
-       0x09, 0x32,        /*     Usage (Z)                     */
-       0x09, 0x31,        /*     Usage (Y)                     */
-       0x09, 0x34,        /*     Usage (Ry)                    */
-       0x81, 0x02,        /*     Input (Variable)              */
-       0x75, 0x04,        /*     Report Size (4)               */
-       0x95, 0x01,        /*     Report Count (1)              */
-       0x25, 0x07,        /*     Logical Maximum (7)           */
-       0x46, 0x3B, 0x01,  /*     Physical Maximum (315)        */
-       0x65, 0x14,        /*     Unit (Centimeter)             */
-       0x09, 0x39,        /*     Usage (Hat switch)            */
-       0x81, 0x42,        /*     Input (Variable)              */
-       0x65, 0x00,        /*     Unit (None)                   */
-       0x75, 0x01,        /*     Report Size (1)               */
-       0x95, 0x0C,        /*     Report Count (12)             */
-       0x25, 0x01,        /*     Logical Maximum (1)           */
-       0x45, 0x01,        /*     Physical Maximum (1)          */
-       0x05, 0x09,        /*     Usage Page (Button)           */
-       0x19, 0x01,        /*     Usage Minimum (0x01)          */
-       0x29, 0x0C,        /*     Usage Maximum (0x0C)          */
-       0x81, 0x02,        /*     Input (Variable)              */
-       0x06, 0x00, 0xFF,  /*     Usage Page (Vendor Defined)   */
-       0x75, 0x01,        /*     Report Size (1)               */
-       0x95, 0x08,        /*     Report Count (8)              */
-       0x25, 0x01,        /*     Logical Maximum (1)           */
-       0x45, 0x01,        /*     Physical Maximum (1)          */
-       0x09, 0x01,        /*     Usage (0x01)                  */
-       0x81, 0x02,        /*     Input (Variable)              */
-       0xC0,              /*   End Collection                  */
-       0xA1, 0x02,        /*   Collection (Logical)            */
-       0x75, 0x08,        /*     Report Size (8)               */
-       0x95, 0x07,        /*     Report Count (7)              */
-       0x46, 0xFF, 0x00,  /*     Physical Maximum (255)        */
-       0x26, 0xFF, 0x00,  /*     Logical Maximum (255)         */
-       0x09, 0x02,        /*     Usage (0x02)                  */
-       0x91, 0x02,        /*     Output (Variable)             */
-       0xC0,              /*   End Collection                  */
-       0xC0               /* End Collection                    */
-};
-
 static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                                unsigned int *rsize)
 {
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                        *rsize = sizeof(pid0011_rdesc_fixed);
                }
                break;
-       case 0x0006:
-               if (*rsize == sizeof(pid0006_rdesc_fixed)) {
-                       rdesc = pid0006_rdesc_fixed;
-                       *rsize = sizeof(pid0006_rdesc_fixed);
-               }
-               break;
        }
        return rdesc;
 }
 
+#define map_abs(c)      hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c)      hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+                           struct hid_field *field, struct hid_usage *usage,
+                           unsigned long **bit, int *max)
+{
+       switch (usage->hid) {
+       /*
+        * revert to the old hid-input behavior where axes
+        * can be randomly assigned when hid->usage is reused.
+        */
+       case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+       case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+               if (field->flags & HID_MAIN_ITEM_RELATIVE)
+                       map_rel(usage->hid & 0xf);
+               else
+                       map_abs(usage->hid & 0xf);
+               return 1;
+       }
+
+       return 0;
+}
+
 static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
        .id_table = dr_devices,
        .report_fixup = dr_report_fixup,
        .probe = dr_probe,
+       .input_mapping = dr_input_mapping,
 };
 module_hid_driver(dr_driver);
 
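
dr_input_mapping() above leans on a property of the HID Generic Desktop usage IDs: the low nibble of X..RZ (0x30..0x35) equals the corresponding ABS_X..ABS_RZ event code (0..5), so `usage->hid & 0xf` is the whole mapping. A quick standalone check:

#include <stdio.h>

#define HID_GD_X  0x00010030
#define HID_GD_RZ 0x00010035

int main(void)
{
        unsigned int usage;

        for (usage = HID_GD_X; usage <= HID_GD_RZ; usage++)
                printf("usage %#x -> ABS code %u\n", usage, usage & 0xf);
        return 0;
}
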
index cd59c79..6cfb5ca 100644 (file)
@@ -64,6 +64,9 @@
 #define USB_VENDOR_ID_AKAI             0x2011
 #define USB_DEVICE_ID_AKAI_MPKMINI2    0x0715
 
+#define USB_VENDOR_ID_AKAI_09E8                0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX        0x0031
+
 #define USB_VENDOR_ID_ALCOR            0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232   0x9720
 
index d8d55f3..d3e1ab1 100644 (file)
@@ -100,6 +100,7 @@ struct hidled_device {
        const struct hidled_config *config;
        struct hid_device       *hdev;
        struct hidled_rgb       *rgb;
+       u8                      *buf;
        struct mutex            lock;
 };
 
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
 
        mutex_lock(&ldev->lock);
 
+       /*
+        * buffer provided to hid_hw_raw_request must not be on the stack
+        * and must not be part of a data structure
+        */
+       memcpy(ldev->buf, buf, ldev->config->report_size);
+
        if (ldev->config->report_type == RAW_REQUEST)
-               ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+               ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
                                         ldev->config->report_size,
                                         HID_FEATURE_REPORT,
                                         HID_REQ_SET_REPORT);
        else if (ldev->config->report_type == OUTPUT_REPORT)
-               ret = hid_hw_output_report(ldev->hdev, buf,
+               ret = hid_hw_output_report(ldev->hdev, ldev->buf,
                                           ldev->config->report_size);
        else
                ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
 
        mutex_lock(&ldev->lock);
 
-       ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+       memcpy(ldev->buf, buf, ldev->config->report_size);
+
+       ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
                                 ldev->config->report_size,
                                 HID_FEATURE_REPORT,
                                 HID_REQ_SET_REPORT);
        if (ret < 0)
                goto err;
 
-       ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+       ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
                                 ldev->config->report_size,
                                 HID_FEATURE_REPORT,
                                 HID_REQ_GET_REPORT);
+
+       memcpy(buf, ldev->buf, ldev->config->report_size);
 err:
        mutex_unlock(&ldev->lock);
 
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (!ldev)
                return -ENOMEM;
 
+       ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+       if (!ldev->buf)
+               return -ENOMEM;
+
        ret = hid_parse(hdev);
        if (ret)
                return ret;
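
The hid-led hunk above encodes a general rule: the buffer handed to hid_hw_raw_request() or hid_hw_output_report() may be used for DMA, so it must come from the heap rather than the stack or the middle of a structure. A minimal sketch of the same pattern, with hypothetical names (my_dev and my_send are illustrative, not part of this driver):

struct my_dev {
        struct hid_device *hdev;
        u8 *buf;        /* devm_kmalloc()'d at probe time, DMA-safe */
};

static int my_send(struct my_dev *dev, const u8 *report, size_t len)
{
        /* Bounce the caller's (possibly stack-based) report through
         * the heap buffer the transport layer is allowed to DMA from. */
        memcpy(dev->buf, report, len);
        return hid_hw_raw_request(dev->hdev, dev->buf[0], dev->buf, len,
                                  HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
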
index 0a0eca5..354d49e 100644 (file)
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
index 4aa3cb6..bcd0630 100644 (file)
@@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context)
        u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
        struct icmsg_negotiate *negop = NULL;
 
-       vmbus_recvpacket(channel, hbeat_txf_buf,
-                        PAGE_SIZE, &recvlen, &requestid);
+       while (1) {
+
+               vmbus_recvpacket(channel, hbeat_txf_buf,
+                                PAGE_SIZE, &recvlen, &requestid);
+
+               if (!recvlen)
+                       break;
 
-       if (recvlen > 0) {
                icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
                                sizeof(struct vmbuspipe_hdr)];
 
index 98114ce..2fe1828 100644 (file)
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
                 * 0.5'C per two measurement cycles thus ignore possible
                 * but unlikely aliasing error on lsb reading. --Grant
                 */
-               data->temp = ((i2c_smbus_read_byte_data(client,
+               data->temp = (i2c_smbus_read_byte_data(client,
                                        ADM9240_REG_TEMP) << 8) |
                                        i2c_smbus_read_byte_data(client,
-                                       ADM9240_REG_TEMP_CONF)) / 128;
+                                       ADM9240_REG_TEMP_CONF);
 
                for (i = 0; i < 2; i++) { /* read fans */
                        data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
                char *buf)
 {
        struct adm9240_data *data = adm9240_update_device(dev);
-       return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */
+       return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
 }
 
 static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
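
For the adm9240 change, the raw 16-bit value is now cached and the scaling deferred to read time. A worked example with hypothetical register contents shows why the result comes out in millidegrees:

/*
 * Worked example (register values hypothetical): the MSB register
 * reads 0x19 (25 C) and the half-degree bit (bit 7) is set in
 * TEMP_CONF:
 *
 *      raw   = (0x19 << 8) | 0x80 = 6528
 *      temp9 = 6528 / 128         = 51     (9-bit value, 0.5 C units)
 *      mC    = 51 * 500           = 25500  (25.5 degrees Celsius)
 *
 * Dividing the signed 16-bit cache by 128 at display time also keeps
 * sign extension intact for negative temperatures.
 */
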
index bef84e0..c1b9275 100644 (file)
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
                             long *val)
 {
        struct max31790_data *data = max31790_update_device(dev);
-       u8 fan_config = data->fan_config[channel];
+       u8 fan_config;
 
        if (IS_ERR(data))
                return PTR_ERR(data);
 
+       fan_config = data->fan_config[channel];
+
        switch (attr) {
        case hwmon_pwm_input:
                *val = data->pwm[channel] >> 8;
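
The max31790 fix restores the standard ERR_PTR discipline: a pointer that may encode an errno must pass IS_ERR() before any field is dereferenced. A sketch of the idiom, with illustrative names:

static int foo_read(struct device *dev, long *val)
{
        struct foo_data *data = foo_update_device(dev); /* may be ERR_PTR */

        if (IS_ERR(data))
                return PTR_ERR(data);   /* decode the errno, no deref yet */

        *val = data->field;             /* safe only after the check */
        return 0;
}
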
index 6d94e2e..d252276 100644 (file)
@@ -79,12 +79,12 @@ config I2C_AMD8111
 
 config I2C_HIX5HD2
        tristate "Hix5hd2 high-speed I2C driver"
-       depends on ARCH_HIX5HD2 || COMPILE_TEST
+       depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
        help
-         Say Y here to include support for high-speed I2C controller in the
-         Hisilicon based hix5hd2 SoCs.
+         Say Y here to include support for the high-speed I2C controller
+         used in HiSilicon hix5hd2 SoCs.
 
-         This driver can also be built as a module.  If so, the module
+         This driver can also be built as a module. If so, the module
          will be called i2c-hix5hd2.
 
 config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
 
 config I2C_IMX
        tristate "IMX I2C interface"
-       depends on ARCH_MXC || ARCH_LAYERSCAPE
+       depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
        help
          Say Y here if you want to use the IIC bus controller on
-         the Freescale i.MX/MXC or Layerscape processors.
+         the Freescale i.MX/MXC, Layerscape or ColdFire processors.
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-imx.
index 1fe93c4..11e866d 100644 (file)
@@ -95,6 +95,9 @@
 #define DW_IC_STATUS_TFE               BIT(2)
 #define DW_IC_STATUS_MST_ACTIVITY      BIT(5)
 
+#define DW_IC_SDA_HOLD_RX_SHIFT                16
+#define DW_IC_SDA_HOLD_RX_MASK         GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
+
 #define DW_IC_ERR_TX_ABRT      0x1
 
 #define DW_IC_TAR_10BITADDR_MASTER BIT(12)
@@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        /* Configure SDA Hold Time if required */
        reg = dw_readl(dev, DW_IC_COMP_VERSION);
        if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
-               if (dev->sda_hold_time) {
-                       dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
-               } else {
+               if (!dev->sda_hold_time) {
                        /* Keep previous hold time setting if no one set it */
                        dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
                }
+               /*
+                * Workaround for avoiding TX arbitration lost in case I2C
+        * slave pulls SDA down "too quickly" after the falling edge of
+        * SCL by enabling a non-zero SDA RX hold. The specification says
+        * it extends the incoming SDA low-to-high transition while SCL is
+        * high, but it appears to help with the above issue as well.
+                */
+               if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+                       dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+               dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
        } else {
                dev_warn(dev->dev,
                        "Hardware too old to adjust SDA hold time.\n");
index 9604024..49f2084 100644 (file)
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
        { .compatible = "cnxt,cx92755-i2c" },
        { },
 };
+MODULE_DEVICE_TABLE(of, dc_i2c_match);
 
 static struct platform_driver dc_i2c_driver = {
        .probe   = dc_i2c_probe,
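
Several hunks in this batch (digicolor above, plus jz4780, xlp9xx and xlr below) add the same one-liner. MODULE_DEVICE_TABLE(of, ...) exports the OF match table as module aliases so udev/modprobe can autoload the driver when a matching device tree node appears; without it the driver binds fine when built in but never autoloads as a module. A sketch, with an illustrative compatible string:

static const struct of_device_id my_i2c_match[] = {
        { .compatible = "vendor,my-i2c" },
        { /* sentinel */ }
};
/* Emits "of:N*T*Cvendor,my-i2c" style aliases into modinfo */
MODULE_DEVICE_TABLE(of, my_i2c_match);
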
index 08847e8..eb3627f 100644 (file)
 #define SMBHSTCFG_HST_EN       1
 #define SMBHSTCFG_SMB_SMI_EN   2
 #define SMBHSTCFG_I2C_EN       4
+#define SMBHSTCFG_SPD_WD       0x10
 
 /* TCO configuration bits for TCOCTL */
 #define TCOCTL_EN              0x0100
@@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
                block = 1;
                break;
        case I2C_SMBUS_I2C_BLOCK_DATA:
-               /* NB: page 240 of ICH5 datasheet shows that the R/#W
-                * bit should be cleared here, even when reading */
-               outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+               /*
+                * NB: page 240 of ICH5 datasheet shows that the R/#W
+                * bit should be cleared here, even when reading.
+                * However if SPD Write Disable is set (Lynx Point and later),
+                * the read will fail if we don't set the R/#W bit.
+                */
+               outb_p(((addr & 0x7f) << 1) |
+                      ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
+                       (read_write & 0x01) : 0),
+                      SMBHSTADD(priv));
                if (read_write == I2C_SMBUS_READ) {
                        /* NB: page 240 of ICH5 datasheet also shows
                         * that DATA1 is the cmd field when reading */
@@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
                /* Disable SMBus interrupt feature if SMBus using SMI# */
                priv->features &= ~FEATURE_IRQ;
        }
+       if (temp & SMBHSTCFG_SPD_WD)
+               dev_info(&dev->dev, "SPD Write Disable is set\n");
 
        /* Clear special mode bits */
        if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
index 592a8f2..47fc1f1 100644 (file)
@@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
        rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
 
-       if (!gpio_is_valid(rinfo->sda_gpio) ||
-           !gpio_is_valid(rinfo->scl_gpio) ||
-           IS_ERR(i2c_imx->pinctrl_pins_default) ||
-           IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
+       if (rinfo->sda_gpio == -EPROBE_DEFER ||
+           rinfo->scl_gpio == -EPROBE_DEFER) {
+               return -EPROBE_DEFER;
+       } else if (!gpio_is_valid(rinfo->sda_gpio) ||
+                  !gpio_is_valid(rinfo->scl_gpio) ||
+                  IS_ERR(i2c_imx->pinctrl_pins_default) ||
+                  IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
                dev_dbg(&pdev->dev, "recovery information incomplete\n");
                return 0;
        }
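
The i2c-imx change singles out -EPROBE_DEFER: of_get_named_gpio() returns it when the GPIO controller has not probed yet, and lumping it in with "invalid GPIO" would silently disable bus recovery forever instead of retrying the probe. The shape of the fix, with illustrative names:

static int my_init_recovery(struct device_node *np)
{
        int gpio = of_get_named_gpio(np, "sda-gpios", 0);

        if (gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* retry once the GPIO driver is up */
        if (!gpio_is_valid(gpio))
                return 0;               /* optional feature, degrade gracefully */

        return gpio;
}
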
index b8ea621..30132c3 100644 (file)
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
        { .compatible = "ingenic,jz4780-i2c", },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
 
 static int jz4780_i2c_probe(struct platform_device *pdev)
 {
index 50702c7..df22066 100644 (file)
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
        t_calc->div_low--;
        t_calc->div_high--;
 
+       /* Set the tuning value to 0, which leaves the con register unchanged */
+       t_calc->tuning = 0;
        /* Maximum divider supported by hw is 0xffff */
        if (t_calc->div_low > 0xffff) {
                t_calc->div_low = 0xffff;
index 263685c..05cf192 100644 (file)
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
        struct mbox_chan *mbox_chan;
        struct mbox_client mbox_client;
        struct completion rd_complete;
-       u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+       u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
        u32 *resp_msg;
 };
 
index 2a972ed..e29ff37 100644 (file)
@@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
        { .compatible = "netlogic,xlp980-i2c", },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
index 0968f59..ad17d88 100644 (file)
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
        },
        { }
 };
+MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
 
 static int xlr_i2c_probe(struct platform_device *pdev)
 {
index 5ab6721..1704fc8 100644 (file)
@@ -1681,6 +1681,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
 static void of_i2c_register_devices(struct i2c_adapter *adap)
 {
        struct device_node *bus, *node;
+       struct i2c_client *client;
 
        /* Only register child devices if the adapter has a node pointer set */
        if (!adap->dev.of_node)
@@ -1695,7 +1696,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
        for_each_available_child_of_node(bus, node) {
                if (of_node_test_and_set_flag(node, OF_POPULATED))
                        continue;
-               of_i2c_register_device(adap, node);
+
+               client = of_i2c_register_device(adap, node);
+               if (IS_ERR(client)) {
+                       dev_warn(&adap->dev,
+                                "Failed to create I2C device for %s\n",
+                                node->full_name);
+                       of_node_clear_flag(node, OF_POPULATED);
+               }
        }
 
        of_node_put(bus);
@@ -2299,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
                if (IS_ERR(client)) {
                        dev_err(&adap->dev, "failed to create client for '%s'\n",
                                 rd->dn->full_name);
+                       of_node_clear_flag(rd->dn, OF_POPULATED);
                        return notifier_from_errno(PTR_ERR(client));
                }
                break;
index 7edcf32..99c0514 100644 (file)
@@ -437,6 +437,8 @@ config STX104
 config TI_ADC081C
        tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
        depends on I2C
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          If you say yes here you get support for Texas Instruments ADC081C,
          ADC101C and ADC121C ADC chips.
index bd321b3..ef761a5 100644 (file)
@@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
        struct device *dev = &data->client->dev;
        int ret;
        unsigned int val;
+       __be16  rval;
 
-       ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+       ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
        if (ret)
                return ret;
 
-       dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
-                                                be16_to_cpu(val) % 100);
+       val = be16_to_cpu(rval);
+       dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
 
        ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
        if (ret)
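
This atlas fix (and the maxim_thermocouple one below) share a theme: a big-endian register read must land in a __be16/__be32 typed variable before be16_to_cpu(), both so sparse can type-check the conversion and because reading two bytes into an unsigned int leaves the remaining bytes undefined. A sketch, assuming 8-bit registers as in this driver (my_read_be16 is illustrative):

static int my_read_be16(struct regmap *regmap, unsigned int reg, u16 *out)
{
        __be16 raw;     /* typed endian storage keeps sparse honest */
        int ret;

        /* two 8-bit registers holding one big-endian 16-bit quantity */
        ret = regmap_bulk_read(regmap, reg, &raw, 2);
        if (ret)
                return ret;

        *out = be16_to_cpu(raw);        /* convert exactly the bytes read */
        return 0;
}
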
index 39dd202..066161a 100644 (file)
@@ -123,22 +123,24 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
 {
        unsigned int storage_bytes = data->chip->read_size;
        unsigned int shift = chan->scan_type.shift + (chan->address * 8);
-       unsigned int buf;
+       __be16 buf16;
+       __be32 buf32;
        int ret;
 
-       ret = spi_read(data->spi, (void *) &buf, storage_bytes);
-       if (ret)
-               return ret;
-
        switch (storage_bytes) {
        case 2:
-               *val = be16_to_cpu(buf);
+               ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+               *val = be16_to_cpu(buf16);
                break;
        case 4:
-               *val = be32_to_cpu(buf);
+               ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+               *val = be32_to_cpu(buf32);
                break;
        }
 
+       if (ret)
+               return ret;
+
        /* check to be sure this is a valid reading */
        if (*val & data->chip->status_bit)
                return -EINVAL;
index c68746c..224ad27 100644 (file)
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        unsigned long dma_attrs = 0;
        struct scatterlist *sg, *sg_list_start;
        int need_release = 0;
+       unsigned int gup_flags = FOLL_WRITE;
 
        if (dmasync)
                dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        if (ret)
                goto out;
 
+       if (!umem->writable)
+               gup_flags |= FOLL_FORCE;
+
        need_release = 1;
        sg_list_start = umem->sg_head.sgl;
 
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                ret = get_user_pages(cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
-                                    1, !umem->writable, page_list, vma_list);
+                                    gup_flags, page_list, vma_list);
 
                if (ret < 0)
                        goto out;
index 75077a0..1f0fe32 100644 (file)
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
        u64 off;
        int j, k, ret = 0, start_idx, npages = 0;
        u64 base_virt_addr;
+       unsigned int flags = 0;
 
        if (access_mask == 0)
                return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                goto out_put_task;
        }
 
+       if (access_mask & ODP_WRITE_ALLOWED_BIT)
+               flags |= FOLL_WRITE;
+
        start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
        k = start_idx;
 
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                user_virt, gup_num_pages,
-                               access_mask & ODP_WRITE_ALLOWED_BIT,
-                               0, local_page_list, NULL);
+                               flags, local_page_list, NULL);
                up_read(&owning_mm->mmap_sem);
 
                if (npages < 0)
index 6c00d04..c6fe89d 100644 (file)
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                goto out;
        }
 
-       ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+       ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
        if (ret < 0)
                goto out;
 
index 2d2b94f..75f0862 100644 (file)
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
        for (got = 0; got < num_pages; got += ret) {
                ret = get_user_pages(start_page + got * PAGE_SIZE,
-                                    num_pages - got, 1, 1,
+                                    num_pages - got,
+                                    FOLL_WRITE | FOLL_FORCE,
                                     p + got, NULL);
                if (ret < 0)
                        goto bail_release;
index a0b6ebe..1ccee6e 100644 (file)
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
        int i;
        int flags;
        dma_addr_t pa;
+       unsigned int gup_flags;
 
        if (!can_do_mlock())
                return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 
        flags = IOMMU_READ | IOMMU_CACHE;
        flags |= (writable) ? IOMMU_WRITE : 0;
+       gup_flags = FOLL_WRITE;
+       gup_flags |= (writable) ? 0 : FOLL_FORCE;
        cur_base = addr & PAGE_MASK;
        ret = 0;
 
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
                ret = get_user_pages(cur_base,
                                        min_t(unsigned long, npages,
                                        PAGE_SIZE / sizeof(struct page *)),
-                                       1, !writable, page_list, NULL);
+                                       gup_flags, page_list, NULL);
 
                if (ret < 0)
                        goto out;
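
The infiniband hunks above (umem, ODP, mthca, qib, usnic) are all part of one tree-wide conversion: get_user_pages() and friends dropped their separate write/force int parameters in favor of a single unsigned gup_flags argument, spelled FOLL_WRITE and FOLL_FORCE. The old-to-new mapping, as a sketch with illustrative names:

static long my_pin_pages(unsigned long start, unsigned long nr_pages,
                         bool writable, bool force, struct page **pages)
{
        unsigned int gup_flags = 0;     /* replaces the old write/force ints */

        if (writable)
                gup_flags |= FOLL_WRITE;
        if (force)
                gup_flags |= FOLL_FORCE;

        return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
}
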
index c0e7b62..1210244 100644 (file)
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                       idev->id_vendor, idev->id_device);
 }
 
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
 
 static DEVICE_ATTR_RO(id);
 static DEVICE_ATTR_RO(id_device);
index 82b0b5d..bc0af33 100644 (file)
@@ -158,8 +158,8 @@ config PIC32_EVIC
        select IRQ_DOMAIN
 
 config JCORE_AIC
-       bool "J-Core integrated AIC"
-       depends on OF && (SUPERH || COMPILE_TEST)
+       bool "J-Core integrated AIC" if COMPILE_TEST
+       depends on OF
        select IRQ_DOMAIN
        help
          Support for the J-Core integrated AIC.
index efbf0e4..2a7a388 100644 (file)
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
        nps_ack_gic();
 }
 
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
 {
        unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
 
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
        .name           = "NPS400 IC",
        .irq_mask       = nps400_irq_mask,
        .irq_unmask     = nps400_irq_unmask,
-       .irq_eoi        = nps400_irq_eoi,
+       .irq_ack        = nps400_irq_ack,
 };
 
 static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
@@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = {
 static int __init nps400_of_init(struct device_node *node,
                                 struct device_node *parent)
 {
-       static struct irq_domain *nps400_root_domain;
+       struct irq_domain *nps400_root_domain;
 
        if (parent) {
                pr_err("DeviceTree incore ic not a root irq controller\n");
index 003495d..c5dee30 100644 (file)
@@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its)
 
 static int its_alloc_tables(struct its_node *its)
 {
-       u64 typer = readq_relaxed(its->base + GITS_TYPER);
+       u64 typer = gic_read_typer(its->base + GITS_TYPER);
        u32 ids = GITS_TYPER_DEVBITS(typer);
        u64 shr = GITS_BASER_InnerShareable;
        u64 cache = GITS_BASER_WaWb;
@@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void)
                 * We now have to bind each collection to its target
                 * redistributor.
                 */
-               if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+               if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
                        /*
                         * This ITS wants the physical address of the
                         * redistributor.
@@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void)
                        /*
                         * This ITS wants a linear CPU number.
                         */
-                       target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+                       target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
                        target = GICR_TYPER_CPU_NUMBER(target) << 16;
                }
 
@@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res,
        INIT_LIST_HEAD(&its->its_device_list);
        its->base = its_base;
        its->phys_base = res->start;
-       its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+       its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
        its->numa_node = numa_node;
 
        its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ out_unmap:
 
 static bool gic_rdists_supports_plpis(void)
 {
-       return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+       return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
 }
 
 int its_cpu_init(void)
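
The readq_relaxed() to gic_read_typer() substitutions above exist because GITS_TYPER and GICR_TYPER are 64-bit registers but the ITS driver must also build on 32-bit ARM, where readq_relaxed() is unavailable. A helper along these lines would do it (a sketch, not necessarily the kernel's exact implementation):

static u64 my_read_typer(const void __iomem *addr)
{
#ifdef CONFIG_64BIT
        return readq_relaxed(addr);
#else
        /* Two 32-bit accesses; safe for ID registers, whose halves
         * are stable between the reads. */
        return readl_relaxed(addr) |
               ((u64)readl_relaxed(addr + 4) << 32);
#endif
}
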
index 9b81bd8..19d642e 100644 (file)
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
                        return; /* No PM support in this redistributor */
        }
 
-       while (count--) {
+       while (--count) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
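
The one-character gic-v3 change fixes timeout detection after this loop: with while (count--), a loop that never hits the break exits with count wrapped to (u32)-1, so a following "if (!count)" timeout check can never fire, whereas while (--count) exits with count == 0 in that case. Traced with a small hypothetical count:

/*
 * Assume u32 count = 2 and the break condition never holds:
 *
 *      while (count--)  tests 2, 1, 0 -> exits with count == 0xffffffff,
 *                       so "if (!count) timeout()" never triggers.
 *      while (--count)  tests 1, 0    -> exits with count == 0,
 *                       and the timeout check fires as intended.
 */
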
index 58e5b4e..d6c404b 100644 (file)
@@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
                 */
                *base += 0xf000;
                cpuif_res.start += 0xf000;
-               pr_warn("GIC: Adjusting CPU interface base to %pa",
+               pr_warn("GIC: Adjusting CPU interface base to %pa\n",
                        &cpuif_res.start);
        }
 
index 84b01de..033bccb 100644 (file)
 
 static struct irq_chip jcore_aic;
 
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+       if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+               handle_percpu_irq(desc);
+       else
+               handle_simple_irq(desc);
+}
+
 static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                                   irq_hw_number_t hwirq)
 {
        struct irq_chip *aic = d->host_data;
 
-       irq_set_chip_and_handler(irq, aic, handle_simple_irq);
+       irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
 
        return 0;
 }
index 8abde6b..6d53810 100644 (file)
@@ -266,7 +266,7 @@ static struct raid_type {
        {"raid10_offset", "raid10 offset (striped mirrors)",        0, 2, 10, ALGORITHM_RAID10_OFFSET},
        {"raid10_near",   "raid10 near (striped mirrors)",          0, 2, 10, ALGORITHM_RAID10_NEAR},
        {"raid10",        "raid10 (striped mirrors)",               0, 2, 10, ALGORITHM_RAID10_DEFAULT},
-       {"raid4",         "raid4 (dedicated last parity disk)",     1, 2, 4,  ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+       {"raid4",         "raid4 (dedicated first parity disk)",    1, 2, 5,  ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
        {"raid5_n",       "raid5 (dedicated last parity disk)",     1, 2, 5,  ALGORITHM_PARITY_N},
        {"raid5_ls",      "raid5 (left symmetric)",                 1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
        {"raid5_rs",      "raid5 (right symmetric)",                1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
                /*
                 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
                 */
-               if (le32_to_cpu(sb->level) != mddev->level) {
+               if (le32_to_cpu(sb->level) != mddev->new_level) {
                        DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
                        return -EINVAL;
                }
-               if (le32_to_cpu(sb->layout) != mddev->layout) {
+               if (le32_to_cpu(sb->layout) != mddev->new_layout) {
                        DMERR("Reshaping raid sets not yet supported. (raid layout change)");
                        DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
                        DMERR("  Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
                              raid10_md_layout_to_copies(mddev->layout));
                        return -EINVAL;
                }
-               if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+               if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
                        DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
                        return -EINVAL;
                }
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
                        return -EINVAL;
                }
 
+               DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
                /* Table line is checked vs. authoritative superblock */
                rs_set_new(rs);
        }
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
        if (!mddev->events && super_init_validation(rs, rdev))
                return -EINVAL;
 
-       if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+       if (le32_to_cpu(sb->compat_features) &&
+           le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
                rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
                return -EINVAL;
        }
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 9, 0},
+       .version = {1, 9, 1},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
index bdf1606..9a8b710 100644 (file)
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
        struct mirror *m;
-       /* if details->bi_bdev == NULL, details were not saved */
        struct dm_bio_details details;
        region_t write_region;
 };
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
        struct dm_raid1_bio_record *bio_record =
          dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
-       bio_record->details.bi_bdev = NULL;
-
        if (rw == WRITE) {
                /* Save region for mirror_end_io() handler */
                bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
        }
 
        if (error == -EOPNOTSUPP)
-               goto out;
+               return error;
 
        if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               goto out;
+               return error;
 
        if (unlikely(error)) {
-               if (!bio_record->details.bi_bdev) {
-                       /*
-                        * There wasn't enough memory to record necessary
-                        * information for a retry or there was no other
-                        * mirror in-sync.
-                        */
-                       DMERR_LIMIT("Mirror read failed.");
-                       return -EIO;
-               }
-
                m = bio_record->m;
 
                DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                        bd = &bio_record->details;
 
                        dm_bio_restore(bd, bio);
-                       bio_record->details.bi_bdev = NULL;
+                       bio->bi_error = 0;
 
                        queue_bio(ms, bio, rw);
                        return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                DMERR("All replicated volumes dead, failing I/O");
        }
 
-out:
-       bio_record->details.bi_bdev = NULL;
-
        return error;
 }
 
index dc75bea..1d0d2ad 100644 (file)
@@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
        kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
-       if (IS_ERR(md->kworker_task))
-               return PTR_ERR(md->kworker_task);
+       if (IS_ERR(md->kworker_task)) {
+               int error = PTR_ERR(md->kworker_task);
+               md->kworker_task = NULL;
+               return error;
+       }
 
        elv_register_queue(md->queue);
 
index 3e407a9..c4b53b3 100644 (file)
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
-               DMERR("%s: %s: unknown target type", dm_device_name(t->md),
-                     type);
+               DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
                return -EINVAL;
        }
 
        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
-                       DMERR("%s: target type %s must appear alone in table",
-                             dm_device_name(t->md), type);
-                       return -EINVAL;
+                       tgt->error = "singleton target type must appear alone in table";
+                       goto bad;
                }
                t->singleton = true;
        }
 
        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
-               DMERR("%s: target type %s may not be included in read-only tables",
-                     dm_device_name(t->md), type);
-               return -EINVAL;
+               tgt->error = "target type may not be included in a read-only table";
+               goto bad;
        }
 
        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
-                       DMERR("%s: immutable target type %s cannot be mixed with other target types",
-                             dm_device_name(t->md), t->immutable_target_type->name);
-                       return -EINVAL;
+                       tgt->error = "immutable target type cannot be mixed with other target types";
+                       goto bad;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
-                       DMERR("%s: immutable target type %s cannot be mixed with other target types",
-                             dm_device_name(t->md), tgt->type->name);
-                       return -EINVAL;
+                       tgt->error = "immutable target type cannot be mixed with other target types";
+                       goto bad;
                }
                t->immutable_target_type = tgt->type;
        }
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
-               r = -EINVAL;
                goto bad;
        }
 
index 147af95..ef7bf1d 100644 (file)
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->bs)
                bioset_free(md->bs);
 
-       cleanup_srcu_struct(&md->io_barrier);
-
        if (md->disk) {
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->queue)
                blk_cleanup_queue(md->queue);
 
+       cleanup_srcu_struct(&md->io_barrier);
+
        if (md->bdev) {
                bdput(md->bdev);
                md->bdev = NULL;
index 4769469..2c9232e 100644 (file)
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
        }
 
        /* Get user pages for DMA Xfer */
-       err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
-                       1, dma->map);
+       err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+                       dma->map, FOLL_FORCE);
 
        if (user_dma.page_count != err) {
                IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
index b094054..f7299d3 100644 (file)
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
 
        /* Get user pages for DMA Xfer */
        y_pages = get_user_pages_unlocked(y_dma.uaddr,
-                       y_dma.page_count, 0, 1, &dma->map[0]);
+                       y_dma.page_count, &dma->map[0], FOLL_FORCE);
        uv_pages = 0; /* silence gcc. value is set and consumed only if: */
        if (y_pages == y_dma.page_count) {
                uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
-                               uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+                               uv_dma.page_count, &dma->map[y_pages],
+                               FOLL_FORCE);
        }
 
        if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
index e668dde..a31b95c 100644 (file)
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
        if (!vec)
                return -ENOMEM;
 
-       ret = get_vaddr_frames(virtp, 1, true, false, vec);
+       ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
        if (ret != 1) {
                frame_vector_destroy(vec);
                return -EINVAL;
index f300f06..1db0af6 100644 (file)
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 {
        unsigned long first, last;
        int err, rw = 0;
+       unsigned int flags = FOLL_FORCE;
 
        dma->direction = direction;
        switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
        if (NULL == dma->pages)
                return -ENOMEM;
 
+       if (rw == READ)
+               flags |= FOLL_WRITE;
+
        dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
                data, size, dma->nr_pages);
 
        err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
-                            rw == READ, 1, /* force */
-                            dma->pages, NULL);
+                            flags, dma->pages, NULL);
 
        if (err != dma->nr_pages) {
                dma->nr_pages = (err >= 0) ? err : 0;
index 3c3b517..1cd322e 100644 (file)
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
        unsigned long first, last;
        unsigned long nr;
        struct frame_vector *vec;
+       unsigned int flags = FOLL_FORCE;
+
+       if (write)
+               flags |= FOLL_WRITE;
 
        first = start >> PAGE_SHIFT;
        last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
        vec = frame_vector_create(nr);
        if (!vec)
                return ERR_PTR(-ENOMEM);
-       ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+       ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
        if (ret < 0)
                goto out_destroy;
        /* We accept only complete set of PFNs */
index d34bc35..2e3cf01 100644 (file)
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
        int rc;
 
        if (!host->req) {
+               pm_runtime_get_sync(ms_dev(host));
                do {
                        rc = memstick_next_req(msh, &host->req);
                        dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
                                                host->req->error);
                        }
                } while (!rc);
+               pm_runtime_put(ms_dev(host));
        }
 
 }
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
        dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
                        __func__, param, value);
 
+       pm_runtime_get_sync(ms_dev(host));
        mutex_lock(&ucr->dev_mutex);
 
        err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
        }
 out:
        mutex_unlock(&ucr->dev_mutex);
+       pm_runtime_put(ms_dev(host));
 
        /* power-on delay */
        if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
        int err;
 
        for (;;) {
+               pm_runtime_get_sync(ms_dev(host));
                mutex_lock(&ucr->dev_mutex);
 
                /* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
                }
 
 poll_again:
+               pm_runtime_put(ms_dev(host));
                if (host->eject)
                        break;
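
These rtsx_usb hunks (and the sdmmc ones further down) bracket every stretch of register access with pm_runtime_get_sync()/pm_runtime_put(), keeping the USB parent resumed for exactly as long as the hardware is being touched. The general shape, with an illustrative function name:

static void my_do_io(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* resume (or keep awake) the device */

        /* ... talk to the hardware ... */

        pm_runtime_put(dev);            /* allow autosuspend again */
}
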
 
index f3d34b9..2e5233b 100644 (file)
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
        if (ctx->status == STARTED)
                goto out; /* already started */
 
+       /*
+        * Increment the mapped context count for the adapter. This also
+        * checks whether adapter_context_lock is taken.
+        */
+       rc = cxl_adapter_context_get(ctx->afu->adapter);
+       if (rc)
+               goto out;
+
        if (task) {
                ctx->pid = get_task_pid(task, PIDTYPE_PID);
                ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -239,7 +247,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
        cxl_ctx_get();
 
        if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+               put_pid(ctx->glpid);
                put_pid(ctx->pid);
+               ctx->glpid = ctx->pid = NULL;
+               cxl_adapter_context_put(ctx->afu->adapter);
                cxl_ctx_put();
                goto out;
        }
index c466ee2..5e506c1 100644 (file)
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
        put_pid(ctx->glpid);
 
        cxl_ctx_put();
+
+       /* Decrease the attached context count on the adapter */
+       cxl_adapter_context_put(ctx->afu->adapter);
        return 0;
 }
 
index 01d372a..a144073 100644 (file)
@@ -618,6 +618,14 @@ struct cxl {
        bool perst_select_user;
        bool perst_same_image;
        bool psl_timebase_synced;
+
+       /*
+        * number of contexts mapped onto this card. Possible values are:
+        * >0: Number of contexts mapped and new one can be mapped.
+        *  0: No active contexts and new ones can be mapped.
+        * -1: No contexts mapped and new ones cannot be mapped.
+        */
+       atomic_t contexts_num;
 };
 
 int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
 
 /* decode AFU error bits in the PSL register PSL_SERR_An */
 void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * If the adapter_context_lock is taken, returns -EBUSY.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If no active contexts then prevents contexts from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the context lock if taken; otherwise warn and force it unlocked */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
 #endif
index 5fb9894..77080cc 100644 (file)
@@ -193,6 +193,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
 
+       /*
+        * Increment the mapped context count for the adapter. This also
+        * checks whether adapter_context_lock is taken.
+        */
+       rc = cxl_adapter_context_get(ctx->afu->adapter);
+       if (rc) {
+               afu_release_irqs(ctx, ctx);
+               goto out;
+       }
+
        /*
         * We grab the PID here and not in the file open to allow for the case
         * where a process (master, some daemon, etc) has opened the chardev on
@@ -205,11 +215,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
        ctx->pid = get_task_pid(current, PIDTYPE_PID);
        ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
 
+
        trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
 
        if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
                                                        amr))) {
                afu_release_irqs(ctx, ctx);
+               cxl_adapter_context_put(ctx->afu->adapter);
+               put_pid(ctx->glpid);
+               put_pid(ctx->pid);
+               ctx->glpid = ctx->pid = NULL;
                goto out;
        }
 
index 9aa58a7..3e102cd 100644 (file)
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
        if ((rc = cxl_sysfs_adapter_add(adapter)))
                goto err_put1;
 
+       /* release the context lock as the adapter is configured */
+       cxl_adapter_context_unlock(adapter);
+
        return adapter;
 
 err_put1:
index d9be23b..62e0dfb 100644 (file)
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
        if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
                goto err2;
 
-       return adapter;
+       /* start with context lock taken */
+       atomic_set(&adapter->contexts_num, -1);
 
+       return adapter;
 err2:
        cxl_remove_adapter_nr(adapter);
 err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
        return 0;
 }
 
+int cxl_adapter_context_get(struct cxl *adapter)
+{
+       int rc;
+
+       rc = atomic_inc_unless_negative(&adapter->contexts_num);
+       return rc >= 0 ? 0 : -EBUSY;
+}
+
+void cxl_adapter_context_put(struct cxl *adapter)
+{
+       atomic_dec_if_positive(&adapter->contexts_num);
+}
+
+int cxl_adapter_context_lock(struct cxl *adapter)
+{
+       int rc;
+       /* no active contexts -> contexts_num == 0 */
+       rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
+       return rc ? -EBUSY : 0;
+}
+
+void cxl_adapter_context_unlock(struct cxl *adapter)
+{
+       int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
+
+       /*
+        * contexts lock taken -> contexts_num == -1
+        * If not true then show a warning and force reset the lock.
+        * This will happen when context_unlock was requested without
+        * doing a context_lock.
+        */
+       if (val != -1) {
+               atomic_set(&adapter->contexts_num, 0);
+               WARN(1, "Adapter context unlocked with %d active contexts",
+                    val);
+       }
+}
+
 static int __init init_cxl(void)
 {
        int rc = 0;
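
The cxl helpers above fold a lock and a refcount into one atomic: contexts_num is -1 while the adapter is locked against new attachments, 0 when idle, and >0 while contexts are attached, so atomic_inc_unless_negative(), atomic_dec_if_positive() and atomic_cmpxchg() cover get/put/lock without a spinlock. A usage sketch of the lifecycle:

/*
 * cxl_adapter_context_lock(a);    0 -> -1, or -EBUSY if contexts exist
 *      ... reconfigure/reset the adapter safely ...
 * cxl_adapter_context_unlock(a);  -1 -> 0
 *
 * cxl_adapter_context_get(a);     n -> n + 1, -EBUSY while locked (-1)
 *      ... context attached and running ...
 * cxl_adapter_context_put(a);     n -> n - 1, never drops below 0
 */
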
index 7afad84..e96be9c 100644 (file)
@@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
        if ((rc = cxl_native_register_psl_err_irq(adapter)))
                goto err;
 
+       /* Release the context lock now that the adapter is configured */
+       cxl_adapter_context_unlock(adapter);
        return 0;
 
 err:
index b043c20..a8b6d6a 100644 (file)
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
        int val;
 
        rc = sscanf(buf, "%i", &val);
-       if ((rc != 1) || (val != 1))
+       if ((rc != 1) || (val != 1 && val != -1))
                return -EINVAL;
 
-       if ((rc = cxl_ops->adapter_reset(adapter)))
-               return rc;
-       return count;
+       /*
+        * See if we can lock the context mapping; that is only allowed
+        * when there are no contexts attached to the adapter. Once
+        * taken, this will also prevent any context from being activated.
+        */
+       if (val == 1) {
+               rc =  cxl_adapter_context_lock(adapter);
+               if (rc)
+                       goto out;
+
+               rc = cxl_ops->adapter_reset(adapter);
+               /* If the reset failed, release the context lock */
+               if (rc)
+                       cxl_adapter_context_unlock(adapter);
+
+       } else if (val == -1) {
+               /* Perform a forced adapter reset */
+               rc = cxl_ops->adapter_reset(adapter);
+       }
+
+out:
+       return rc ? rc : count;
 }
 
 static ssize_t load_image_on_perst_show(struct device *device,
index 8a679ec..fc2794b 100644 (file)
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                if (copy_from_user(sgl->lpage, user_addr + user_size -
                                   sgl->lpage_size, sgl->lpage_size)) {
                        rc = -EFAULT;
-                       goto err_out1;
+                       goto err_out2;
                }
        }
        return 0;
 
+ err_out2:
+       __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+                                sgl->lpage_dma_addr);
+       sgl->lpage = NULL;
+       sgl->lpage_dma_addr = 0;
  err_out1:
        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                 sgl->fpage_dma_addr);
+       sgl->fpage = NULL;
+       sgl->fpage_dma_addr = 0;
  err_out:
        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);
+       sgl->sgl = NULL;
+       sgl->sgl_dma_addr = 0;
+       sgl->sgl_size = 0;
        return -ENOMEM;
 }
 
index e6e5e55..60415a2 100644 (file)
@@ -981,11 +981,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
        hisr = mei_txe_br_reg_read(hw, HISR_REG);
 
        aliveness = mei_txe_aliveness_get(dev);
-       if (hhisr & IPC_HHIER_SEC && aliveness)
+       if (hhisr & IPC_HHIER_SEC && aliveness) {
                ipc_isr = mei_txe_sec_reg_read_silent(hw,
                                SEC_IPC_HOST_INT_STATUS_REG);
-       else
+       } else {
                ipc_isr = 0;
+               hhisr &= ~IPC_HHIER_SEC;
+       }
 
        generated = generated ||
                (hisr & HISR_INT_STS_MSK) ||
index e0203b1..f806a44 100644 (file)
@@ -1396,8 +1396,7 @@ retry:
                pinned_pages->nr_pages = get_user_pages(
                                (u64)addr,
                                nr_pages,
-                               !!(prot & SCIF_PROT_WRITE),
-                               0,
+                               (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
                                pinned_pages->pages,
                                NULL);
                up_write(&mm->mmap_sem);
index a2d97b9..6fb773d 100644 (file)
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
 #else
        *pageshift = PAGE_SHIFT;
 #endif
-       if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+       if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
index 1525870..33741ad 100644 (file)
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
        spin_lock(&gru->gs_asid_lock);
        BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
        asids->mt_ctxbitmap ^= ctxbitmap;
-       gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+       gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
        spin_unlock(&gru->gs_asid_lock);
        spin_unlock(&gms->ms_asid_lock);
index a8cee33..b3fa738 100644 (file)
@@ -431,6 +431,12 @@ int vmci_doorbell_create(struct vmci_handle *handle,
        if (vmci_handle_is_invalid(*handle)) {
                u32 context_id = vmci_get_context_id();
 
+               if (context_id == VMCI_INVALID_ID) {
+                       pr_warn("Failed to get context ID\n");
+                       result = VMCI_ERROR_NO_RESOURCES;
+                       goto free_mem;
+               }
+
                /* Let resource code allocate a free ID for us */
                new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
        } else {
@@ -525,7 +531,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle)
 
        entry = container_of(resource, struct dbell_entry, resource);
 
-       if (vmci_guest_code_active()) {
+       if (!hlist_unhashed(&entry->node)) {
                int result;
 
                dbell_index_table_remove(entry);
index 896be15..d7eaf1e 100644 (file)
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.4.0-k");
+MODULE_VERSION("1.1.5.0-k");
 MODULE_LICENSE("GPL v2");
index c333511..709a872 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/uaccess.h>
 
 #include "queue.h"
+#include "block.h"
 
 MODULE_ALIAS("mmc:block");
 #ifdef MODULE_PARAM_PREFIX
@@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
        struct mmc_blk_data *md = mq->data;
        struct mmc_packed *packed = mqrq->packed;
        bool do_rel_wr, do_data_tag;
-       u32 *packed_cmd_hdr;
+       __le32 *packed_cmd_hdr;
        u8 hdr_blocks;
        u8 i = 1;
 
index 3c15a75..342f1e3 100644 (file)
@@ -31,7 +31,7 @@ enum mmc_packed_type {
 
 struct mmc_packed {
        struct list_head        list;
-       u32                     cmd_hdr[1024];
+       __le32                  cmd_hdr[1024];
        unsigned int            blocks;
        u8                      nr_entries;
        u8                      retries;
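
Retyping cmd_hdr from u32 to __le32 records that the packed command header is stored in little-endian wire format. With sparse (make C=1), stores into such a field must go through cpu_to_le32(), which catches missing byte swaps on big-endian hosts at compile time. Illustrative values:

__le32 hdr;

hdr = cpu_to_le32(0x1234);      /* OK: explicit CPU -> LE conversion */
/* hdr = 0x1234; */             /* sparse would warn: incorrect type */
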
index 3486bc7..39fc5b2 100644 (file)
@@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
                goto out_err;
        }
 
+       if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+               err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+       if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+               err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+       /* If it fails, try again during the next card power cycle */
+       if (err)
+               goto out_err;
+
        err = mmc_select_bus_width(card);
        if (err < 0)
                goto out_err;
@@ -1272,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
        if (err)
                goto out_err;
 
+       mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
        err = mmc_switch_status(card);
        if (err)
                goto out_err;
index 4106295..6e9c0f8 100644 (file)
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        dev_dbg(sdmmc_dev(host), "%s\n", __func__);
        mutex_lock(&ucr->dev_mutex);
 
-       if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
-               mutex_unlock(&ucr->dev_mutex);
-               return;
-       }
-
        sd_set_power_mode(host, ios->power_mode);
        sd_set_bus_width(host, ios->bus_width);
        sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
                container_of(work, struct rtsx_usb_sdmmc, led_work);
        struct rtsx_ucr *ucr = host->ucr;
 
+       pm_runtime_get_sync(sdmmc_dev(host));
        mutex_lock(&ucr->dev_mutex);
 
        if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
                rtsx_usb_turn_on_led(ucr);
 
        mutex_unlock(&ucr->dev_mutex);
+       pm_runtime_put(sdmmc_dev(host));
 }
 #endif
 
index 1f54fd8..7123ef9 100644 (file)
@@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
        struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
        u32 data;
 
-       if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+       if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
+                       reg == SDHCI_INT_STATUS)) {
                if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
                        /*
                         * Clear and then set D3CD bit to avoid missing the
@@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
        esdhc_clrset_le(host, 0xffff, val, reg);
 }
 
+static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
+{
+       u8 ret;
+       u32 val;
+
+       switch (reg) {
+       case SDHCI_HOST_CONTROL:
+               val = readl(host->ioaddr + reg);
+
+               ret = val & SDHCI_CTRL_LED;
+               ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
+               ret |= (val & ESDHC_CTRL_4BITBUS);
+               ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
+               return ret;
+       }
+
+       return readb(host->ioaddr + reg);
+}
+
 static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 static struct sdhci_ops sdhci_esdhc_ops = {
        .read_l = esdhc_readl_le,
        .read_w = esdhc_readw_le,
+       .read_b = esdhc_readb_le,
        .write_l = esdhc_writel_le,
        .write_w = esdhc_writew_le,
        .write_b = esdhc_writeb_le,
index da8e40a..410a55b 100644 (file)
@@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
        writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
 }
 
-void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
 {
        u8 ctrl;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
        }
 }
 
+static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+                                      struct mmc_ios *ios)
+{
+       switch (ios->signal_voltage) {
+       case MMC_SIGNAL_VOLTAGE_180:
+               /*
+                * Please don't switch to 1V8, as arasan,5.1 doesn't
+                * actually use this setting to indicate the signal
+                * voltage; forcing 1V8 would break the state machine.
+                * It behaves like a broken quirk, but we can work
+                * around it here.
+                */
+               return 0;
+       case MMC_SIGNAL_VOLTAGE_330:
+       case MMC_SIGNAL_VOLTAGE_120:
+               /* We don't support 3V3 and 1V2 */
+               break;
+       }
+
+       return -EINVAL;
+}
+
 static struct sdhci_ops sdhci_arasan_ops = {
        .set_clock = sdhci_arasan_set_clock,
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 
                host->mmc_host_ops.hs400_enhanced_strobe =
                                        sdhci_arasan_hs400_enhanced_strobe;
+               host->mmc_host_ops.start_signal_voltage_switch =
+                                       sdhci_arasan_voltage_switch;
        }
 
        ret = sdhci_add_host(host);
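
Note that the probe hunk wires the new handler into the mmc_host_ops (not the sdhci_ops), so the MMC core consults it directly during signal-voltage negotiation; accepting MMC_SIGNAL_VOLTAGE_180 as a no-op keeps the core's state machine happy without touching the controller. In sketch form, using the names from the hunks above:

/* from the probe hunk: override the core-level callback */
host->mmc_host_ops.start_signal_voltage_switch =
			sdhci_arasan_voltage_switch;

/* the core then sees "switch succeeded" for 1V8 requests while the
 * controller's voltage setting is deliberately left alone */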
index 72a1f1f..1d9e00a 100644 (file)
 #include "sdhci-pci.h"
 #include "sdhci-pci-o2micro.h"
 
+static int sdhci_pci_enable_dma(struct sdhci_host *host);
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
+static void sdhci_pci_hw_reset(struct sdhci_host *host);
+static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
+                                          struct mmc_card *card,
+                                          unsigned int max_dtr, int host_drv,
+                                          int card_drv, int *drv_type);
+
 /*****************************************************************************\
  *                                                                           *
  * Hardware specific quirk handling                                          *
@@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
        return 0;
 }
 
+#define SDHCI_INTEL_PWR_TIMEOUT_CNT    20
+#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
+
+static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+                                 unsigned short vdd)
+{
+       int cntr;
+       u8 reg;
+
+       sdhci_set_power(host, mode, vdd);
+
+       if (mode == MMC_POWER_OFF)
+               return;
+
+       /*
+        * Bus power might not enable after D3 -> D0 transition due to the
+        * present state not yet having propagated. Retry for up to 2ms.
+        */
+       for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
+               reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+               if (reg & SDHCI_POWER_ON)
+                       break;
+               udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
+               reg |= SDHCI_POWER_ON;
+               sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+       }
+}
+
+static const struct sdhci_ops sdhci_intel_byt_ops = {
+       .set_clock              = sdhci_set_clock,
+       .set_power              = sdhci_intel_set_power,
+       .enable_dma             = sdhci_pci_enable_dma,
+       .set_bus_width          = sdhci_pci_set_bus_width,
+       .reset                  = sdhci_reset,
+       .set_uhs_signaling      = sdhci_set_uhs_signaling,
+       .hw_reset               = sdhci_pci_hw_reset,
+       .select_drive_strength  = sdhci_pci_select_drive_strength,
+};
+
 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
        .allow_runtime_pm = true,
        .probe_slot     = byt_emmc_probe_slot,
@@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
        .quirks2        = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
                          SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
                          SDHCI_QUIRK2_STOP_WITH_TC,
+       .ops            = &sdhci_intel_byt_ops,
 };
 
 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
                        SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
        .allow_runtime_pm = true,
        .probe_slot     = byt_sdio_probe_slot,
+       .ops            = &sdhci_intel_byt_ops,
 };
 
 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
        .allow_runtime_pm = true,
        .own_cd_for_runtime_pm = true,
        .probe_slot     = byt_sd_probe_slot,
+       .ops            = &sdhci_intel_byt_ops,
 };
 
 /* Define Host controllers for Intel Merrifield platform */
@@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
        }
 
        host->hw_name = "PCI";
-       host->ops = &sdhci_pci_ops;
+       host->ops = chip->fixes && chip->fixes->ops ?
+                   chip->fixes->ops :
+                   &sdhci_pci_ops;
        host->quirks = chip->quirks;
        host->quirks2 = chip->quirks2;
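
Taken together, these sdhci-pci hunks add an optional per-chip ops table: a fixes entry may now carry its own sdhci_ops (as the three Intel BYT entries do), and probe falls back to the generic table when none is set. Condensed from the hunks, not a verbatim copy:

/* sdhci-pci.h gains the member: */
struct sdhci_pci_fixes {
	/* ... */
	const struct sdhci_ops *ops;	/* optional per-chip override */
};

/* and sdhci_pci_probe_slot() selects it, with a generic fallback: */
host->ops = chip->fixes && chip->fixes->ops ? chip->fixes->ops
					    : &sdhci_pci_ops;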
 
index 9c7c08b..6bccf56 100644 (file)
@@ -65,6 +65,8 @@ struct sdhci_pci_fixes {
 
        int                     (*suspend) (struct sdhci_pci_chip *);
        int                     (*resume) (struct sdhci_pci_chip *);
+
+       const struct sdhci_ops  *ops;
 };
 
 struct sdhci_pci_slot {
index dd1938d..d0f5c05 100644 (file)
@@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
        struct mmc_host *mmc = host->mmc;
        u8 pwr = host->pwr;
 
-       sdhci_set_power(host, mode, vdd);
+       sdhci_set_power_noreg(host, mode, vdd);
 
        if (host->pwr == pwr)
                return;
index 4805566..71654b9 100644 (file)
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
                         * host->clock is in Hz.  target_timeout is in us.
                         * Hence, us = 1000000 * cycles / Hz.  Round up.
                         */
-                       val = 1000000 * data->timeout_clks;
+                       val = 1000000ULL * data->timeout_clks;
                        if (do_div(val, host->clock))
                                target_timeout++;
                        target_timeout += val;
@@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
        /* Initially, a command has no error */
        cmd->error = 0;
 
+       if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+           cmd->opcode == MMC_STOP_TRANSMISSION)
+               cmd->flags |= MMC_RSP_BUSY;
+
        /* Wait max 10 ms */
        timeout = 10;
 
@@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
 }
 
-void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-                    unsigned short vdd)
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+                          unsigned short vdd)
 {
        u8 pwr = 0;
 
@@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                        mdelay(10);
        }
 }
-EXPORT_SYMBOL_GPL(sdhci_set_power);
+EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
 
-static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-                             unsigned short vdd)
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+                    unsigned short vdd)
 {
-       struct mmc_host *mmc = host->mmc;
-
-       if (host->ops->set_power)
-               host->ops->set_power(host, mode, vdd);
-       else if (!IS_ERR(mmc->supply.vmmc))
-               sdhci_set_power_reg(host, mode, vdd);
+       if (IS_ERR(host->mmc->supply.vmmc))
+               sdhci_set_power_noreg(host, mode, vdd);
        else
-               sdhci_set_power(host, mode, vdd);
+               sdhci_set_power_reg(host, mode, vdd);
 }
+EXPORT_SYMBOL_GPL(sdhci_set_power);
 
 /*****************************************************************************\
  *                                                                           *
@@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                }
        }
 
-       __sdhci_set_power(host, ios->power_mode, ios->vdd);
+       if (host->ops->set_power)
+               host->ops->set_power(host, ios->power_mode, ios->vdd);
+       else
+               sdhci_set_power(host, ios->power_mode, ios->vdd);
 
        if (host->ops->platform_send_init_74_clocks)
                host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
  *                                                                           *
 \*****************************************************************************/
 
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 {
        if (!host->cmd) {
                /*
@@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
                return;
        }
 
-       if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
-           !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
-           host->cmd->opcode == MMC_STOP_TRANSMISSION)
-               *mask &= ~SDHCI_INT_DATA_END;
-
        if (intmask & SDHCI_INT_RESPONSE)
                sdhci_finish_command(host);
 }
@@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
                }
 
                if (intmask & SDHCI_INT_CMD_MASK)
-                       sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
-                                     &intmask);
+                       sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
 
                if (intmask & SDHCI_INT_DATA_MASK)
                        sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
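
The net effect of the sdhci.c power rework above: the old register-only sdhci_set_power() is renamed sdhci_set_power_noreg(), sdhci_set_power() becomes the regulator-aware entry point, and sdhci_set_ios() prefers a driver's set_power hook when one exists. Condensed from the hunks (not a verbatim copy):

/* in sdhci_set_ios(): */
if (host->ops->set_power)
	host->ops->set_power(host, ios->power_mode, ios->vdd);
else
	sdhci_set_power(host, ios->power_mode, ios->vdd);

/* where the generic entry point picks the backend: */
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);	/* no regulator */
	else
		sdhci_set_power_reg(host, mode, vdd);	/* vmmc regulator */
}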
index c722cd2..766df17 100644 (file)
@@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                     unsigned short vdd);
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+                          unsigned short vdd);
 void sdhci_set_bus_width(struct sdhci_host *host, int width);
 void sdhci_reset(struct sdhci_host *host, u8 mask);
 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
index 95c4048..388e46b 100644 (file)
@@ -741,6 +741,7 @@ static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
                goto out_put;
        }
 
+       vid_hdr = ubi_get_vid_hdr(vidb);
        ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
 
        mutex_lock(&ubi->buf_mutex);
index d6384d9..2ff6215 100644 (file)
@@ -287,7 +287,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
 
                /* new_aeb is newer */
                if (cmp_res & 1) {
-                       victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum);
+                       victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
                        if (!victim)
                                return -ENOMEM;
 
index 8b2b740..124c243 100644 (file)
@@ -89,7 +89,7 @@ config NVDIMM_PFN
          Select Y if unsure
 
 config NVDIMM_DAX
-       tristate "NVDIMM DAX: Raw access to persistent memory"
+       bool "NVDIMM DAX: Raw access to persistent memory"
        default LIBNVDIMM
        depends on NVDIMM_PFN
        help
index 3509cff..abe5c6b 100644 (file)
@@ -2176,12 +2176,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
        return devs;
 
  err:
-       for (i = 0; devs[i]; i++)
-               if (is_nd_blk(&nd_region->dev))
-                       namespace_blk_release(devs[i]);
-               else
-                       namespace_pmem_release(devs[i]);
-       kfree(devs);
+       if (devs) {
+               for (i = 0; devs[i]; i++)
+                       if (is_nd_blk(&nd_region->dev))
+                               namespace_blk_release(devs[i]);
+                       else
+                               namespace_pmem_release(devs[i]);
+               kfree(devs);
+       }
        return NULL;
 }
 
index 42b3a82..2461843 100644 (file)
@@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
        return to_nd_region(to_dev(pmem)->parent);
 }
 
-static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
                unsigned int len)
 {
        struct device *dev = to_dev(pmem);
@@ -62,8 +62,12 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
                                __func__, (unsigned long long) sector,
                                cleared / 512, cleared / 512 > 1 ? "s" : "");
                badblocks_clear(&pmem->bb, sector, cleared / 512);
+       } else {
+               return -EIO;
        }
+
        invalidate_pmem(pmem->virt_addr + offset, len);
+       return 0;
 }
 
 static void write_pmem(void *pmem_addr, struct page *page,
@@ -123,7 +127,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                flush_dcache_page(page);
                write_pmem(pmem_addr, page, off, len);
                if (unlikely(bad_pmem)) {
-                       pmem_clear_poison(pmem, pmem_off, len);
+                       rc = pmem_clear_poison(pmem, pmem_off, len);
                        write_pmem(pmem_addr, page, off, len);
                }
        }
index 329381a..79e679d 100644 (file)
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 
        /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
        c.identify.opcode = nvme_admin_identify;
-       c.identify.cns = cpu_to_le32(1);
+       c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
 
        *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
        if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
        struct nvme_command c = { };
 
        c.identify.opcode = nvme_admin_identify;
-       c.identify.cns = cpu_to_le32(2);
+       c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
        c.identify.nsid = cpu_to_le32(nsid);
        return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
 }
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
                return -ENODEV;
        }
 
-       if (ns->ctrl->vs >= NVME_VS(1, 1))
+       if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
                memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
-       if (ns->ctrl->vs >= NVME_VS(1, 2))
+       if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
                memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
 
        return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
        int ret;
 
        while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+               if (csts == ~0)
+                       return -ENODEV;
                if ((csts & NVME_CSTS_RDY) == bit)
                        break;
 
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        }
        page_shift = NVME_CAP_MPSMIN(cap) + 12;
 
-       if (ctrl->vs >= NVME_VS(1, 1))
+       if (ctrl->vs >= NVME_VS(1, 1, 0))
                ctrl->subsystem = NVME_CAP_NSSRC(cap);
 
        ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
                return;
 
        nn = le32_to_cpu(id->nn);
-       if (ctrl->vs >= NVME_VS(1, 1) &&
+       if (ctrl->vs >= NVME_VS(1, 1, 0) &&
            !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
                if (!nvme_scan_ns_list(ctrl, nn))
                        goto done;
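
These hunks move every version comparison from the two-argument NVME_VS(major, minor) to a three-argument form that also carries the tertiary version number. The packing is implied by the nvmet_subsys_alloc hunk further down, where the literal (1 << 16) | (2 << 8) | 1 becomes NVME_VS(1, 2, 1); a sketch of the macro that implies, not a quote of the header:

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

/* e.g. NVME_VS(1, 2, 1) == 0x10201, matching the literal it replaces */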
index 0fc99f0..0248d0e 100644 (file)
@@ -99,6 +99,7 @@ struct nvme_dev {
        dma_addr_t cmb_dma_addr;
        u64 cmb_size;
        u32 cmbsz;
+       u32 cmbloc;
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
 };
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
-               queue_work(nvme_workq, &dev->reset_work);
+               nvme_reset(dev);
 
                /*
                 * Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
        struct nvme_queue *nvmeq;
 
-       dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+       dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
                                                NVME_CAP_NSSRC(cap) : 0;
 
        if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
 
        /* Skip controllers under certain specific conditions. */
        if (nvme_should_reset(dev, csts)) {
-               if (queue_work(nvme_workq, &dev->reset_work))
+               if (!nvme_reset(dev))
                        dev_warn(dev->dev,
                                "Failed status: 0x%x, reset controller.\n",
                                csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
        return ret >= 0 ? 0 : ret;
 }
 
+static ssize_t nvme_cmb_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+                      ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 {
        u64 szu, size, offset;
-       u32 cmbloc;
        resource_size_t bar_size;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        void __iomem *cmb;
        dma_addr_t dma_addr;
 
-       if (!use_cmb_sqes)
-               return NULL;
-
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!(NVME_CMB_SZ(dev->cmbsz)))
                return NULL;
+       dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
-       cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+       if (!use_cmb_sqes)
+               return NULL;
 
        szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
        size = szu * NVME_CMB_SZ(dev->cmbsz);
-       offset = szu * NVME_CMB_OFST(cmbloc);
-       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+       offset = szu * NVME_CMB_OFST(dev->cmbloc);
+       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
 
        if (offset > bar_size)
                return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        if (size > bar_size - offset)
                size = bar_size - offset;
 
-       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
        cmb = ioremap_wc(dma_addr, size);
        if (!cmb)
                return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
        return 0;
 }
 
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 {
-       int pass, queues = dev->online_queues - 1;
+       int pass;
        unsigned long timeout;
        u8 opcode = nvme_admin_delete_sq;
 
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
                        dev->q_depth);
        }
 
-       if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+       /*
+        * CMBs can currently only exist on >= 1.2 PCIe devices. We only
+        * populate sysfs if a CMB is implemented. Note that we add the CMB
+        * attribute to the nvme_ctrl kobj, so there is no need to remove
+        * it on exit. Since nvme_dev_attrs_group has no name, we can pass
+        * NULL as the final argument to sysfs_add_file_to_group.
+        */
+
+       if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
                dev->cmb = nvme_map_cmb(dev);
 
+               if (dev->cmbsz) {
+                       if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+                                                   &dev_attr_cmb.attr, NULL))
+                               dev_warn(dev->dev,
+                                        "failed to add sysfs attribute for CMB\n");
+               }
+       }
+
        pci_enable_pcie_error_reporting(pdev);
        pci_save_state(pdev);
        return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-       int i;
+       int i, queues;
        u32 csts = -1;
 
        del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
 
+       queues = dev->online_queues - 1;
        for (i = dev->queue_count - 1; i > 0; i--)
                nvme_suspend_queue(dev->queues[i]);
 
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                if (dev->queue_count)
                        nvme_suspend_queue(dev->queues[0]);
        } else {
-               nvme_disable_io_queues(dev);
+               nvme_disable_io_queues(dev, queues);
                nvme_disable_admin_queue(dev, shutdown);
        }
        nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
 {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
-
+       if (work_busy(&dev->reset_work))
+               return -ENODEV;
        if (!queue_work(nvme_workq, &dev->reset_work))
                return -EBUSY;
-
-       flush_work(&dev->reset_work);
        return 0;
 }
 
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 
 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 {
-       return nvme_reset(to_nvme_dev(ctrl));
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
+       int ret = nvme_reset(dev);
+
+       if (!ret)
+               flush_work(&dev->reset_work);
+       return ret;
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
        if (prepare)
                nvme_dev_disable(dev, false);
        else
-               queue_work(nvme_workq, &dev->reset_work);
+               nvme_reset(dev);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       queue_work(nvme_workq, &ndev->reset_work);
+       nvme_reset(ndev);
        return 0;
 }
 #endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
        dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
-       queue_work(nvme_workq, &dev->reset_work);
+       nvme_reset(dev);
        return PCI_ERS_RESULT_RECOVERED;
 }
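
The common thread in the nvme/pci.c hunks above: open-coded queue_work(nvme_workq, &dev->reset_work) callers are routed through nvme_reset(), which refuses when the admin queue is dead or a reset is already in flight, and the one synchronous caller flushes the work itself on success. Usage, as these hunks leave it:

/* asynchronous callers (timeout, watchdog, resume, slot reset): */
nvme_reset(dev);

/* the synchronous caller, nvme_pci_reset_ctrl(): */
ret = nvme_reset(dev);
if (!ret)
	flush_work(&dev->reset_work);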
 
index c2a0a1c..3eaa4d2 100644 (file)
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        eui = id_ns->eui64;
        len = sizeof(id_ns->eui64);
 
-       if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+       if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
                if (bitmap_empty(eui, len * 8)) {
                        eui = id_ns->nguid;
                        len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
        int res;
 
-       if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+       if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
                res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
                if (res != -EOPNOTSUPP)
                        return res;
index 7ab9c93..6fe4c48 100644 (file)
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
         */
 
        /* we support multiple ports and multiple hosts: */
-       id->mic = (1 << 0) | (1 << 1);
+       id->cmic = (1 << 0) | (1 << 1);
 
        /* no limit on data transfer sizes for now */
        id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
        case nvme_admin_identify:
                req->data_len = 4096;
                switch (le32_to_cpu(cmd->identify.cns)) {
-               case 0x00:
+               case NVME_ID_CNS_NS:
                        req->execute = nvmet_execute_identify_ns;
                        return 0;
-               case 0x01:
+               case NVME_ID_CNS_CTRL:
                        req->execute = nvmet_execute_identify_ctrl;
                        return 0;
-               case 0x02:
+               case NVME_ID_CNS_NS_ACTIVE_LIST:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
                }
index 6559d5a..b4cacb6 100644 (file)
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
        if (!subsys)
                return NULL;
 
-       subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+       subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
 
        switch (type) {
        case NVME_NQN_NVME:
index 6f65646..12f39ee 100644 (file)
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
        /* we support only dynamic controllers */
        e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
        e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
-       e->nqntype = type;
+       e->subtype = type;
        memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
        memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
        memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
        case nvme_admin_identify:
                req->data_len = 4096;
                switch (le32_to_cpu(cmd->identify.cns)) {
-               case 0x01:
+               case NVME_ID_CNS_CTRL:
                        req->execute =
                                nvmet_execute_identify_disc_ctrl;
                        return 0;
index 2cb7315..6537079 100644 (file)
@@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
 
        pp = &pcie->pp;
        pp->dev = dev;
+       pcie->drvdata = match->data;
        pp->ops = pcie->drvdata->ops;
 
        dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
@@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
                return PTR_ERR(pcie->pp.dbi_base);
        }
 
-       pcie->drvdata = match->data;
        pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
 
        if (!ls_pcie_is_bridge(pcie))
index 537f58a..8df6312 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
- * Authors: Joao Pinto <jpinto@synopsys.com>
+ * Authors: Joao Pinto <jpmpinto@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index bfdd074..ad70507 100644 (file)
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
  * @nvec: number of interrupts to allocate
+ * @affinity: flag to indicate whether the CPU IRQ affinity mask should be set
  *
  * Setup the MSI capability structure of the device with the requested
  * number of interrupts.  A return value of zero indicates the successful
@@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev,
  * @dev: pointer to the pci_dev data structure of MSI-X device function
  * @entries: pointer to an array of struct msix_entry entries
  * @nvec: number of @entries
+ * @affinity: flag to indicate whether the CPU IRQ affinity mask should be set
  *
  * Setup the MSI-X capability structure of device function with a
  * single MSI-X irq. A return of zero indicates the successful setup of
index c2ac764..a8ac4bc 100644 (file)
@@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
        rc = acpi_dev_get_resources(adev, &resource_list,
                                    acpi_pmu_dev_add_resource, &res);
        acpi_dev_free_resource_list(&resource_list);
-       if (rc < 0 || IS_ERR(&res)) {
+       if (rc < 0) {
                dev_err(dev, "PMU type %d: No resource address found\n", type);
                goto err;
        }
index e1ab864..c8c72e8 100644 (file)
@@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21);
 
 #define GPID2_DESC      SIG_DESC_SET(SCU8C, 9)
 
-#define D20 26
+#define F20 26
 SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
 SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
 SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
 SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
-MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
 
-#define D21 27
+#define D20 27
 SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
 SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
 SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
 SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
-MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
 
-FUNC_GROUP_DECL(GPID2, D20, D21);
+FUNC_GROUP_DECL(GPID2, F20, D20);
 
 #define GPIE_DESC      SIG_DESC_SET(HW_STRAP1, 21)
 #define GPIE0_DESC     SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
 SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
 SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
 SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
-MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
 
 FUNC_GROUP_DECL(GPIE0, B20, C20);
 
-#define SPI1_DESC      SIG_DESC_SET(HW_STRAP1, 13)
+#define SPI1_DESC              { HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC         { HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC      { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
 #define C18 64
-SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
 SS_PIN_DECL(C18, GPIOI0, SYSCS);
 
 #define E15 65
-SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
 SS_PIN_DECL(E15, GPIOI1, SYSCK);
 
-#define A14 66
-SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
-SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+#define B16 66
+SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
 
 #define C16 67
-SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
 SS_PIN_DECL(C16, GPIOI3, SYSMISO);
 
-FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+#define VB_DESC        SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B15 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+                           SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+                           SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
+
+#define C15 69
+SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+                           SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+                           SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
+
+#define A14 70
+SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
+                           SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
+                           SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
+
+#define A15 71
+SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
+                           SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
+                           SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
+
+FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
+
+#define R2 72
+SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
+SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
 
 #define L2 73
 SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
@@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
        ASPEED_PINCTRL_PIN(A12),
        ASPEED_PINCTRL_PIN(A13),
        ASPEED_PINCTRL_PIN(A14),
+       ASPEED_PINCTRL_PIN(A15),
        ASPEED_PINCTRL_PIN(A2),
        ASPEED_PINCTRL_PIN(A3),
        ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
        ASPEED_PINCTRL_PIN(B12),
        ASPEED_PINCTRL_PIN(B13),
        ASPEED_PINCTRL_PIN(B14),
+       ASPEED_PINCTRL_PIN(B15),
+       ASPEED_PINCTRL_PIN(B16),
        ASPEED_PINCTRL_PIN(B2),
        ASPEED_PINCTRL_PIN(B20),
        ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
        ASPEED_PINCTRL_PIN(C12),
        ASPEED_PINCTRL_PIN(C13),
        ASPEED_PINCTRL_PIN(C14),
+       ASPEED_PINCTRL_PIN(C15),
        ASPEED_PINCTRL_PIN(C16),
        ASPEED_PINCTRL_PIN(C18),
        ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
        ASPEED_PINCTRL_PIN(D10),
        ASPEED_PINCTRL_PIN(D2),
        ASPEED_PINCTRL_PIN(D20),
-       ASPEED_PINCTRL_PIN(D21),
        ASPEED_PINCTRL_PIN(D4),
        ASPEED_PINCTRL_PIN(D5),
        ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
        ASPEED_PINCTRL_PIN(E7),
        ASPEED_PINCTRL_PIN(E9),
        ASPEED_PINCTRL_PIN(F19),
+       ASPEED_PINCTRL_PIN(F20),
        ASPEED_PINCTRL_PIN(F9),
        ASPEED_PINCTRL_PIN(H20),
        ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
        ASPEED_PINCTRL_GROUP(RMII2),
        ASPEED_PINCTRL_GROUP(SD1),
        ASPEED_PINCTRL_GROUP(SPI1),
+       ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+       ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
        ASPEED_PINCTRL_GROUP(TIMER4),
        ASPEED_PINCTRL_GROUP(TIMER5),
        ASPEED_PINCTRL_GROUP(TIMER6),
        ASPEED_PINCTRL_GROUP(TIMER7),
        ASPEED_PINCTRL_GROUP(TIMER8),
+       ASPEED_PINCTRL_GROUP(VGABIOSROM),
 };
 
 static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
        ASPEED_PINCTRL_FUNC(RMII2),
        ASPEED_PINCTRL_FUNC(SD1),
        ASPEED_PINCTRL_FUNC(SPI1),
+       ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+       ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
        ASPEED_PINCTRL_FUNC(TIMER4),
        ASPEED_PINCTRL_FUNC(TIMER5),
        ASPEED_PINCTRL_FUNC(TIMER6),
        ASPEED_PINCTRL_FUNC(TIMER7),
        ASPEED_PINCTRL_FUNC(TIMER8),
+       ASPEED_PINCTRL_FUNC(VGABIOSROM),
 };
 
 static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
index 0391f9f..49aeba9 100644 (file)
@@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
                                bool enable, struct regmap *map)
 {
        int i;
-       bool ret;
-
-       ret = aspeed_sig_expr_eval(expr, enable, map);
-       if (ret)
-               return ret;
 
        for (i = 0; i < expr->ndescs; i++) {
+               bool ret;
                const struct aspeed_sig_desc *desc = &expr->descs[i];
                u32 pattern = enable ? desc->enable : desc->disable;
 
@@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
 static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
                                   struct regmap *map)
 {
+       if (aspeed_sig_expr_eval(expr, true, map))
+               return true;
+
        return aspeed_sig_expr_set(expr, true, map);
 }
 
 static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
                                    struct regmap *map)
 {
+       if (!aspeed_sig_expr_eval(expr, true, map))
+               return true;
+
        return aspeed_sig_expr_set(expr, false, map);
 }
 
index d22a9fe..71bbeb9 100644 (file)
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
                return PTR_ERR(vg->pctl_dev);
        }
 
+       raw_spin_lock_init(&vg->lock);
+
        ret = byt_gpio_probe(vg);
        if (ret) {
                pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
        }
 
        platform_set_drvdata(pdev, vg);
-       raw_spin_lock_init(&vg->lock);
        pm_runtime_enable(&pdev->dev);
 
        return 0;
index 63387a4..0144376 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
 
+#include "../core.h"
 #include "pinctrl-intel.h"
 
 /* Offset from regs */
@@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
 
 #ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+       const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+       if (!pd || !intel_pad_usable(pctrl, pin))
+               return false;
+
+       /*
+        * Only restore the pin if it is actually in use by the kernel (or
+        * by userspace). It is possible that some pins are used by the
+        * BIOS during resume and those are not always locked down so leave
+        * them alone.
+        */
+       if (pd->mux_owner || pd->gpio_owner ||
+           gpiochip_line_is_irq(&pctrl->chip, pin))
+               return true;
+
+       return false;
+}
+
 int intel_pinctrl_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev)
                const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
                u32 val;
 
-               if (!intel_pad_usable(pctrl, desc->number))
+               if (!intel_pinctrl_should_save(pctrl, desc->number))
                        continue;
 
                val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev)
                void __iomem *padcfg;
                u32 val;
 
-               if (!intel_pad_usable(pctrl, desc->number))
+               if (!intel_pinctrl_should_save(pctrl, desc->number))
                        continue;
 
                padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
index 07462d7..1aba2c7 100644 (file)
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
                 * much memory to the process.
                 */
                down_read(&current->mm->mmap_sem);
-               ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+               ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+                               &page, NULL);
                up_read(&current->mm->mmap_sem);
                if (ret < 0)
                        break;
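
This call-site change (and the matching ones in the rio_mport and st hunks below) tracks a get_user_pages() signature change: the separate write/force int arguments are folded into a single gup_flags argument, where FOLL_WRITE requests write access to the pages. The before/after shape, copied from this hunk:

/* before: ..., int write, int force, ... */
ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);

/* after: ..., unsigned int gup_flags, ...; FOLL_WRITE is set iff the
 * kernel will write into the user pages, i.e. a device-to-user read */
ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
		&page, NULL);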
index 81b8dcc..b8a21d7 100644 (file)
@@ -576,6 +576,7 @@ config ASUS_WMI
 config ASUS_NB_WMI
        tristate "Asus Notebook WMI Driver"
        depends on ASUS_WMI
+       depends on SERIO_I8042 || SERIO_I8042 = n
        ---help---
          This is a driver for newer Asus notebooks. It adds extra features
          like wireless radio and bluetooth control, leds, hotkeys, backlight...
index d1a091b..a232394 100644 (file)
@@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
                },
        },
+       {
+               .ident = "Lenovo YOGA 910-13IKB",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
+               },
+       },
        {}
 };
 
index 436dfe8..9013a58 100644 (file)
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
                down_read(&current->mm->mmap_sem);
                pinned = get_user_pages(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
-                               nr_pages, dir == DMA_FROM_DEVICE, 0,
+                               nr_pages,
+                               dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
                                page_list, NULL);
                up_read(&current->mm->mmap_sem);
 
index 831935a..a7a8847 100644 (file)
@@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
                                 mdc, lpm);
                        return mdc;
                }
-               fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+               fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
                if (fcx_max_data < private->fcx_max_data) {
                        dev_warn(&device->cdev->dev,
                                 "The maximum data size for zHPF requests %u "
@@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
                         " data size for zHPF requests failed\n");
                return 0;
        } else
-               return mdc * FCX_MAX_DATA_FACTOR;
+               return (u32)mdc * FCX_MAX_DATA_FACTOR;
 }
 
 /*
index 46be25c..876c7e6 100644 (file)
@@ -780,7 +780,7 @@ static int cfg_wait_idle(void)
 static int __init chp_init(void)
 {
        struct chp_id chpid;
-       int ret;
+       int state, ret;
 
        ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
        if (ret)
@@ -791,7 +791,9 @@ static int __init chp_init(void)
                return 0;
        /* Register available channel-paths. */
        chp_id_for_each(&chpid) {
-               if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+               state = chp_info_get_status(chpid);
+               if (state == CHP_STATUS_CONFIGURED ||
+                   state == CHP_STATUS_STANDBY)
                        chp_new(chpid);
        }
 
index 637cf89..5810019 100644 (file)
@@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
        /* if (len > rec_len):
         * dump data up to cap_len ignoring small duplicate in rec->payload
         */
-       spin_lock_irqsave(&dbf->pay_lock, flags);
+       spin_lock(&dbf->pay_lock);
        memset(payload, 0, sizeof(*payload));
        memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
        payload->fsf_req_id = req_id;
index db27390..790babc 100644 (file)
@@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 #endif
 
 
-static int probe_irq __initdata;
+static int probe_irq;
 
 /**
  * probe_intr  -       helper for IRQ autoprobe
@@ -365,7 +365,7 @@ static int probe_irq __initdata;
  * used by the IRQ probe code.
  */
 
-static irqreturn_t __init probe_intr(int irq, void *dev_id)
+static irqreturn_t probe_intr(int irq, void *dev_id)
 {
        probe_irq = irq;
        return IRQ_HANDLED;
@@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
  * and then looking to see what interrupt actually turned up.
  */
 
-static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
                                                int possible)
 {
        struct NCR5380_hostdata *hostdata = shost_priv(instance);
index 68138a6..d9239c2 100644 (file)
@@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 {
        struct sgl_handle *psgl_handle;
+       unsigned long flags;
 
-       spin_lock_bh(&phba->io_sgl_lock);
+       spin_lock_irqsave(&phba->io_sgl_lock, flags);
        if (phba->io_sgl_hndl_avbl) {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                            "BM_%d : In alloc_io_sgl_handle,"
@@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
                        phba->io_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
-       spin_unlock_bh(&phba->io_sgl_lock);
+       spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
        return psgl_handle;
 }
 
 static void
 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
-       spin_lock_bh(&phba->io_sgl_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&phba->io_sgl_lock, flags);
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                    "BM_%d : In free_,io_sgl_free_index=%d\n",
                    phba->io_sgl_free_index);
@@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                             "value there=%p\n", phba->io_sgl_free_index,
                             phba->io_sgl_hndl_base
                             [phba->io_sgl_free_index]);
-                spin_unlock_bh(&phba->io_sgl_lock);
+                spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
                return;
        }
        phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                phba->io_sgl_free_index = 0;
        else
                phba->io_sgl_free_index++;
-       spin_unlock_bh(&phba->io_sgl_lock);
+       spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
 }
 
 static inline struct wrb_handle *
@@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
                       unsigned int wrbs_per_cxn)
 {
        struct wrb_handle *pwrb_handle;
+       unsigned long flags;
 
-       spin_lock_bh(&pwrb_context->wrb_lock);
+       spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
        pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
        pwrb_context->wrb_handles_available--;
        if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
                pwrb_context->alloc_index = 0;
        else
                pwrb_context->alloc_index++;
-       spin_unlock_bh(&pwrb_context->wrb_lock);
+       spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
 
        if (pwrb_handle)
                memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
@@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
                       struct wrb_handle *pwrb_handle,
                       unsigned int wrbs_per_cxn)
 {
-       spin_lock_bh(&pwrb_context->wrb_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
        pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
        pwrb_context->wrb_handles_available++;
        if (pwrb_context->free_index == (wrbs_per_cxn - 1))
                pwrb_context->free_index = 0;
        else
                pwrb_context->free_index++;
-       spin_unlock_bh(&pwrb_context->wrb_lock);
+       spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
 }
 
 /**
@@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
 {
        struct sgl_handle *psgl_handle;
+       unsigned long flags;
 
-       spin_lock_bh(&phba->mgmt_sgl_lock);
+       spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
        if (phba->eh_sgl_hndl_avbl) {
                psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
                phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
                        phba->eh_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
-       spin_unlock_bh(&phba->mgmt_sgl_lock);
+       spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
        return psgl_handle;
 }
 
 void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
-       spin_lock_bh(&phba->mgmt_sgl_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                    "BM_%d : In  free_mgmt_sgl_handle,"
                    "eh_sgl_free_index=%d\n",
@@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                            "BM_%d : Double Free in eh SGL ,"
                            "eh_sgl_free_index=%d\n",
                            phba->eh_sgl_free_index);
-               spin_unlock_bh(&phba->mgmt_sgl_lock);
+               spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
                return;
        }
        phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                phba->eh_sgl_free_index = 0;
        else
                phba->eh_sgl_free_index++;
-       spin_unlock_bh(&phba->mgmt_sgl_lock);
+       spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
 }
 
 static void
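
All the be_main.c conversions above follow one pattern: _bh spinlocks become the irqsave/irqrestore variants so the SGL and WRB pools can also be taken from hard-irq context. A minimal self-contained sketch of the pattern, with a hypothetical pool:

#include <linux/types.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pool_lock);
static unsigned int pool_avail;

/* safe to call from process, softirq, or hardirq context */
static bool pool_take(void)
{
	unsigned long flags;
	bool got = false;

	spin_lock_irqsave(&pool_lock, flags);	/* disables local irqs */
	if (pool_avail) {
		pool_avail--;
		got = true;
	}
	spin_unlock_irqrestore(&pool_lock, flags);
	return got;
}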
index a8762a3..5324741 100644 (file)
@@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
        u32 fd_ioasc;
-       char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
 
        if (ioa_cfg->sis64)
                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
@@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
        }
 
        list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+       schedule_work(&ioa_cfg->work_q);
        hostrcb = ipr_get_free_hostrcb(ioa_cfg);
-       kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
 
        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
 }
index c051694..f9b6fba 100644 (file)
@@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 
 free_task:
        /* regular RX path uses back_lock */
-       spin_lock_bh(&session->back_lock);
+       spin_lock(&session->back_lock);
        __iscsi_put_task(task);
-       spin_unlock_bh(&session->back_lock);
+       spin_unlock(&session->back_lock);
        return NULL;
 }
 
index 54d446c..b8d3b97 100644 (file)
@@ -36,9 +36,9 @@ struct scsi_dh_blist {
 };
 
 static const struct scsi_dh_blist scsi_dh_blist[] = {
-       {"DGC", "RAID",                 "clariion" },
-       {"DGC", "DISK",                 "clariion" },
-       {"DGC", "VRAID",                "clariion" },
+       {"DGC", "RAID",                 "emc" },
+       {"DGC", "DISK",                 "emc" },
+       {"DGC", "VRAID",                "emc" },
 
        {"COMPAQ", "MSA1000 VOLUME",    "hp_sw" },
        {"COMPAQ", "HSV110",            "hp_sw" },
index 212e98d..6f7128f 100644 (file)
@@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
 static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
                                enum scsi_scan_mode rescan)
 {
-       char devname[64];
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];
        unsigned int length;
        u64 lun;
@@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
                }
        }
 
-       sprintf(devname, "host %d channel %d id %d",
-               shost->host_no, sdev->channel, sdev->id);
-
        /*
         * Allocate enough to hold the header (the same size as one scsi_lun)
         * plus the number of luns we are requesting.  511 was the default
@@ -1470,12 +1466,12 @@ retry:
  out_err:
        kfree(lun_data);
  out:
-       scsi_device_put(sdev);
        if (scsi_device_created(sdev))
                /*
                 * the sdev we used didn't appear in the report luns scan
                 */
                __scsi_remove_device(sdev);
+       scsi_device_put(sdev);
        return ret;
 }
 
index 7af5226..618422e 100644 (file)
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
        res = get_user_pages_unlocked(
                uaddr,
                nr_pages,
-               rw == READ,
-               0, /* don't force */
-               pages);
+               pages,
+               rw == READ ? FOLL_WRITE : 0);
 
        /* Errors and no page mapped should return here */
        if (res < nr_pages)
index 396ded5..209a8f7 100644 (file)
@@ -1187,8 +1187,10 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
                hdata.type = heap->type;
                hdata.heap_id = heap->id;
 
-               ret = copy_to_user(&buffer[cnt],
-                                  &hdata, sizeof(hdata));
+               if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
+                       ret = -EFAULT;
+                       goto out;
+               }
 
                cnt++;
                if (cnt >= max_cnt)
index 15bac92..46b2bb9 100644 (file)
@@ -107,7 +107,7 @@ struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
 
                heap_pdev = of_platform_device_create(node, heaps[i].name,
                                                      &pdev->dev);
-               if (!pdev)
+               if (!heap_pdev)
                        return ERR_PTR(-ENOMEM);
                heap_pdev->dev.platform_data = &heaps[i];
 
index e36ee98..34307ac 100644 (file)
@@ -128,6 +128,7 @@ int arche_platform_change_state(enum arche_platform_state state,
        pdev = of_find_device_by_node(np);
        if (!pdev) {
                pr_err("arche-platform device not found\n");
+               of_node_put(np);
                return -ENODEV;
        }
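
The added of_node_put() plugs a node-reference leak on the error path: of_find_device_by_node() does not consume the reference its caller holds on np, so an early return must drop it. A sketch of the balanced shape, with a hypothetical compatible string for the lookup:

np = of_find_compatible_node(NULL, NULL, "example,arche"); /* takes a ref on np */
pdev = of_find_device_by_node(np);
if (!pdev) {
	of_node_put(np);	/* must drop the node ref on early exit */
	return -ENODEV;
}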
 
index 071bb1c..baab460 100644 (file)
@@ -1548,7 +1548,8 @@ static int ap_probe(struct usb_interface *interface,
        INIT_LIST_HEAD(&es2->arpcs);
        spin_lock_init(&es2->arpc_lock);
 
-       if (es2_arpc_in_enable(es2))
+       retval = es2_arpc_in_enable(es2);
+       if (retval)
                goto error;
 
        retval = gb_hd_add(hd);
index 5e06e42..250caa0 100644 (file)
@@ -702,15 +702,13 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
        ret = gb_gpio_irqchip_add(gpio, irqc, 0,
                                   handle_level_irq, IRQ_TYPE_NONE);
        if (ret) {
-               dev_err(&connection->bundle->dev,
-                       "failed to add irq chip: %d\n", ret);
+               dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
                goto exit_line_free;
        }
 
        ret = gpiochip_add(gpio);
        if (ret) {
-               dev_err(&connection->bundle->dev,
-                       "failed to add gpio chip: %d\n", ret);
+               dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
                goto exit_gpio_irqchip_remove;
        }
 
index 69f67dd..660b467 100644 (file)
@@ -127,7 +127,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
        return module;
 
 err_put_interfaces:
-       for (--i; i > 0; --i)
+       for (--i; i >= 0; --i)
                gb_interface_put(module->interfaces[i]);
 
        put_device(&module->dev);
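
With `i > 0` the unwind loop stopped before index 0, leaking the first interface's reference; `i >= 0` after the pre-decrement visits every index that was successfully set up. A toy, runnable version of the unwind pattern:

#include <stdio.h>

#define N 4

int main(void)
{
	int i, fail_at = 2;

	for (i = 0; i < N; i++) {
		if (i == fail_at)
			break;			/* simulate init failure */
		printf("init %d\n", i);
	}
	if (i < N)
		for (--i; i >= 0; --i)		/* ">= 0", not "> 0" */
			printf("undo %d\n", i);
	return 0;
}
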
index 5ee7954..2633d2b 100644 (file)
@@ -888,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        minor = alloc_minor(gb_tty);
        if (minor < 0) {
                if (minor == -ENOSPC) {
-                       dev_err(&connection->bundle->dev,
+                       dev_err(&gbphy_dev->dev,
                                "no more free minor numbers\n");
                        retval = -ENODEV;
                } else {
index d626125..564b36d 100644 (file)
@@ -468,6 +468,8 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st,
        case SCA3000_MEAS_MODE_OP_2:
                *base_freq = info->option_mode_2_freq;
                break;
+       default:
+               ret = -EINVAL;
        }
 error_ret:
        return ret;
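
The added `default:` arm makes the switch total: an unexpected operating mode previously fell through with *base_freq untouched and ret still carrying its earlier value. A compact sketch of the same shape; the mode names and frequencies here are hypothetical:

#include <errno.h>

enum meas_mode { MODE_NORMAL, MODE_OP_1, MODE_OP_2 };

static int get_base_freq(enum meas_mode mode, int *base_freq)
{
	int ret = 0;

	switch (mode) {
	case MODE_NORMAL:
		*base_freq = 75;
		break;
	case MODE_OP_1:
		*base_freq = 150;
		break;
	case MODE_OP_2:
		*base_freq = 300;
		break;
	default:
		ret = -EINVAL;	/* unknown mode: fail loudly */
	}
	return ret;
}
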
index 6eae605..23fda9d 100644 (file)
@@ -871,12 +871,10 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
 }
 LUSTRE_RW_ATTR(xattr_cache);
 
-static ssize_t unstable_stats_show(struct kobject *kobj,
-                                  struct attribute *attr,
-                                  char *buf)
+static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
 {
-       struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
-                                             ll_kobj);
+       struct super_block     *sb    = m->private;
+       struct ll_sb_info      *sbi   = ll_s2sbi(sb);
        struct cl_client_cache *cache = sbi->ll_cache;
        long pages;
        int mb;
@@ -884,19 +882,21 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
        pages = atomic_long_read(&cache->ccc_unstable_nr);
        mb = (pages * PAGE_SIZE) >> 20;
 
-       return sprintf(buf, "unstable_check:     %8d\n"
-                           "unstable_pages: %12ld\n"
-                           "unstable_mb:        %8d\n",
-                           cache->ccc_unstable_check, pages, mb);
+       seq_printf(m,
+                  "unstable_check:     %8d\n"
+                  "unstable_pages: %12ld\n"
+                  "unstable_mb:        %8d\n",
+                  cache->ccc_unstable_check, pages, mb);
+
+       return 0;
 }
 
-static ssize_t unstable_stats_store(struct kobject *kobj,
-                                   struct attribute *attr,
-                                   const char *buffer,
-                                   size_t count)
+static ssize_t ll_unstable_stats_seq_write(struct file *file,
+                                          const char __user *buffer,
+                                          size_t count, loff_t *off)
 {
-       struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
-                                             ll_kobj);
+       struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+       struct ll_sb_info *sbi = ll_s2sbi(sb);
        char kernbuf[128];
        int val, rc;
 
@@ -922,7 +922,7 @@ static ssize_t unstable_stats_store(struct kobject *kobj,
 
        return count;
 }
-LUSTRE_RW_ATTR(unstable_stats);
+LPROC_SEQ_FOPS(ll_unstable_stats);
 
 static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
@@ -995,6 +995,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
        /* { "filegroups",   lprocfs_rd_filegroups,  0, 0 }, */
        { "max_cached_mb",    &ll_max_cached_mb_fops, NULL },
        { "statahead_stats",  &ll_statahead_stats_fops, NULL, 0 },
+       { "unstable_stats",   &ll_unstable_stats_fops, NULL },
        { "sbi_flags",        &ll_sbi_flags_fops, NULL, 0 },
        { .name =               "nosquash_nids",
          .fops =               &ll_nosquash_nids_fops          },
@@ -1026,7 +1027,6 @@ static struct attribute *llite_attrs[] = {
        &lustre_attr_max_easize.attr,
        &lustre_attr_default_easize.attr,
        &lustre_attr_xattr_cache.attr,
-       &lustre_attr_unstable_stats.attr,
        &lustre_attr_root_squash.attr,
        NULL,
 };
index c29040f..1091b9f 100644 (file)
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
                actual_pages = get_user_pages(task, task->mm,
                                          (unsigned long)buf & ~(PAGE_SIZE - 1),
                                          num_pages,
-                                         (type == PAGELIST_READ) /*Write */ ,
-                                         0 /*Force */ ,
+                                         (type == PAGELIST_READ) ? FOLL_WRITE : 0,
                                          pages,
                                          NULL /*vmas */);
                up_read(&task->mm->mmap_sem);
index e11c0e0..7b6cd4d 100644 (file)
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
                current->mm,              /* mm */
                (unsigned long)virt_addr, /* start */
                num_pages,                /* len */
-               0,                        /* write */
-               0,                        /* force */
+               0,                        /* gup_flags */
                pages,                    /* pages (array of page pointers) */
                NULL);                    /* vmas */
        up_read(&current->mm->mmap_sem);
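
The three get_user_pages hunks in this batch all track the 4.9-era API change that folded the separate write/force int arguments into a single gup_flags bitmask; a device-to-user read means the kernel writes the pages, hence FOLL_WRITE. A hedged sketch of the converted call shape (the prototypes moved around in this period, so treat this as illustrative rather than a drop-in):

#include <linux/mm.h>

static long pin_user_buf(unsigned long uaddr, int nr_pages,
			 bool kernel_writes, struct page **pages)
{
	unsigned int gup_flags = kernel_writes ? FOLL_WRITE : 0;

	/* was: get_user_pages_unlocked(uaddr, nr_pages, write, 0, pages) */
	return get_user_pages_unlocked(uaddr, nr_pages, pages, gup_flags);
}
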
index 78f5613..6ab7443 100644 (file)
@@ -3388,7 +3388,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 
        clients_count++;
 
-       destroy_workqueue(hif_workqueue);
 _fail_:
        return result;
 }
index 39b928c..b7d747e 100644 (file)
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         * Otherwise, initiator is not expecting a NOPIN in response.
         * Just ignore for now.
         */
+
+       if (cmd)
+               iscsit_free_cmd(cmd, false);
+
         return 0;
 }
 EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 
        pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
                " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
-               "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+               "Solicited" : "Unsolicited", cmd->init_task_tag,
                cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
 }
 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
index adf419f..15f79a2 100644 (file)
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
 
                /*
                 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
-                * Immediate Data + Unsolicitied Data-OUT if necessary..
+                * Immediate Data + Unsolicited Data-OUT if necessary..
                 */
                param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
                                                  conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
 {
        struct iscsi_session *sess = conn->sess;
        /*
-        * FIXME: Unsolicitied NopIN support for ISER
+        * FIXME: Unsolicited NopIN support for ISER
         */
        if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
                return;
index 6094a6b..7dfefd6 100644 (file)
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
 
 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
 {
-       if (scsi_status != SAM_STAT_GOOD) {
-               return;
-       }
-
-       /*
-        * Calculate new residual count based upon length of SCSI data
-        * transferred.
-        */
-       if (length < cmd->data_length) {
+       if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
                if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        cmd->residual_count += cmd->data_length - length;
                } else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
                }
 
                cmd->data_length = length;
-       } else if (length > cmd->data_length) {
-               cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
-               cmd->residual_count = length - cmd->data_length;
-       } else {
-               cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
-               cmd->residual_count = 0;
        }
 
        target_complete_cmd(cmd, scsi_status);
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+       case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
                break;
        case TCM_OUT_OF_RESOURCES:
                sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
         * fabric acknowledgement that requires two target_put_sess_cmd()
         * invocations before se_cmd descriptor release.
         */
-       if (ack_kref)
-               kref_get(&se_cmd->cmd_kref);
+       if (ack_kref) {
+               if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+                       return -EINVAL;
+
+               se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+       }
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (se_sess->sess_tearing_down) {
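
A plain kref_get() on a command whose count has already reached zero would resurrect a dying object; kref_get_unless_zero() refuses instead, which is why target_get_sess_cmd() can now fail with -EINVAL. The acquire pattern in isolation, as a kernel-style sketch:

#include <linux/kref.h>
#include <linux/errno.h>

struct cmd { struct kref kref; };

static int cmd_get(struct cmd *c)
{
	if (!kref_get_unless_zero(&c->kref))
		return -EINVAL;	/* already dying; don't resurrect */
	return 0;
}
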
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
  */
 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
 {
-       struct se_cmd *se_cmd;
+       struct se_cmd *se_cmd, *tmp_cmd;
        unsigned long flags;
        int rc;
 
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
        se_sess->sess_tearing_down = 1;
        list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
 
-       list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+       list_for_each_entry_safe(se_cmd, tmp_cmd,
+                                &se_sess->sess_wait_list, se_cmd_list) {
                rc = kref_get_unless_zero(&se_cmd->cmd_kref);
                if (rc) {
                        se_cmd->cmd_wait_set = 1;
                        spin_lock(&se_cmd->t_state_lock);
                        se_cmd->transport_state |= CMD_T_FABRIC_STOP;
                        spin_unlock(&se_cmd->t_state_lock);
-               }
+               } else
+                       list_del_init(&se_cmd->se_cmd_list);
        }
 
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
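
Because the loop body may now unlink the entry it could not take a reference on, the walk switches to list_for_each_entry_safe(), which caches the next node before the body runs. A minimal kernel-style sketch of that rule:

#include <linux/list.h>
#include <linux/types.h>

struct waiter { struct list_head node; bool keep; };

static void prune(struct list_head *head)
{
	struct waiter *w, *tmp;

	list_for_each_entry_safe(w, tmp, head, node) {
		if (!w->keep)
			list_del_init(&w->node); /* safe: tmp already saved */
	}
}
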
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
                .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
                .add_sector_info = true,
        },
+       [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+               .key = COPY_ABORTED,
+               .asc = 0x0d,
+               .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+
+       },
        [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
                /*
                 * Returning ILLEGAL REQUEST would cause immediate IO errors on
index 62bf4fe..4756250 100644 (file)
@@ -96,7 +96,7 @@ struct tcmu_dev {
        size_t dev_size;
        u32 cmdr_size;
        u32 cmdr_last_cleaned;
-       /* Offset of data ring from start of mb */
+       /* Offset of data area from start of mb */
        /* Must add data_off and mb_addr to get the address */
        size_t data_off;
        size_t data_size;
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
 
 /*
  * We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data ring.
+ * space available on the data area.
  *
  * Called with ring lock held.
  */
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
        return true;
 }
 
-static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static sense_reason_t
+tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        /*
         * Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
                BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
                data_length += se_cmd->t_bidi_data_sg->length;
        }
-       if ((command_size > (udev->cmdr_size / 2))
-           || data_length > udev->data_size)
-               pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
-                       "cmd/data ring buffers\n", command_size, data_length,
+       if ((command_size > (udev->cmdr_size / 2)) ||
+           data_length > udev->data_size) {
+               pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+                       "cmd ring/data area\n", command_size, data_length,
                        udev->cmdr_size, udev->data_size);
+               spin_unlock_irq(&udev->cmdr_lock);
+               return TCM_INVALID_CDB_FIELD;
+       }
 
        while (!is_ring_space_avail(udev, command_size, data_length)) {
                int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
                finish_wait(&udev->wait_cmdr, &__wait);
                if (!ret) {
                        pr_warn("tcmu: command timed out\n");
-                       return -ETIMEDOUT;
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
 
                spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
        bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
 
-       /*
-        * Fix up iovecs, and handle if allocation in data ring wrapped.
-        */
+       /* Handle allocating space from the data area */
        iov = &entry->req.iov[0];
        iov_cnt = 0;
        copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        mod_timer(&udev->timeout,
                round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
 
-       return 0;
+       return TCM_NO_SENSE;
 }
 
-static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
 {
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
 
        tcmu_cmd = tcmu_alloc_cmd(se_cmd);
        if (!tcmu_cmd)
-               return -ENOMEM;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        ret = tcmu_queue_cmd_ring(tcmu_cmd);
-       if (ret < 0) {
+       if (ret != TCM_NO_SENSE) {
                pr_err("TCMU: Could not queue command\n");
                spin_lock_irq(&udev->commands_lock);
                idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                /*
                 * cmd has been completed already from timeout, just reclaim
-                * data ring space and free cmd
+                * data area space and free cmd
                 */
                free_data_area(udev, cmd);
 
@@ -1128,21 +1131,10 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
                       dev->dev_attrib.block_size);
 }
 
-static sense_reason_t
-tcmu_pass_op(struct se_cmd *se_cmd)
-{
-       int ret = tcmu_queue_cmd(se_cmd);
-
-       if (ret != 0)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       else
-               return TCM_NO_SENSE;
-}
-
 static sense_reason_t
 tcmu_parse_cdb(struct se_cmd *cmd)
 {
-       return passthrough_parse_cdb(cmd, tcmu_pass_op);
+       return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
 }
 
 static const struct target_backend_ops tcmu_ops = {
index 75cd854..094a144 100644 (file)
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
        }
        mutex_unlock(&g_device_mutex);
 
-       pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+       pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
        return -EINVAL;
 }
 
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
 
 static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
                                struct xcopy_op *xop, unsigned char *p,
-                               unsigned short tdll)
+                               unsigned short tdll, sense_reason_t *sense_ret)
 {
        struct se_device *local_dev = se_cmd->se_dev;
        unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
        unsigned short start = 0;
        bool src = true;
 
+       *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
        if (offset != 0) {
                pr_err("XCOPY target descriptor list length is not"
                        " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
                rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
        else
                rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
-       if (rc < 0)
+       /*
+        * If a matching IEEE NAA 0x83 descriptor for the requested device
+        * is not located on this node, return COPY_ABORTED with ASC/ASCQ
+        * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+        * initiator to fall back to normal copy method.
+        */
+       if (rc < 0) {
+               *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
                goto out;
+       }
 
        pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
                 xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
                                remote_port, true);
        if (rc < 0) {
+               ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
                transport_generic_free_cmd(se_cmd, 0);
                return rc;
        }
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
 
        rc = target_xcopy_issue_pt_cmd(xpt_cmd);
        if (rc < 0) {
+               ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
                transport_generic_free_cmd(se_cmd, 0);
                return rc;
        }
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
                                remote_port, false);
        if (rc < 0) {
                struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+               ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
                /*
                 * If the failure happened before the t_mem_list hand-off in
                 * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
 
        rc = target_xcopy_issue_pt_cmd(xpt_cmd);
        if (rc < 0) {
+               ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
                se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
                transport_generic_free_cmd(se_cmd, 0);
                return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
 out:
        xcopy_pt_undepend_remotedev(xop);
        kfree(xop);
-
-       pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
-       ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+       /*
+        * Don't override an error scsi status if it has already been set
+        */
+       if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+               pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+                       " CHECK_CONDITION -> sending response\n", rc);
+               ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+       }
        target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
 }
 
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
                tdll, sdll, inline_dl);
 
-       rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+       rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
        if (rc <= 0)
                goto out;
 
index 216e18c..ff5de9a 100644 (file)
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
        if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
                              &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
                              ntohl(fcp->fc_dl), task_attr, data_dir,
-                             TARGET_SCF_ACK_KREF))
+                             TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
                goto err;
 
-       pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+       pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
        return;
 
 err:
index 6ffbb60..fd5c3de 100644 (file)
 
 #include "tcm_fc.h"
 
+#define TFC_SESS_DBG(lport, fmt, args...) \
+       pr_debug("host%u: rport %6.6x: " fmt,      \
+                (lport)->host->host_no,           \
+                (lport)->port_id, ##args )
+
 static void ft_sess_delete_all(struct ft_tport *);
 
 /*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
        struct ft_tport *tport;
        struct hlist_head *head;
        struct ft_sess *sess;
+       char *reason = "no session created";
 
        rcu_read_lock();
        tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
-       if (!tport)
+       if (!tport) {
+               reason = "not an FCP port";
                goto out;
+       }
 
        head = &tport->hash[ft_sess_hash(port_id)];
        hlist_for_each_entry_rcu(sess, head, hash) {
                if (sess->port_id == port_id) {
                        kref_get(&sess->kref);
                        rcu_read_unlock();
-                       pr_debug("port_id %x found %p\n", port_id, sess);
+                       TFC_SESS_DBG(lport, "port_id %x found %p\n",
+                                    port_id, sess);
                        return sess;
                }
        }
 out:
        rcu_read_unlock();
-       pr_debug("port_id %x not found\n", port_id);
+       TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+                    port_id, reason);
        return NULL;
 }
 
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
        struct ft_tport *tport = sess->tport;
        struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
 
-       pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+       TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
        hlist_add_head_rcu(&sess->hash, head);
        tport->sess_count++;
 
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 
        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        kref_init(&sess->kref); /* ref for table entry */
        sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
                                             TARGET_PROT_NORMAL, &initiatorname[0],
                                             sess, ft_sess_alloc_cb);
        if (IS_ERR(sess->se_sess)) {
+               int rc = PTR_ERR(sess->se_sess);
                kfree(sess);
-               return NULL;
+               sess = ERR_PTR(rc);
        }
        return sess;
 }
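
Returning NULL collapses every failure into one; the ERR_PTR() conversion lets ft_prli_locked() distinguish -EACCES (refuse the image pair) from -ENOMEM (resource failure). A self-contained sketch of the round trip, with a hypothetical allocator standing in for ft_sess_create():

#include <linux/err.h>
#include <linux/slab.h>

struct sess { int id; };

static struct sess *make_sess(int id)
{
	struct sess *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return ERR_PTR(-ENOMEM);
	if (id < 0) {
		kfree(s);
		return ERR_PTR(-EACCES);	/* distinct, inspectable error */
	}
	s->id = id;
	return s;
}

static int use_sess(int id)
{
	struct sess *s = make_sess(id);

	if (IS_ERR(s))
		return PTR_ERR(s);	/* caller can branch on -EACCES etc. */
	kfree(s);
	return 0;
}
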
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
                mutex_unlock(&ft_lport_lock);
                return;
        }
-       pr_debug("port_id %x\n", port_id);
+       TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
        ft_sess_unhash(sess);
        mutex_unlock(&ft_lport_lock);
        ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
                if (!(fcp_parm & FCP_SPPF_INIT_FCN))
                        return FC_SPP_RESP_CONF;
                sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
-               if (!sess)
-                       return FC_SPP_RESP_RES;
+               if (IS_ERR(sess)) {
+                       if (PTR_ERR(sess) == -EACCES) {
+                               spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+                               return FC_SPP_RESP_CONF;
+                       } else
+                               return FC_SPP_RESP_RES;
+               }
                if (!sess->params)
                        rdata->prli_count++;
                sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
        mutex_lock(&ft_lport_lock);
        ret = ft_prli_locked(rdata, spp_len, rspp, spp);
        mutex_unlock(&ft_lport_lock);
-       pr_debug("port_id %x flags %x ret %x\n",
-              rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+       TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+                    rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
        return ret;
 }
 
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
        struct ft_sess *sess;
        u32 sid = fc_frame_sid(fp);
 
-       pr_debug("sid %x\n", sid);
+       TFC_SESS_DBG(lport, "recv sid %x\n", sid);
 
        sess = ft_sess_get(lport, sid);
        if (!sess) {
-               pr_debug("sid %x sess lookup failed\n", sid);
+               TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
                /* TBD XXX - if FCP_CMND, send PRLO */
                fc_frame_free(fp);
                return;
index 9b4815e..19bf202 100644 (file)
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
 #include <linux/thermal.h>
 #include <linux/pm.h>
 
 /* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_HSW_1  0x9C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_HSW_2  0x8C24 /* Haswell PCH */
 #define PCH_THERMAL_DID_WPT    0x9CA4 /* Wildcat Point */
 #define PCH_THERMAL_DID_SKL    0x9D31 /* Skylake PCH */
 
@@ -66,9 +69,53 @@ struct pch_thermal_device {
        unsigned long crt_temp;
        int hot_trip_id;
        unsigned long hot_temp;
+       int psv_trip_id;
+       unsigned long psv_temp;
        bool bios_enabled;
 };
 
+#ifdef CONFIG_ACPI
+
+/*
+ * On some platforms there is a companion ACPI device which adds a
+ * passive trip temperature via the _PSV method; there is no specific
+ * passive temperature setting in the MMIO interface of this PCI device.
+ */
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+                                     int *nr_trips)
+{
+       struct acpi_device *adev;
+
+       ptd->psv_trip_id = -1;
+
+       adev = ACPI_COMPANION(&ptd->pdev->dev);
+       if (adev) {
+               unsigned long long r;
+               acpi_status status;
+
+               status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
+                                              &r);
+               if (ACPI_SUCCESS(status)) {
+                       unsigned long trip_temp;
+
+                       trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+                       if (trip_temp) {
+                               ptd->psv_temp = trip_temp;
+                               ptd->psv_trip_id = *nr_trips;
+                               ++(*nr_trips);
+                       }
+               }
+       }
+}
+#else
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+                                     int *nr_trips)
+{
+       ptd->psv_trip_id = -1;
+
+}
+#endif
+
 static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
 {
        u8 tsel;
@@ -119,6 +166,8 @@ read_trips:
                ++(*nr_trips);
        }
 
+       pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
        return 0;
 }
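
_PSV reports the trip point in tenths of a kelvin, while thermal sysfs works in millidegrees Celsius; DECI_KELVIN_TO_MILLICELSIUS() does the conversion using the ACPI convention that rounds absolute zero to 273.2 K. A plain-C version of the arithmetic, assuming that convention:

#include <stdio.h>

/* (deci-kelvin - 2732) * 100 = millidegrees Celsius. */
static long deci_kelvin_to_millicelsius(unsigned long long dk)
{
	return ((long)dk - 2732) * 100;
}

int main(void)
{
	/* e.g. _PSV == 3282 (328.2 K) -> 55000 m°C */
	printf("%ld\n", deci_kelvin_to_millicelsius(3282));
	return 0;
}
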
 
@@ -194,6 +243,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
                *type = THERMAL_TRIP_CRITICAL;
        else if (ptd->hot_trip_id == trip)
                *type = THERMAL_TRIP_HOT;
+       else if (ptd->psv_trip_id == trip)
+               *type = THERMAL_TRIP_PASSIVE;
        else
                return -EINVAL;
 
@@ -208,6 +259,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
                *temp = ptd->crt_temp;
        else if (ptd->hot_trip_id == trip)
                *temp = ptd->hot_temp;
+       else if (ptd->psv_trip_id == trip)
+               *temp = ptd->psv_temp;
        else
                return -EINVAL;
 
@@ -242,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
                ptd->ops = &pch_dev_ops_wpt;
                dev_name = "pch_skylake";
                break;
+       case PCH_THERMAL_DID_HSW_1:
+       case PCH_THERMAL_DID_HSW_2:
+               ptd->ops = &pch_dev_ops_wpt;
+               dev_name = "pch_haswell";
+               break;
        default:
                dev_err(&pdev->dev, "unknown pch thermal device\n");
                return -ENODEV;
@@ -324,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device)
 static struct pci_device_id intel_pch_thermal_id[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
index 0e4dc0a..7a22307 100644 (file)
@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
        .set_cur_state = powerclamp_set_cur_state,
 };
 
-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-
 static int __init powerclamp_probe(void)
 {
-       if (!x86_match_cpu(intel_powerclamp_ids)) {
-               pr_err("Intel powerclamp does not run on family %d model %d\n",
-                               boot_cpu_data.x86, boot_cpu_data.x86_model);
+       if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+               pr_err("CPU does not support MWAIT");
                return -ENODEV;
        }
 
index 886fcf3..b992346 100644 (file)
@@ -213,7 +213,7 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
        struct pci_dev *pdev = to_pci_dev(port->dev);
        int ret;
 
-       ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                return ret;
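
pci_alloc_irq_vectors() only hands out the vector types named in its flags argument, so passing 0 permits none; PCI_IRQ_ALL_TYPES lets the core fall back from MSI-X through MSI to legacy INTx. The call shape, as a hedged kernel-style sketch:

#include <linux/pci.h>

static int request_one_vector(struct pci_dev *pdev)
{
	int ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

	if (ret < 0)
		return ret;
	return pci_irq_vector(pdev, 0);	/* Linux IRQ number for vector 0 */
}
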
 
index 1bfb6fd..1731b98 100644 (file)
@@ -83,7 +83,8 @@ static const struct serial8250_config uart_config[] = {
                .name           = "16550A",
                .fifo_size      = 16,
                .tx_loadsz      = 16,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+                                 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
                .rxtrig_bytes   = {1, 4, 8, 14},
                .flags          = UART_CAP_FIFO,
        },
index b8d9c8c..417d9e7 100644 (file)
@@ -99,7 +99,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
        case UART_LCR:
                valshift = UNIPHIER_UART_LCR_SHIFT;
                /* Divisor latch access bit does not exist. */
-               value &= ~(UART_LCR_DLAB << valshift);
+               value &= ~UART_LCR_DLAB;
                /* fall through */
        case UART_MCR:
                offset = UNIPHIER_UART_LCR_MCR;
@@ -199,7 +199,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs) {
-               dev_err(dev, "failed to get memory resource");
+               dev_err(dev, "failed to get memory resource\n");
                return -EINVAL;
        }
 
index c783140..25c1d7b 100644 (file)
@@ -1625,6 +1625,7 @@ config SERIAL_SPRD_CONSOLE
 config SERIAL_STM32
        tristate "STMicroelectronics STM32 serial port support"
        select SERIAL_CORE
+       depends on HAS_DMA
        depends on ARM || COMPILE_TEST
        help
          This driver is for the on-chip Serial Controller on
index fd8aa1f..168b10c 100644 (file)
@@ -2132,11 +2132,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
                mode |= ATMEL_US_USMODE_RS485;
        } else if (termios->c_cflag & CRTSCTS) {
                /* RS232 with hardware handshake (RTS/CTS) */
-               if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
-                       dev_info(port->dev, "not enabling hardware flow control because DMA is used");
-                       termios->c_cflag &= ~CRTSCTS;
-               } else {
+               if (atmel_use_fifo(port) &&
+                   !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+                       /*
+                        * with ATMEL_US_USMODE_HWHS set, the controller will
+                        * be able to drive the RTS pin high/low when the RX
+                        * FIFO is above RXFTHRES/below RXFTHRES2.
+                        * It will also disable the transmitter when the CTS
+                        * pin is high.
+                        * This mode is not activated if CTS pin is a GPIO
+                        * because in this case, the transmitter is always
+                        * disabled (there must be an internal pull-up
+                        * responsible for this behaviour).
+                        * If the RTS pin is a GPIO, the controller won't be
+                        * able to drive it according to the FIFO thresholds,
+                        * but it will be handled by the driver.
+                        */
                        mode |= ATMEL_US_USMODE_HWHS;
+               } else {
+                       /*
+                        * For platforms without FIFO, the flow control is
+                        * handled by the driver.
+                        */
+                       mode |= ATMEL_US_USMODE_NORMAL;
                }
        } else {
                /* RS232 without hardware handshake */
index de9d510..76103f2 100644 (file)
@@ -328,7 +328,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 
        sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
 
-       if (xmit->tail < xmit->head) {
+       if (xmit->tail < xmit->head || xmit->head == 0) {
                sport->dma_tx_nents = 1;
                sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
        } else {
@@ -359,7 +359,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
        sport->dma_tx_in_progress = true;
        sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
        dma_async_issue_pending(sport->dma_tx_chan);
-
 }
 
 static void lpuart_dma_tx_complete(void *arg)
index d391650..42caccb 100644 (file)
@@ -419,6 +419,7 @@ static struct dmi_system_id pch_uart_dmi_table[] = {
                },
                (void *)MINNOW_UARTCLK,
        },
+       { }
 };
 
 /* Return UART clock, checking for board specific clocks. */
index 2675792..fb06725 100644 (file)
@@ -1130,9 +1130,13 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
 {
        struct sc16is7xx_port *s = gpiochip_get_data(chip);
        struct uart_port *port = &s->p[0].port;
+       u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
 
-       sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
-                             val ? BIT(offset) : 0);
+       if (val)
+               state |= BIT(offset);
+       else
+               state &= ~BIT(offset);
+       sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
        sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
                              BIT(offset));
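
sc16is7xx_port_update() is a cached read-modify-write, but IOSTATE reads back live pin levels rather than the programmed output latch, so an update based on the cached value can misprogram other bits or skip a write the latch actually needs; the fix above reads the register once and writes the full byte unconditionally. A runnable model of that pattern against a fake register (this is the likely motivation, not the driver's code):

#include <stdint.h>
#include <stdio.h>

static uint8_t iostate;		/* stand-in for the chip register */

static uint8_t reg_read(void)       { return iostate; }
static void reg_write(uint8_t v)    { iostate = v; }

static void gpio_set_output_level(unsigned int offset, int val)
{
	uint8_t state = reg_read();

	if (val)
		state |= 1u << offset;
	else
		state &= ~(1u << offset);
	reg_write(state);	/* unconditional full-byte write */
}

int main(void)
{
	gpio_set_output_level(3, 1);
	printf("IOSTATE=0x%02x\n", iostate);	/* 0x08 */
	return 0;
}
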
 
index 6e4f636..f2303f3 100644 (file)
@@ -111,7 +111,7 @@ void uart_write_wakeup(struct uart_port *port)
         * closed.  No cookie for you.
         */
        BUG_ON(!state);
-       tty_wakeup(state->port.tty);
+       tty_port_tty_wakeup(&state->port);
 }
 
 static void uart_stop(struct tty_struct *tty)
@@ -632,7 +632,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
        if (port->ops->flush_buffer)
                port->ops->flush_buffer(port);
        uart_port_unlock(port, flags);
-       tty_wakeup(tty);
+       tty_port_tty_wakeup(&state->port);
 }
 
 /*
@@ -2746,8 +2746,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
        uport->cons = drv->cons;
        uport->minor = drv->tty_driver->minor_start + uport->line;
 
-       port->console = uart_console(uport);
-
        /*
         * If this port is a console, then the spinlock is already
         * initialised.
@@ -2761,6 +2759,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
 
        uart_configure_port(drv, state, uport);
 
+       port->console = uart_console(uport);
+
        num_groups = 2;
        if (uport->attr_group)
                num_groups++;
index 41d9749..cd97ceb 100644 (file)
@@ -31,7 +31,7 @@ struct stm32_usart_info {
        struct stm32_usart_config cfg;
 };
 
-#define UNDEF_REG ~0
+#define UNDEF_REG 0xff
 
 /* Register offsets */
 struct stm32_usart_info stm32f4_info = {
index f37edaa..dd4c02f 100644 (file)
@@ -1200,6 +1200,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
 OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
 OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
 OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup);
 
 /**
  * cdns_uart_console_write - perform write operation
@@ -1438,6 +1439,7 @@ static const struct of_device_id cdns_uart_of_match[] = {
        { .compatible = "xlnx,xuartps", },
        { .compatible = "cdns,uart-r1p8", },
        { .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
+       { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def },
        {}
 };
 MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
index 06fb39c..8c3bf3d 100644 (file)
@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
        if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
                return 0;
 
+       if (new_screen_size > (4 << 20))
+               return -EINVAL;
        newscreen = kmalloc(new_screen_size, GFP_USER);
        if (!newscreen)
                return -ENOMEM;
 
+       if (vc == sel_cons)
+               clear_selection();
+
        old_rows = vc->vc_rows;
        old_row_size = vc->vc_size_row;
 
@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
                        break;
                case 3: /* erase scroll-back buffer (and whole display) */
                        scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
-                                   vc->vc_screenbuf_size >> 1);
+                                   vc->vc_screenbuf_size);
                        set_origin(vc);
                        if (con_is_visible(vc))
                                update_screen(vc);
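
scr_memsetw() takes its count in bytes and fills 16-bit character cells internally, so the old `vc_screenbuf_size >> 1` cleared only half of the scroll-back. A userspace model of the helper and the corrected call:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Model of scr_memsetw(): count is in bytes, cells are u16. */
static void memsetw(uint16_t *s, uint16_t v, size_t count_bytes)
{
	size_t i;

	for (i = 0; i < count_bytes / 2; i++)
		s[i] = v;
}

int main(void)
{
	uint16_t screenbuf[8];
	size_t screenbuf_size = sizeof(screenbuf);	/* bytes */

	memset(screenbuf, 0xAA, sizeof(screenbuf));
	memsetw(screenbuf, 0x0720, screenbuf_size);	/* full erase */
	printf("last cell: 0x%04x\n", screenbuf[7]);	/* 0x0720 */
	return 0;
}
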
index 96ae695..111b0e0 100644 (file)
@@ -188,6 +188,8 @@ static void host_stop(struct ci_hdrc *ci)
 
        if (hcd) {
                usb_remove_hcd(hcd);
+               ci->role = CI_ROLE_END;
+               synchronize_irq(ci->irq);
                usb_put_hcd(hcd);
                if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
                        (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
index fa9b26b..4c0fa0b 100644 (file)
@@ -463,9 +463,18 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
  */
 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
 {
+       bool ret;
+
        switch (hsotg->dr_mode) {
        case USB_DR_MODE_HOST:
-               dwc2_force_mode(hsotg, true);
+               ret = dwc2_force_mode(hsotg, true);
+               /*
+                * NOTE: This is required for some Rockchip SoC-based
+                * platforms with a host-only dwc2.
+                */
+               if (!ret)
+                       msleep(50);
+
                break;
        case USB_DR_MODE_PERIPHERAL:
                dwc2_force_mode(hsotg, false);
index aad4107..2a21a04 100644 (file)
@@ -259,6 +259,13 @@ enum dwc2_lx_state {
        DWC2_L3,        /* Off state */
 };
 
+/*
+ * Gadget periodic tx fifo sizes as used by legacy driver
+ * EP0 is not included
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+                                          768, 0, 0, 0, 0, 0, 0, 0}
+
 /* Gadget ep0 states */
 enum dwc2_ep0_state {
        DWC2_EP0_SETUP,
index 4cd6403..24fbebc 100644 (file)
@@ -186,10 +186,9 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
  */
 static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 {
-       unsigned int fifo;
+       unsigned int ep;
        unsigned int addr;
        int timeout;
-       u32 dptxfsizn;
        u32 val;
 
        /* Reset fifo map if not correctly cleared during previous session */
@@ -217,16 +216,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
         * them to endpoints dynamically according to maxpacket size value of
         * given endpoint.
         */
-       for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {
-               dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));
-
-               val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;
-               addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;
-
-               if (addr > hsotg->fifo_mem)
-                       break;
+       for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+               if (!hsotg->g_tx_fifo_sz[ep])
+                       continue;
+               val = addr;
+               val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
+               WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+                         "insufficient fifo memory");
+               addr += hsotg->g_tx_fifo_sz[ep];
 
-               dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));
+               dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
        }
 
        /*
@@ -3807,10 +3806,36 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
 static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
 {
        struct device_node *np = hsotg->dev->of_node;
+       u32 len = 0;
+       u32 i = 0;
 
        /* Enable dma if requested in device tree */
        hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
 
+       /*
+       * Register TX periodic fifo size per endpoint.
+       * EP0 is excluded since it has no fifo configuration.
+       */
+       if (!of_find_property(np, "g-tx-fifo-size", &len))
+               goto rx_fifo;
+
+       len /= sizeof(u32);
+
+       /* Read tx fifo sizes other than ep0 */
+       if (of_property_read_u32_array(np, "g-tx-fifo-size",
+                                               &hsotg->g_tx_fifo_sz[1], len))
+               goto rx_fifo;
+
+       /* Add ep0 */
+       len++;
+
+       /* Make remaining TX fifos unavailable */
+       if (len < MAX_EPS_CHANNELS) {
+               for (i = len; i < MAX_EPS_CHANNELS; i++)
+                       hsotg->g_tx_fifo_sz[i] = 0;
+       }
+
+rx_fifo:
        /* Register RX fifo size */
        of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
 
@@ -3832,10 +3857,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
        struct device *dev = hsotg->dev;
        int epnum;
        int ret;
+       int i;
+       u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
 
        /* Initialize to legacy fifo configuration values */
        hsotg->g_rx_fifo_sz = 2048;
        hsotg->g_np_g_tx_fifo_sz = 1024;
+       memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
        /* Device tree specific probe */
        dwc2_hsotg_of_probe(hsotg);
 
@@ -3853,6 +3881,9 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
        dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
                                                hsotg->g_np_g_tx_fifo_sz);
        dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
+       for (i = 0; i < MAX_EPS_CHANNELS; i++)
+               dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
+                                               hsotg->g_tx_fifo_sz[i]);
 
        hsotg->gadget.max_speed = USB_SPEED_HIGH;
        hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
index 07cc892..1dfa56a 100644 (file)
@@ -783,6 +783,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
                req->trb = trb;
                req->trb_dma = dwc3_trb_dma_offset(dep, trb);
                req->first_trb_index = dep->trb_enqueue;
+               dep->queued_requests++;
        }
 
        dwc3_ep_inc_enq(dep);
@@ -833,8 +834,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 
        trb->ctrl |= DWC3_TRB_CTRL_HWO;
 
-       dep->queued_requests++;
-
        trace_dwc3_prepare_trb(dep, trb);
 }
 
@@ -1074,9 +1073,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 
        list_add_tail(&req->list, &dep->pending_list);
 
-       if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
-                       dep->flags & DWC3_EP_PENDING_REQUEST) {
-               if (list_empty(&dep->started_list)) {
+       /*
+        * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
+        * wait for a XferNotReady event so we will know what's the current
+        * (micro-)frame number.
+        *
+        * Without this trick, we are very, very likely gonna get Bus Expiry
+        * errors which will force us issue EndTransfer command.
+        */
+       if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+               if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
+                               list_empty(&dep->started_list)) {
                        dwc3_stop_active_transfer(dwc, dep->number, true);
                        dep->flags = DWC3_EP_ENABLED;
                }
@@ -1861,8 +1868,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
        unsigned int            s_pkt = 0;
        unsigned int            trb_status;
 
-       dep->queued_requests--;
        dwc3_ep_inc_deq(dep);
+
+       if (req->trb == trb)
+               dep->queued_requests--;
+
        trace_dwc3_complete_trb(dep, trb);
 
        /*
@@ -2980,7 +2990,7 @@ err3:
        kfree(dwc->setup_buf);
 
 err2:
-       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
                        dwc->ep0_trb, dwc->ep0_trb_addr);
 
 err1:
@@ -3005,7 +3015,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
        kfree(dwc->setup_buf);
        kfree(dwc->zlp_buf);
 
-       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
                        dwc->ep0_trb, dwc->ep0_trb_addr);
 
        dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
index 54ad100..e40d47d 100644 (file)
@@ -136,8 +136,60 @@ struct ffs_epfile {
        /*
         * Buffer for holding data from partial reads which may happen since
         * we’re rounding user read requests to a multiple of a max packet size.
+        *
+        * The pointer is initialised with NULL value and may be set by
+        * __ffs_epfile_read_data function to point to a temporary buffer.
+        *
+        * In normal operation, calls to __ffs_epfile_read_buffered will consume
+        * data from said buffer and eventually free it.  Importantly, while the
+        * function is using the buffer, it sets the pointer to NULL.  This is
+        * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
+        * can never run concurrently (they are synchronised by epfile->mutex)
+        * so the latter will not assign a new value to the pointer.
+        *
+        * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
+        * valid) and sets the pointer to READ_BUFFER_DROP value.  This special
+        * value is crux of the synchronisation between ffs_func_eps_disable and
+        * __ffs_epfile_read_data.
+        *
+        * Once __ffs_epfile_read_data is about to finish it will try to set the
+        * pointer back to its old value (as described above), but seeing as the
+        * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
+        * the buffer.
+        *
+        * == State transitions ==
+        *
+        * • ptr == NULL:  (initial state)
+        *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
+        *   ◦ __ffs_epfile_read_buffered:    nop
+        *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
+        *   ◦ reading finishes:              n/a, not in ‘and reading’ state
+        * • ptr == DROP:
+        *   ◦ __ffs_epfile_read_buffer_free: nop
+        *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
+        *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
+        *   ◦ reading finishes:              n/a, not in ‘and reading’ state
+        * • ptr == buf:
+        *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
+        *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
+        *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
+        *                                    is always called first
+        *   ◦ reading finishes:              n/a, not in ‘and reading’ state
+        * • ptr == NULL and reading:
+        *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
+        *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
+        *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
+        *   ◦ reading finishes and …
+        *     … all data read:               free buf, go to ptr == NULL
+        *     … otherwise:                   go to ptr == buf and reading
+        * • ptr == DROP and reading:
+        *   ◦ __ffs_epfile_read_buffer_free: nop
+        *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
+        *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
+        *   ◦ reading finishes:              free buf, go to ptr == DROP
         */
-       struct ffs_buffer               *read_buffer;   /* P: epfile->mutex */
+       struct ffs_buffer               *read_buffer;
+#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
 
        char                            name[5];
 
@@ -736,25 +788,47 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
        schedule_work(&io_data->work);
 }
 
+static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
+{
+       /*
+        * See comment in struct ffs_epfile for full read_buffer pointer
+        * synchronisation story.
+        */
+       struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
+       if (buf && buf != READ_BUFFER_DROP)
+               kfree(buf);
+}
+
 /* Assumes epfile->mutex is held. */
 static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
                                          struct iov_iter *iter)
 {
-       struct ffs_buffer *buf = epfile->read_buffer;
+       /*
+        * Null out epfile->read_buffer so ffs_func_eps_disable does not free
+        * the buffer while we are using it.  See comment in struct ffs_epfile
+        * for full read_buffer pointer synchronisation story.
+        */
+       struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
        ssize_t ret;
-       if (!buf)
+       if (!buf || buf == READ_BUFFER_DROP)
                return 0;
 
        ret = copy_to_iter(buf->data, buf->length, iter);
        if (buf->length == ret) {
                kfree(buf);
-               epfile->read_buffer = NULL;
-       } else if (unlikely(iov_iter_count(iter))) {
+               return ret;
+       }
+
+       if (unlikely(iov_iter_count(iter))) {
                ret = -EFAULT;
        } else {
                buf->length -= ret;
                buf->data += ret;
        }
+
+       if (cmpxchg(&epfile->read_buffer, NULL, buf))
+               kfree(buf);
+
        return ret;
 }
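
The long state-machine comment above reduces to two lock-free moves: the reader xchg()es the pointer to NULL while it owns the buffer and cmpxchg()es it back if data remains, while the disable path xchg()es in the DROP sentinel; whoever loses a race frees the buffer. A condensed kernel-style sketch of the protocol (not the driver's code; the names are stand-ins):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct buf { size_t len; char data[]; };

static struct buf *cur;				/* shared pointer */
#define BUF_DROP ((struct buf *)ERR_PTR(-ESHUTDOWN))

static void drop_side(void)			/* eps_disable path */
{
	struct buf *b = xchg(&cur, BUF_DROP);

	if (b && b != BUF_DROP)
		kfree(b);			/* we owned it; free it */
}

static void consumer_side(void)			/* read path */
{
	struct buf *b = xchg(&cur, NULL);	/* take exclusive ownership */

	if (!b || b == BUF_DROP)
		return;
	/* ... consume part of b ... */
	if (cmpxchg(&cur, NULL, b))		/* put back unless DROP won */
		kfree(b);
}
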
 
@@ -783,7 +857,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
        buf->length = data_len;
        buf->data = buf->storage;
        memcpy(buf->storage, data + ret, data_len);
-       epfile->read_buffer = buf;
+
+       /*
+        * At this point read_buffer is NULL or READ_BUFFER_DROP (if
+        * ffs_func_eps_disable has been called in the meanwhile).  See comment
+        * in struct ffs_epfile for full read_buffer pointer synchronisation
+        * story.
+        */
+       if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
+               kfree(buf);
 
        return ret;
 }
@@ -1097,8 +1179,7 @@ ffs_epfile_release(struct inode *inode, struct file *file)
 
        ENTER();
 
-       kfree(epfile->read_buffer);
-       epfile->read_buffer = NULL;
+       __ffs_epfile_read_buffer_free(epfile);
        ffs_data_closed(epfile->ffs);
 
        return 0;
@@ -1724,24 +1805,20 @@ static void ffs_func_eps_disable(struct ffs_function *func)
        unsigned count            = func->ffs->eps_count;
        unsigned long flags;
 
+       spin_lock_irqsave(&func->ffs->eps_lock, flags);
        do {
-               if (epfile)
-                       mutex_lock(&epfile->mutex);
-               spin_lock_irqsave(&func->ffs->eps_lock, flags);
                /* pending requests get nuked */
                if (likely(ep->ep))
                        usb_ep_disable(ep->ep);
                ++ep;
-               spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
                if (epfile) {
                        epfile->ep = NULL;
-                       kfree(epfile->read_buffer);
-                       epfile->read_buffer = NULL;
-                       mutex_unlock(&epfile->mutex);
+                       __ffs_epfile_read_buffer_free(epfile);
                        ++epfile;
                }
        } while (--count);
+       spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 }
 
 static int ffs_func_eps_enable(struct ffs_function *func)
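Editor's note: the read_buffer handoff in the hunks above is an ownership protocol built on atomic pointer exchange — whoever xchg()s the pointer out owns the buffer, a READ_BUFFER_DROP sentinel marks the slot dead, and a failed cmpxchg() on the way back in means eps_disable ran concurrently, so the buffer must be freed rather than parked. A minimal userspace C analog of that protocol, using GCC's __atomic builtins in place of the kernel's xchg()/cmpxchg(); all names here are illustrative, not from the patch:

    #include <stdlib.h>

    #define DROP ((struct buf *)1)          /* sentinel, like READ_BUFFER_DROP */

    struct buf { size_t len; char data[64]; };

    static struct buf *slot;                /* like epfile->read_buffer */

    /* Reader: take exclusive ownership of the parked buffer, if any.
     * Taking also consumes a DROP marker, as in the kernel code. */
    static struct buf *take(void)
    {
            struct buf *b = __atomic_exchange_n(&slot, NULL, __ATOMIC_ACQ_REL);
            return (b == DROP) ? NULL : b;
    }

    /* Reader: try to park a partially consumed buffer again; if the slot
     * is no longer NULL (disable stored DROP meanwhile), free it instead. */
    static void put_back(struct buf *b)
    {
            struct buf *expected = NULL;

            if (!__atomic_compare_exchange_n(&slot, &expected, b, 0,
                                             __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
                    free(b);
    }

    /* Disable path: mark the slot dead and free whatever was parked. */
    static void drop_all(void)
    {
            struct buf *b = __atomic_exchange_n(&slot, DROP, __ATOMIC_ACQ_REL);

            if (b && b != DROP)
                    free(b);
    }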
index 9c8c9ed..fe18116 100644 (file)
@@ -590,8 +590,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 
        /* throttle high/super speed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget))
-               req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
-                                    dev->gadget->speed == USB_SPEED_SUPER)
+               req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+                                      dev->gadget->speed == USB_SPEED_SUPER)) &&
+                                       !list_empty(&dev->tx_reqs))
                        ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
                        : 0;
 
index bb1f6c8..45bc997 100644 (file)
@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
                        dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
                        goto err;
                }
-               ep->ep.name = name;
+               ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
 
                ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
                ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
index 876dca4..a268d9e 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRIVER_DESC "EHCI generic platform driver"
 #define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 3
+#define EHCI_MAX_RSTS 4
 #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
 
 struct ehci_platform_priv {
index 5b5880c..b38a228 100644 (file)
@@ -221,6 +221,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
        ohci->num_ports = board->ports;
        at91_start_hc(pdev);
 
+       /*
+        * The RemoteWakeupConnected bit has to be set explicitly
+        * before calling ohci_run. The reset value of this bit is 0.
+        */
+       ohci->hc_control = OHCI_CTRL_RWC;
+
        retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (retval == 0) {
                device_wakeup_enable(hcd->self.controller);
@@ -677,9 +683,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
         * REVISIT: some boards will be able to turn VBUS off...
         */
        if (!ohci_at91->wakeup) {
-               ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
-               ohci->hc_control &= OHCI_CTRL_RWC;
-               ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
                ohci->rh_state = OHCI_RH_HALTED;
 
                /* flush the writes */
index 1700908..86612ac 100644 (file)
@@ -72,7 +72,7 @@
 static const char      hcd_name [] = "ohci_hcd";
 
 #define        STATECHANGE_DELAY       msecs_to_jiffies(300)
-#define        IO_WATCHDOG_DELAY       msecs_to_jiffies(250)
+#define        IO_WATCHDOG_DELAY       msecs_to_jiffies(275)
 
 #include "ohci.h"
 #include "pci-quirks.h"
index 730b9fd..0ef1690 100644 (file)
@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                xhci_set_link_state(xhci, port_array, wIndex,
                                                        XDEV_RESUME);
                                spin_unlock_irqrestore(&xhci->lock, flags);
-                               msleep(20);
+                               msleep(USB_RESUME_TIMEOUT);
                                spin_lock_irqsave(&xhci->lock, flags);
                                xhci_set_link_state(xhci, port_array, wIndex,
                                                        XDEV_U0);
@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        return 0;
 }
 
+/*
+ * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged in S3:
+ * warm-reset a USB3 device stuck in polling or compliance mode after resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(int port_index,
+                                            __le32 __iomem **port_array)
+{
+       u32 portsc;
+
+       portsc = readl(port_array[port_index]);
+
+       /* if any of these are set we are not stuck */
+       if (portsc & (PORT_CONNECT | PORT_CAS))
+               return false;
+
+       if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+           ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+               return false;
+
+       /* clear wakeup/change bits, and do a warm port reset */
+       portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+       portsc |= PORT_WR;
+       writel(portsc, port_array[port_index]);
+       /* flush write */
+       readl(port_array[port_index]);
+       return true;
+}
+
 int xhci_bus_resume(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                u32 temp;
 
                temp = readl(port_array[port_index]);
+
+               /* warm reset CAS limited ports stuck in polling/compliance */
+               if ((xhci->quirks & XHCI_MISSING_CAS) &&
+                   (hcd->speed >= HCD_USB3) &&
+                   xhci_port_missing_cas_quirk(port_index, port_array)) {
+                       xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+                       continue;
+               }
                if (DEV_SUPERSPEED_ANY(temp))
                        temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
                else
@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 
        if (need_usb2_u3_exit) {
                spin_unlock_irqrestore(&xhci->lock, flags);
-               msleep(20);
+               msleep(USB_RESUME_TIMEOUT);
                spin_lock_irqsave(&xhci->lock, flags);
        }
 
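Editor's note: PORTSC mixes link-state bits with write-1-to-clear (RW1C) change bits, which is why both the new quirk function and the resume loop mask PORT_RWC_BITS before writing the register back — writing a 1 into a change bit would acknowledge an event the driver has not handled. The read-modify-write discipline in miniature (mask values below are made up for the sketch, not the real xHCI layout):

    #include <stdint.h>

    #define RW1C_MASK  0x00fe0000u   /* hypothetical change bits */
    #define WAKE_MASK  0x0e000000u   /* hypothetical wake-enable bits */
    #define WARM_RESET 0x80000000u   /* hypothetical PORT_WR analog */

    /* Build a value safe to write back: keep configuration, never write
     * a 1 into an RW1C bit, optionally request a warm reset. */
    static uint32_t portsc_writeback(uint32_t portsc, int warm_reset)
    {
            portsc &= ~(RW1C_MASK | WAKE_MASK);
            if (warm_reset)
                    portsc |= WARM_RESET;
            return portsc;
    }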
index d7b0f97..e96ae80 100644 (file)
 
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI     0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI       0x9cb1
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI            0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI             0x0aa8
 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI             0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI                   0x5aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-               pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+               (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
                xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
        }
@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
                xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+               xhci->quirks |= XHCI_MISSING_CAS;
+
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_EJ168) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
index b2c1dc5..f945380 100644 (file)
@@ -314,6 +314,8 @@ struct xhci_op_regs {
 #define XDEV_U2                (0x2 << 5)
 #define XDEV_U3                (0x3 << 5)
 #define XDEV_INACTIVE  (0x6 << 5)
+#define XDEV_POLLING   (0x7 << 5)
+#define XDEV_COMP_MODE  (0xa << 5)
 #define XDEV_RESUME    (0xf << 5)
 /* true: port has power (see HCC_PPC) */
 #define PORT_POWER     (1 << 9)
@@ -1653,6 +1655,7 @@ struct xhci_hcd {
 #define XHCI_MTK_HOST          (1 << 21)
 #define XHCI_SSIC_PORT_UNUSED  (1 << 22)
 #define XHCI_NO_64BIT_SUPPORT  (1 << 23)
+#define XHCI_MISSING_CAS       (1 << 24)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index bff4869..4042ea0 100644 (file)
@@ -1255,6 +1255,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 
        map_dma_buffer(request, musb, musb_ep);
 
+       pm_runtime_get_sync(musb->controller);
        spin_lock_irqsave(&musb->lock, lockflags);
 
        /* don't queue if the ep is down */
@@ -1275,6 +1276,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 
 unlock:
        spin_unlock_irqrestore(&musb->lock, lockflags);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
+
        return status;
 }
 
index 1ab6973..cc12254 100644 (file)
@@ -287,6 +287,7 @@ static int omap2430_musb_init(struct musb *musb)
        }
        musb->isr = omap2430_musb_interrupt;
        phy_init(musb->phy);
+       phy_power_on(musb->phy);
 
        l = musb_readl(musb->mregs, OTG_INTERFSEL);
 
@@ -323,8 +324,6 @@ static void omap2430_musb_enable(struct musb *musb)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
 
-       if (!WARN_ON(!musb->phy))
-               phy_power_on(musb->phy);
 
        switch (glue->status) {
 
@@ -361,9 +360,6 @@ static void omap2430_musb_disable(struct musb *musb)
        struct device *dev = musb->controller;
        struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
-       if (!WARN_ON(!musb->phy))
-               phy_power_off(musb->phy);
-
        if (glue->status != MUSB_UNKNOWN)
                omap_control_usb_set_mode(glue->control_otghs,
                        USB_MODE_DISCONNECT);
@@ -375,6 +371,7 @@ static int omap2430_musb_exit(struct musb *musb)
        struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
        omap2430_low_level_exit(musb);
+       phy_power_off(musb->phy);
        phy_exit(musb->phy);
        musb->phy = NULL;
        cancel_work_sync(&glue->omap_musb_mailbox_work);
index 1d70add..d544b33 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/delay.h>
 #include <linux/io.h>
 #include "common.h"
 #include "rcar3.h"
@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
 
        usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
 
-       if (enable)
+       if (enable) {
                usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
-       else
+               /* The controller on R-Car Gen3 needs to wait up to 45 usec */
+               udelay(45);
+       } else {
                usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+       }
 
        return 0;
 }
index 54a4de0..f61477b 100644 (file)
@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
        u8 control;
        int result;
 
-       cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+       result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+       if (result)
+               return result;
 
        result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
                |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
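Editor's note: the cp210x fix is the classic pattern of checking a transfer's return code before trusting the out-parameter it was supposed to fill — without it, `control` is decoded from uninitialized stack when the USB request fails. The same shape with a stubbed reader (names hypothetical):

    /* Stub standing in for the register read; may fail. */
    static int read_status_reg(unsigned char *out)
    {
            *out = 0x03;
            return 0;               /* a real reader may return -EIO */
    }

    static int get_modem_lines(void)
    {
            unsigned char control;
            int result = read_status_reg(&control);

            if (result)
                    return result;  /* never decode an unfilled 'control' */

            return (control & 0x01) | (control & 0x02);
    }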
index b2d767e..0ff7f38 100644 (file)
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
        /* ekey Devices */
        { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
        /* Infineon Devices */
-       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
        /* GE Healthcare devices */
        { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
        /* Active Research (Actisense) devices */
index f87a938..21011c0 100644 (file)
 /*
  * Infineon Technologies
  */
-#define INFINEON_VID           0x058b
-#define INFINEON_TRIBOARD_PID  0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID                   0x058b
+#define INFINEON_TRIBOARD_TC1798_PID   0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID    0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
 
 /*
  * Acton Research Corp.
index d213cf4..4a037b4 100644 (file)
@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
 
        serial->disconnected = 0;
 
-       usb_serial_console_init(serial->port[0]->minor);
+       if (num_ports > 0)
+               usb_serial_console_init(serial->port[0]->minor);
 exit:
        module_put(type->driver.owner);
        return 0;
index 79b2b62..79451f7 100644 (file)
@@ -133,6 +133,13 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
                bo[itr] = bi1[itr] ^ bi2[itr];
 }
 
+/* Scratch space for MAC calculations. */
+struct wusb_mac_scratch {
+       struct aes_ccm_b0 b0;
+       struct aes_ccm_b1 b1;
+       struct aes_ccm_a ax;
+};
+
 /*
  * CC-MAC function WUSB1.0[6.5]
  *
@@ -197,16 +204,15 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
  *       what sg[4] is for. Maybe there is a smarter way to do this.
  */
 static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
-                       struct crypto_cipher *tfm_aes, void *mic,
+                       struct crypto_cipher *tfm_aes,
+                       struct wusb_mac_scratch *scratch,
+                       void *mic,
                        const struct aes_ccm_nonce *n,
                        const struct aes_ccm_label *a, const void *b,
                        size_t blen)
 {
        int result = 0;
        SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
-       struct aes_ccm_b0 b0;
-       struct aes_ccm_b1 b1;
-       struct aes_ccm_a ax;
        struct scatterlist sg[4], sg_dst;
        void *dst_buf;
        size_t dst_size;
@@ -218,16 +224,17 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
         * These checks should be compile time optimized out
         * ensure @a fills b1's mac_header and following fields
         */
-       WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
-       WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
-       WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
-       WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+       WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
+       WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
+       WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
+       WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
 
        result = -ENOMEM;
        zero_padding = blen % sizeof(struct aes_ccm_block);
        if (zero_padding)
                zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
-       dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+       dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) +
+               zero_padding;
        dst_buf = kzalloc(dst_size, GFP_KERNEL);
        if (!dst_buf)
                goto error_dst_buf;
@@ -235,9 +242,9 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
        memset(iv, 0, sizeof(iv));
 
        /* Setup B0 */
-       b0.flags = 0x59;        /* Format B0 */
-       b0.ccm_nonce = *n;
-       b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+       scratch->b0.flags = 0x59;       /* Format B0 */
+       scratch->b0.ccm_nonce = *n;
+       scratch->b0.lm = cpu_to_be16(0);        /* WUSB1.0[6.5] sez l(m) is 0 */
 
        /* Setup B1
         *
@@ -246,12 +253,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
         * 14'--after clarification, it means to use A's contents
         * for MAC Header, EO, sec reserved and padding.
         */
-       b1.la = cpu_to_be16(blen + 14);
-       memcpy(&b1.mac_header, a, sizeof(*a));
+       scratch->b1.la = cpu_to_be16(blen + 14);
+       memcpy(&scratch->b1.mac_header, a, sizeof(*a));
 
        sg_init_table(sg, ARRAY_SIZE(sg));
-       sg_set_buf(&sg[0], &b0, sizeof(b0));
-       sg_set_buf(&sg[1], &b1, sizeof(b1));
+       sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0));
+       sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
        sg_set_buf(&sg[2], b, blen);
        /* 0 if well behaved :) */
        sg_set_buf(&sg[3], bzero, zero_padding);
@@ -276,11 +283,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
         * POS Crypto API: size is assumed to be AES's block size.
         * Thanks for documenting it -- tip taken from airo.c
         */
-       ax.flags = 0x01;                /* as per WUSB 1.0 spec */
-       ax.ccm_nonce = *n;
-       ax.counter = 0;
-       crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
-       bytewise_xor(mic, &ax, iv, 8);
+       scratch->ax.flags = 0x01;               /* as per WUSB 1.0 spec */
+       scratch->ax.ccm_nonce = *n;
+       scratch->ax.counter = 0;
+       crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax,
+                                 (void *)&scratch->ax);
+       bytewise_xor(mic, &scratch->ax, iv, 8);
        result = 8;
 error_cbc_crypt:
        kfree(dst_buf);
@@ -303,6 +311,7 @@ ssize_t wusb_prf(void *out, size_t out_size,
        struct aes_ccm_nonce n = *_n;
        struct crypto_skcipher *tfm_cbc;
        struct crypto_cipher *tfm_aes;
+       struct wusb_mac_scratch *scratch;
        u64 sfn = 0;
        __le64 sfn_le;
 
@@ -329,17 +338,25 @@ ssize_t wusb_prf(void *out, size_t out_size,
                printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
                goto error_setkey_aes;
        }
+       scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
+       if (!scratch) {
+               result = -ENOMEM;
+               goto error_alloc_scratch;
+       }
 
        for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
                sfn_le = cpu_to_le64(sfn++);
                memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
-               result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+               result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes,
                                      &n, a, b, blen);
                if (result < 0)
                        goto error_ccm_mac;
                bytes += result;
        }
        result = bytes;
+
+       kfree(scratch);
+error_alloc_scratch:
 error_ccm_mac:
 error_setkey_aes:
        crypto_free_cipher(tfm_aes);
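Editor's note: the wusbcore change hoists three AES-CCM block buffers out of wusb_ccm_mac()'s stack frame into a caller-allocated scratch struct, so the deep crypto call chain carries less stack and the allocation happens once per wusb_prf() invocation instead of once per block. A userspace analog of the hoisting pattern (sizes and helpers illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct ccm_block { unsigned char b[16]; };

    struct mac_scratch {            /* like struct wusb_mac_scratch */
            struct ccm_block b0, b1, ax;
    };

    static int compute_one_mac(struct mac_scratch *scratch, const void *msg,
                               size_t len)
    {
            memset(&scratch->b0, 0, sizeof(scratch->b0));   /* format B0... */
            (void)msg; (void)len;
            return 0;
    }

    static int compute_macs(const void *msg, size_t len, size_t blocks)
    {
            struct mac_scratch *scratch = malloc(sizeof(*scratch));
            int err = 0;

            if (!scratch)
                    return -1;
            /* one allocation, reused for every block of the derivation */
            for (size_t i = 0; !err && i < blocks; i++)
                    err = compute_one_mac(scratch, msg, len);
            free(scratch);
            return err;
    }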
index 3b1ca44..a2564ab 100644 (file)
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
        if (!pages)
                return -ENOMEM;
 
-       ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
-                       0, pages);
+       ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+                       FOLL_WRITE);
 
        if (ret < nr_pages) {
                nr_pages = ret;
index 60bdad3..150ce2a 100644 (file)
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
        /* Get the physical addresses of the source buffer */
        down_read(&current->mm->mmap_sem);
        num_pinned = get_user_pages(param.local_vaddr - lb_offset,
-               num_pages, (param.source == -1) ? READ : WRITE,
-               0, pages, NULL);
+               num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+               pages, NULL);
        up_read(&current->mm->mmap_sem);
 
        if (num_pinned != num_pages) {
index 15b6407..bdbadaa 100644 (file)
@@ -156,12 +156,16 @@ size_t vme_get_size(struct vme_resource *resource)
        case VME_MASTER:
                retval = vme_master_get(resource, &enabled, &base, &size,
                        &aspace, &cycle, &dwidth);
+               if (retval)
+                       return 0;
 
                return size;
                break;
        case VME_SLAVE:
                retval = vme_slave_get(resource, &enabled, &base, &size,
                        &buf_base, &aspace, &cycle);
+               if (retval)
+                       return 0;
 
                return size;
                break;
index e473e3b..6d1fbda 100644 (file)
@@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev)
                ret = wdat_wdt_enable_reboot(wdat);
                if (ret)
                        return ret;
+
+               ret = wdat_wdt_ping(&wdat->wdd);
+               if (ret)
+                       return ret;
        }
 
        return wdat_wdt_start(&wdat->wdd);
index e12bd36..26e5e85 100644 (file)
@@ -168,7 +168,9 @@ out:
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 struct shutdown_handler {
-       const char *command;
+#define SHUTDOWN_CMD_SIZE 11
+       const char command[SHUTDOWN_CMD_SIZE];
+       bool flag;
        void (*cb)(void);
 };
 
@@ -206,22 +208,22 @@ static void do_reboot(void)
        ctrl_alt_del();
 }
 
+static struct shutdown_handler shutdown_handlers[] = {
+       { "poweroff",   true,   do_poweroff },
+       { "halt",       false,  do_poweroff },
+       { "reboot",     true,   do_reboot   },
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+       { "suspend",    true,   do_suspend  },
+#endif
+};
+
 static void shutdown_handler(struct xenbus_watch *watch,
                             const char **vec, unsigned int len)
 {
        char *str;
        struct xenbus_transaction xbt;
        int err;
-       static struct shutdown_handler handlers[] = {
-               { "poweroff",   do_poweroff },
-               { "halt",       do_poweroff },
-               { "reboot",     do_reboot   },
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-               { "suspend",    do_suspend  },
-#endif
-               {NULL, NULL},
-       };
-       static struct shutdown_handler *handler;
+       int idx;
 
        if (shutting_down != SHUTDOWN_INVALID)
                return;
@@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch,
                return;
        }
 
-       for (handler = &handlers[0]; handler->command; handler++) {
-               if (strcmp(str, handler->command) == 0)
+       for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+               if (strcmp(str, shutdown_handlers[idx].command) == 0)
                        break;
        }
 
        /* Only acknowledge commands which we are prepared to handle. */
-       if (handler->cb)
+       if (idx < ARRAY_SIZE(shutdown_handlers))
                xenbus_write(xbt, "control", "shutdown", "");
 
        err = xenbus_transaction_end(xbt, 0);
@@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
                goto again;
        }
 
-       if (handler->cb) {
-               handler->cb();
+       if (idx < ARRAY_SIZE(shutdown_handlers)) {
+               shutdown_handlers[idx].cb();
        } else {
                pr_info("Ignoring shutdown request: %s\n", str);
                shutting_down = SHUTDOWN_INVALID;
@@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = {
 static int setup_shutdown_watcher(void)
 {
        int err;
+       int idx;
+#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))
+       char node[FEATURE_PATH_SIZE];
 
        err = register_xenbus_watch(&shutdown_watch);
        if (err) {
@@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void)
        }
 #endif
 
+       for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+               if (!shutdown_handlers[idx].flag)
+                       continue;
+               snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
+                        shutdown_handlers[idx].command);
+               xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+       }
+
        return 0;
 }
 
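Editor's note: with the handler table made static and flagged, setup_shutdown_watcher() can advertise exactly the commands it implements by writing control/feature-&lt;command&gt; nodes, and the node buffer is sized from SHUTDOWN_CMD_SIZE plus the literal prefix. The string assembly reduces to this standalone sketch:

    #include <stdio.h>

    #define SHUTDOWN_CMD_SIZE 11
    #define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))

    static const char commands[][SHUTDOWN_CMD_SIZE] = {
            "poweroff", "reboot", "suspend"
    };

    int main(void)
    {
            char node[FEATURE_PATH_SIZE];
            unsigned i;

            for (i = 0; i < sizeof(commands) / sizeof(commands[0]); i++) {
                    /* "feature-" (8) + command (<= 10) + NUL fits exactly */
                    snprintf(node, sizeof(node), "feature-%s", commands[i]);
                    printf("control/%s = 1\n", node);
            }
            return 0;
    }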
index c1010f0..1e8be12 100644 (file)
@@ -364,7 +364,7 @@ out:
 
 static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
 {
-       struct watch_adapter *watch, *tmp_watch;
+       struct watch_adapter *watch;
        char *path, *token;
        int err, rc;
        LIST_HEAD(staging_q);
@@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
                }
                list_add(&watch->list, &u->watches);
        } else {
-               list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+               list_for_each_entry(watch, &u->watches, list) {
                        if (!strcmp(watch->token, token) &&
                            !strcmp(watch->watch.node, path)) {
                                unregister_xenbus_watch(&watch->watch);
index 611a231..6d40a97 100644 (file)
@@ -335,7 +335,9 @@ static int backend_state;
 static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
                                        const char **v, unsigned int l)
 {
-       xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+       if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
+                        &backend_state) != 1)
+               backend_state = XenbusStateUnknown;
        printk(KERN_DEBUG "XENBUS: backend %s %s\n",
                        v[XS_WATCH_PATH], xenbus_strstate(backend_state));
        wake_up(&backend_state_wq);
index ccc70d9..d4d8b7e 100644 (file)
@@ -698,7 +698,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
                        ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
                        if (ret) {
-                               bio->bi_error = ret;
+                               comp_bio->bi_error = ret;
                                bio_endio(comp_bio);
                        }
 
@@ -728,7 +728,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
        if (ret) {
-               bio->bi_error = ret;
+               comp_bio->bi_error = ret;
                bio_endio(comp_bio);
        }
 
index 01bc36c..71261b4 100644 (file)
@@ -5805,6 +5805,64 @@ static int changed_extent(struct send_ctx *sctx,
        int ret = 0;
 
        if (sctx->cur_ino != sctx->cmp_key->objectid) {
+
+               if (result == BTRFS_COMPARE_TREE_CHANGED) {
+                       struct extent_buffer *leaf_l;
+                       struct extent_buffer *leaf_r;
+                       struct btrfs_file_extent_item *ei_l;
+                       struct btrfs_file_extent_item *ei_r;
+
+                       leaf_l = sctx->left_path->nodes[0];
+                       leaf_r = sctx->right_path->nodes[0];
+                       ei_l = btrfs_item_ptr(leaf_l,
+                                             sctx->left_path->slots[0],
+                                             struct btrfs_file_extent_item);
+                       ei_r = btrfs_item_ptr(leaf_r,
+                                             sctx->right_path->slots[0],
+                                             struct btrfs_file_extent_item);
+
+                       /*
+                        * We may have found an extent item that has changed
+                        * only its disk_bytenr field and the corresponding
+                        * inode item was not updated. This case happens due to
+                        * very specific timings during relocation when a leaf
+                        * that contains file extent items is COWed while
+                        * relocation is ongoing and its in the stage where it
+                        * updates data pointers. So when this happens we can
+                        * safely ignore it since we know it's the same extent,
+                        * but just at different logical and physical locations
+                        * (when an extent is fully replaced with a new one, we
+                        * know the generation number must have changed too,
+                        * since snapshot creation implies committing the current
+                        * transaction, and the inode item must have been updated
+                        * as well).
+                        * This replacement of the disk_bytenr happens at
+                        * relocation.c:replace_file_extents() through
+                        * relocation.c:btrfs_reloc_cow_block().
+                        */
+                       if (btrfs_file_extent_generation(leaf_l, ei_l) ==
+                           btrfs_file_extent_generation(leaf_r, ei_r) &&
+                           btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
+                           btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
+                           btrfs_file_extent_compression(leaf_l, ei_l) ==
+                           btrfs_file_extent_compression(leaf_r, ei_r) &&
+                           btrfs_file_extent_encryption(leaf_l, ei_l) ==
+                           btrfs_file_extent_encryption(leaf_r, ei_r) &&
+                           btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
+                           btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
+                           btrfs_file_extent_type(leaf_l, ei_l) ==
+                           btrfs_file_extent_type(leaf_r, ei_r) &&
+                           btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
+                           btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
+                           btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
+                           btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
+                           btrfs_file_extent_offset(leaf_l, ei_l) ==
+                           btrfs_file_extent_offset(leaf_r, ei_r) &&
+                           btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
+                           btrfs_file_extent_num_bytes(leaf_r, ei_r))
+                               return 0;
+               }
+
                inconsistent_snapshot_error(sctx, result, "extent");
                return -EIO;
        }
index 528cae1..3d33c4e 100644 (file)
@@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
                                             int index, int error)
 {
        struct btrfs_log_ctx *ctx;
+       struct btrfs_log_ctx *safe;
 
-       if (!error) {
-               INIT_LIST_HEAD(&root->log_ctxs[index]);
-               return;
-       }
-
-       list_for_each_entry(ctx, &root->log_ctxs[index], list)
+       list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+               list_del_init(&ctx->list);
                ctx->log_ret = error;
+       }
 
        INIT_LIST_HEAD(&root->log_ctxs[index]);
 }
@@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        mutex_unlock(&root->log_mutex);
 
 out_wake_log_root:
-       /*
-        * We needn't get log_mutex here because we are sure all
-        * the other tasks are blocked.
-        */
+       mutex_lock(&log_root_tree->log_mutex);
        btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
 
-       mutex_lock(&log_root_tree->log_mutex);
        log_root_tree->log_transid_committed++;
        atomic_set(&log_root_tree->log_commit[index2], 0);
        mutex_unlock(&log_root_tree->log_mutex);
@@ -2978,10 +2972,8 @@ out_wake_log_root:
        if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
                wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
-       /* See above. */
-       btrfs_remove_all_log_ctxs(root, index1, ret);
-
        mutex_lock(&root->log_mutex);
+       btrfs_remove_all_log_ctxs(root, index1, ret);
        root->log_transid_committed++;
        atomic_set(&root->log_commit[index1], 0);
        mutex_unlock(&root->log_mutex);
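Editor's note: btrfs_remove_all_log_ctxs() now walks the ctx list with the _safe iterator and unlinks each entry before storing the error, instead of only reinitializing the list head — and it runs under log_mutex, since a waiter may free its ctx as soon as log_ret becomes visible. The unlink-while-walking idiom in miniature (a plain C list, not the kernel's list_head):

    #include <stddef.h>

    struct ctx {
            struct ctx *next;
            int log_ret;
    };

    /* Cache 'next' before touching the current node, because the node
     * may be freed by its owner once log_ret is set. */
    static void remove_all_ctxs(struct ctx **head, int error)
    {
            struct ctx *c = *head, *safe;

            while (c) {
                    safe = c->next;
                    c->next = NULL;         /* like list_del_init() */
                    c->log_ret = error;
                    c = safe;
            }
            *head = NULL;                   /* like INIT_LIST_HEAD() */
    }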
index 7bf0882..18630e8 100644 (file)
@@ -1272,7 +1272,8 @@ again:
                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
-                        __free_page(page);
+                       if (page)
+                               __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
index bca1b49..ef4d046 100644 (file)
@@ -1511,7 +1511,8 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                        ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
        }
 
-       if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
+       if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
+           !(rinfo->hash_order && req->r_path2)) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
index a29ffce..b382e59 100644 (file)
@@ -845,6 +845,8 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
                err = ceph_fs_debugfs_init(fsc);
                if (err < 0)
                        goto fail;
+       } else {
+               root = dget(fsc->sb->s_root);
        }
 
        fsc->mount_state = CEPH_MOUNT_MOUNTED;
index 40b7032..febc28f 100644 (file)
@@ -16,7 +16,7 @@
 static int __remove_xattr(struct ceph_inode_info *ci,
                          struct ceph_inode_xattr *xattr);
 
-const struct xattr_handler ceph_other_xattr_handler;
+static const struct xattr_handler ceph_other_xattr_handler;
 
 /*
  * List of handlers for synthetic system.* attributes. Other
@@ -1086,7 +1086,7 @@ static int ceph_set_xattr_handler(const struct xattr_handler *handler,
        return __ceph_setxattr(inode, name, value, size, flags);
 }
 
-const struct xattr_handler ceph_other_xattr_handler = {
+static const struct xattr_handler ceph_other_xattr_handler = {
        .prefix = "",  /* match any name => handlers called with full name */
        .get = ceph_get_xattr_handler,
        .set = ceph_set_xattr_handler,
index 61057b7..98f87fe 100644 (file)
@@ -151,7 +151,10 @@ static int do_page_crypto(struct inode *inode,
                        struct page *src_page, struct page *dest_page,
                        gfp_t gfp_flags)
 {
-       u8 xts_tweak[FS_XTS_TWEAK_SIZE];
+       struct {
+               __le64 index;
+               u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
+       } xts_tweak;
        struct skcipher_request *req = NULL;
        DECLARE_FS_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
@@ -171,17 +174,15 @@ static int do_page_crypto(struct inode *inode,
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                page_crypt_complete, &ecr);
 
-       BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
-       memcpy(xts_tweak, &index, sizeof(index));
-       memset(&xts_tweak[sizeof(index)], 0,
-                       FS_XTS_TWEAK_SIZE - sizeof(index));
+       BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
+       xts_tweak.index = cpu_to_le64(index);
+       memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
 
        sg_init_table(&dst, 1);
        sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_SIZE, 0);
-       skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
-                                       xts_tweak);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
        if (rw == FS_DECRYPT)
                res = crypto_skcipher_decrypt(req);
        else
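Editor's note: replacing the raw tweak byte array with a two-member struct lets the 64-bit block index be assigned directly in little-endian form and turns the old runtime size check into a compile-time one. The same shape in standalone C11, with uint64_t standing in for the kernel's __le64 (so the cpu_to_le64() conversion is elided here):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define XTS_TWEAK_SIZE 16

    struct xts_tweak {
            uint64_t index;                 /* __le64 in the patch */
            uint8_t padding[XTS_TWEAK_SIZE - sizeof(uint64_t)];
    };

    static_assert(sizeof(struct xts_tweak) == XTS_TWEAK_SIZE,
                  "tweak must exactly fill the cipher IV");

    static void init_tweak(struct xts_tweak *t, uint64_t block_index)
    {
            t->index = block_index;         /* kernel: cpu_to_le64(index) */
            memset(t->padding, 0, sizeof(t->padding));
    }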
index ed115ac..6865663 100644 (file)
@@ -109,6 +109,8 @@ int fscrypt_process_policy(struct file *filp,
        if (ret)
                return ret;
 
+       inode_lock(inode);
+
        if (!inode_has_encryption_context(inode)) {
                if (!S_ISDIR(inode->i_mode))
                        ret = -EINVAL;
@@ -127,6 +129,8 @@ int fscrypt_process_policy(struct file *filp,
                ret = -EINVAL;
        }
 
+       inode_unlock(inode);
+
        mnt_drop_write_file(filp);
        return ret;
 }
index 6fcfb3f..4e497b9 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 {
        struct page *page;
        int ret;
+       unsigned int gup_flags = FOLL_FORCE;
 
 #ifdef CONFIG_STACK_GROWSUP
        if (write) {
@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                        return NULL;
        }
 #endif
+
+       if (write)
+               gup_flags |= FOLL_WRITE;
+
        /*
         * We are doing an exec().  'current' is the process
         * doing the exec and bprm->mm is the new process's mm.
         */
-       ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
-                       1, &page, NULL);
+       ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+                       &page, NULL);
        if (ret <= 0)
                return NULL;
 
index 7910165..42f9a0a 100644 (file)
@@ -137,7 +137,7 @@ Espan:
 bad_entry:
        EXOFS_ERR(
                "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
-               "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
+               "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
                dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
                _LLU(le64_to_cpu(p->inode_no)),
                rec_len, p->name_len);
index d831e24..41b8b44 100644 (file)
@@ -622,7 +622,7 @@ static int ext2_get_blocks(struct inode *inode,
                           u32 *bno, bool *new, bool *boundary,
                           int create)
 {
-       int err = -EIO;
+       int err;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
@@ -639,7 +639,7 @@ static int ext2_get_blocks(struct inode *inode,
        depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
        if (depth == 0)
-               return (err);
+               return -EIO;
 
        partial = ext2_get_branch(inode, depth, offsets, chain, &err);
        /* Simplest case - block found, no allocation needed */
@@ -761,7 +761,6 @@ static int ext2_get_blocks(struct inode *inode,
        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
 got_it:
-       *bno = le32_to_cpu(chain[depth-1].key);
        if (count > blocks_to_boundary)
                *boundary = true;
        err = count;
@@ -772,6 +771,8 @@ cleanup:
                brelse(partial->bh);
                partial--;
        }
+       if (err > 0)
+               *bno = le32_to_cpu(chain[depth-1].key);
        return err;
 }
 
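Editor's note: the ext2_get_blocks() fix is about output-parameter discipline — *bno is now written only after err is known to be a positive block count, so callers on the error path never see a half-valid result. The generic shape, with do_lookup as a stub invented for the sketch:

    #include <stdint.h>

    /* Stub: returns a mapped block count (> 0) or a negative errno. */
    static int do_lookup(uint32_t iblock, uint32_t *key)
    {
            *key = iblock + 100;
            return 1;
    }

    static int get_blocks(uint32_t iblock, uint32_t *bno)
    {
            uint32_t key;
            int err = do_lookup(iblock, &key);

            if (err > 0)
                    *bno = key;     /* fill outputs only on success */
            return err;
    }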
index 02ddec6..fdb1954 100644 (file)
@@ -128,12 +128,12 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
        node = rb_first(&sbi->system_blks);
        while (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
-               printk("%s%llu-%llu", first ? "" : ", ",
+               printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
                       entry->start_blk, entry->start_blk + entry->count - 1);
                first = 0;
                node = rb_next(node);
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int ext4_setup_system_zone(struct super_block *sb)
index 3ef1df6..1aba469 100644 (file)
 #ifdef CONFIG_EXT4_DEBUG
 extern ushort ext4_mballoc_debug;
 
-#define mb_debug(n, fmt, a...)                                         \
-       do {                                                            \
-               if ((n) <= ext4_mballoc_debug) {                        \
-                       printk(KERN_DEBUG "(%s, %d): %s: ",             \
-                              __FILE__, __LINE__, __func__);           \
-                       printk(fmt, ## a);                              \
-               }                                                       \
-       } while (0)
+#define mb_debug(n, fmt, ...)                                          \
+do {                                                                   \
+       if ((n) <= ext4_mballoc_debug) {                                \
+               printk(KERN_DEBUG "(%s, %d): %s: " fmt,                 \
+                      __FILE__, __LINE__, __func__, ##__VA_ARGS__);    \
+       }                                                               \
+} while (0)
 #else
-#define mb_debug(n, fmt, a...)         no_printk(fmt, ## a)
+#define mb_debug(n, fmt, ...)  no_printk(fmt, ##__VA_ARGS__)
 #endif
 
 #define EXT4_MB_HISTORY_ALLOC          1       /* allocation */
index f92f10d..104f8bf 100644 (file)
@@ -577,12 +577,13 @@ static inline unsigned dx_node_limit(struct inode *dir)
 static void dx_show_index(char * label, struct dx_entry *entries)
 {
        int i, n = dx_get_count (entries);
-       printk(KERN_DEBUG "%s index ", label);
+       printk(KERN_DEBUG "%s index", label);
        for (i = 0; i < n; i++) {
-               printk("%x->%lu ", i ? dx_get_hash(entries + i) :
-                               0, (unsigned long)dx_get_block(entries + i));
+               printk(KERN_CONT " %x->%lu",
+                      i ? dx_get_hash(entries + i) : 0,
+                      (unsigned long)dx_get_block(entries + i));
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 struct stats
@@ -679,7 +680,7 @@ static struct stats dx_show_leaf(struct inode *dir,
                }
                de = ext4_next_entry(de, size);
        }
-       printk("(%i)\n", names);
+       printk(KERN_CONT "(%i)\n", names);
        return (struct stats) { names, space, 1 };
 }
 
@@ -798,7 +799,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
                q = entries + count - 1;
                while (p <= q) {
                        m = p + (q - p) / 2;
-                       dxtrace(printk("."));
+                       dxtrace(printk(KERN_CONT "."));
                        if (dx_get_hash(m) > hash)
                                q = m - 1;
                        else
@@ -810,7 +811,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
                        at = entries;
                        while (n--)
                        {
-                               dxtrace(printk(","));
+                               dxtrace(printk(KERN_CONT ","));
                                if (dx_get_hash(++at) > hash)
                                {
                                        at--;
@@ -821,7 +822,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
                }
 
                at = p - 1;
-               dxtrace(printk(" %x->%u\n", at == entries ? 0 : dx_get_hash(at),
+               dxtrace(printk(KERN_CONT " %x->%u\n",
+                              at == entries ? 0 : dx_get_hash(at),
                               dx_get_block(at)));
                frame->entries = entries;
                frame->at = at;
index 6db81fb..20da99d 100644 (file)
@@ -597,14 +597,15 @@ void __ext4_std_error(struct super_block *sb, const char *function,
 void __ext4_abort(struct super_block *sb, const char *function,
                unsigned int line, const char *fmt, ...)
 {
+       struct va_format vaf;
        va_list args;
 
        save_error_info(sb, function, line);
        va_start(args, fmt);
-       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
-              function, line);
-       vprintk(fmt, args);
-       printk("\n");
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
+              sb->s_id, function, line, &vaf);
        va_end(args);
 
        if ((sb->s_flags & MS_RDONLY) == 0) {
@@ -2715,12 +2716,12 @@ static void print_daily_error_info(unsigned long arg)
                       es->s_first_error_func,
                       le32_to_cpu(es->s_first_error_line));
                if (es->s_first_error_ino)
-                       printk(": inode %u",
+                       printk(KERN_CONT ": inode %u",
                               le32_to_cpu(es->s_first_error_ino));
                if (es->s_first_error_block)
-                       printk(": block %llu", (unsigned long long)
+                       printk(KERN_CONT ": block %llu", (unsigned long long)
                               le64_to_cpu(es->s_first_error_block));
-               printk("\n");
+               printk(KERN_CONT "\n");
        }
        if (es->s_last_error_time) {
                printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
@@ -2729,12 +2730,12 @@ static void print_daily_error_info(unsigned long arg)
                       es->s_last_error_func,
                       le32_to_cpu(es->s_last_error_line));
                if (es->s_last_error_ino)
-                       printk(": inode %u",
+                       printk(KERN_CONT ": inode %u",
                               le32_to_cpu(es->s_last_error_ino));
                if (es->s_last_error_block)
-                       printk(": block %llu", (unsigned long long)
+                       printk(KERN_CONT ": block %llu", (unsigned long long)
                               le64_to_cpu(es->s_last_error_block));
-               printk("\n");
+               printk(KERN_CONT "\n");
        }
        mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
 }
index 73bcfd4..42145be 100644 (file)
@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
 EXT4_ATTR_FEATURE(lazy_itable_init);
 EXT4_ATTR_FEATURE(batched_discard);
 EXT4_ATTR_FEATURE(meta_bg_resize);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
 EXT4_ATTR_FEATURE(encryption);
+#endif
 EXT4_ATTR_FEATURE(metadata_csum_seed);
 
 static struct attribute *ext4_feat_attrs[] = {
        ATTR_LIST(lazy_itable_init),
        ATTR_LIST(batched_discard),
        ATTR_LIST(meta_bg_resize),
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
        ATTR_LIST(encryption),
+#endif
        ATTR_LIST(metadata_csum_seed),
        NULL,
 };
index c15d633..d77be9e 100644 (file)
 #include "acl.h"
 
 #ifdef EXT4_XATTR_DEBUG
-# define ea_idebug(inode, f...) do { \
-               printk(KERN_DEBUG "inode %s:%lu: ", \
-                       inode->i_sb->s_id, inode->i_ino); \
-               printk(f); \
-               printk("\n"); \
-       } while (0)
-# define ea_bdebug(bh, f...) do { \
-               printk(KERN_DEBUG "block %pg:%lu: ",               \
-                      bh->b_bdev, (unsigned long) bh->b_blocknr); \
-               printk(f); \
-               printk("\n"); \
-       } while (0)
+# define ea_idebug(inode, fmt, ...)                                    \
+       printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",                    \
+              inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
+# define ea_bdebug(bh, fmt, ...)                                       \
+       printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",                   \
+              bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
 #else
 # define ea_idebug(inode, fmt, ...)    no_printk(fmt, ##__VA_ARGS__)
 # define ea_bdebug(bh, fmt, ...)       no_printk(fmt, ##__VA_ARGS__)
@@ -241,7 +235,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
        int error = -EFSCORRUPTED;
 
        if (((void *) header >= end) ||
-           (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+           (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
                goto errout;
        error = ext4_xattr_check_names(entry, end, entry);
 errout:
index 93985c6..6f14ee9 100644 (file)
@@ -852,16 +852,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 
        for (segno = start_segno; segno < end_segno; segno++) {
 
-               if (get_valid_blocks(sbi, segno, 1) == 0 ||
-                                       unlikely(f2fs_cp_error(sbi)))
-                       goto next;
-
                /* find segment summary of victim */
                sum_page = find_get_page(META_MAPPING(sbi),
                                        GET_SUM_BLOCK(sbi, segno));
-               f2fs_bug_on(sbi, !PageUptodate(sum_page));
                f2fs_put_page(sum_page, 0);
 
+               if (get_valid_blocks(sbi, segno, 1) == 0 ||
+                               !PageUptodate(sum_page) ||
+                               unlikely(f2fs_cp_error(sbi)))
+                       goto next;
+
                sum = page_address(sum_page);
                f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
 
index 013d1d3..a8ee8c3 100644 (file)
@@ -433,8 +433,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
        struct page *page = data;
        int ret;
 
-       ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
-                       NULL, iomap);
+       ret = __block_write_begin_int(page, pos, length, NULL, iomap);
        if (ret)
                return ret;
 
@@ -561,7 +560,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
        }
 
        while (len > 0) {
-               ret = iomap_apply(inode, start, len, 0, ops, &ctx,
+               ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
                                iomap_fiemap_actor);
                /* inode with no (attribute) mapping will give ENOENT */
                if (ret == -ENOENT)
index ad0c745..871c8b3 100644 (file)
@@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
        pri_bh = NULL;
 
 root_found:
+       /* We don't support read-write mounts */
+       if (!(s->s_flags & MS_RDONLY)) {
+               error = -EACCES;
+               goto out_freebh;
+       }
 
        if (joliet_level && (pri == NULL || !opt.rock)) {
                /* This is the case of Joliet with the norock mount flag.
@@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb,
 static struct dentry *isofs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
-       /* We don't support read-write mounts */
-       if (!(flags & MS_RDONLY))
-               return ERR_PTR(-EACCES);
        return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
 }
 
index 3d8246a..e165266 100644 (file)
@@ -1149,6 +1149,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+               spin_unlock(&journal->j_list_lock);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;
@@ -1156,8 +1157,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
                JBUFFER_TRACE(jh, "set next transaction");
                spin_lock(&journal->j_list_lock);
                jh->b_next_transaction = transaction;
+               spin_unlock(&journal->j_list_lock);
        }
-       spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
 
        /*
index 2bcb86e..78219d5 100644 (file)
@@ -911,6 +911,7 @@ const struct file_operations kernfs_file_fops = {
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
+       .fsync          = noop_fsync,
 };
 
 /**
index ce93b41..22c5b4a 100644 (file)
@@ -1609,6 +1609,7 @@ int fcntl_getlease(struct file *filp)
 
        ctx = smp_load_acquire(&inode->i_flctx);
        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+               percpu_down_read_preempt_disable(&file_rwsem);
                spin_lock(&ctx->flc_lock);
                time_out_leases(inode, &dispose);
                list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1618,6 +1619,8 @@ int fcntl_getlease(struct file *filp)
                        break;
                }
                spin_unlock(&ctx->flc_lock);
+               percpu_up_read_preempt_enable(&file_rwsem);
+
                locks_dispose_list(&dispose);
        }
        return type;
@@ -2529,11 +2532,14 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
        if (list_empty(&ctx->flc_lease))
                return;
 
+       percpu_down_read_preempt_disable(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
                if (filp == fl->fl_file)
                        lease_modify(fl, F_UNLCK, &dispose);
        spin_unlock(&ctx->flc_lock);
+       percpu_up_read_preempt_enable(&file_rwsem);
+
        locks_dispose_list(&dispose);
 }
 
index 2178476..2905479 100644 (file)
@@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work)
                u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
                u64 end = (hdr->args.offset + hdr->args.count +
                        PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
+               u64 lwb = hdr->args.offset + hdr->args.count;
 
                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
-                                       (end - start) >> SECTOR_SHIFT, end);
+                                       (end - start) >> SECTOR_SHIFT, lwb);
        }
 
        pnfs_ld_write_done(hdr);
index ad917bd..7897826 100644 (file)
@@ -1545,7 +1545,7 @@ static int update_open_stateid(struct nfs4_state *state,
        struct nfs_client *clp = server->nfs_client;
        struct nfs_inode *nfsi = NFS_I(state->inode);
        struct nfs_delegation *deleg_cur;
-       nfs4_stateid freeme = {0};
+       nfs4_stateid freeme = { };
        int ret = 0;
 
        fmode &= (FMODE_READ|FMODE_WRITE);
index 1e8fe84..5355efb 100644 (file)
@@ -73,7 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
                }
        }
 
-       dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+       orangefs_set_timeout(dentry);
        ret = 1;
 out_release_op:
        op_release(new_op);
@@ -94,8 +94,9 @@ out_drop:
 static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
        int ret;
+       unsigned long time = (unsigned long) dentry->d_fsdata;
 
-       if (time_before(jiffies, dentry->d_time))
+       if (time_before(jiffies, time))
                return 1;
 
        if (flags & LOOKUP_RCU)
index 66ea0cc..02cc613 100644 (file)
@@ -621,9 +621,9 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
         * readahead cache (if any); this forces an expensive refresh of
         * data for the next caller of mmap (or 'get_block' accesses)
         */
-       if (file->f_path.dentry->d_inode &&
-           file->f_path.dentry->d_inode->i_mapping &&
-           mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) {
+       if (file_inode(file) &&
+           file_inode(file)->i_mapping &&
+           mapping_nrpages(&file_inode(file)->i_data)) {
                if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
                        gossip_debug(GOSSIP_INODE_DEBUG,
                            "calling flush_racache on %pU\n",
@@ -632,7 +632,7 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
                        gossip_debug(GOSSIP_INODE_DEBUG,
                            "flush_racache finished\n");
                }
-               truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
+               truncate_inode_pages(file_inode(file)->i_mapping,
                                     0);
        }
        return 0;
@@ -648,7 +648,7 @@ static int orangefs_fsync(struct file *file,
 {
        int ret = -EINVAL;
        struct orangefs_inode_s *orangefs_inode =
-               ORANGEFS_I(file->f_path.dentry->d_inode);
+               ORANGEFS_I(file_inode(file));
        struct orangefs_kernel_op_s *new_op = NULL;
 
        /* required call */
@@ -661,7 +661,7 @@ static int orangefs_fsync(struct file *file,
 
        ret = service_operation(new_op,
                        "orangefs_fsync",
-                       get_interruptible_flag(file->f_path.dentry->d_inode));
+                       get_interruptible_flag(file_inode(file)));
 
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_fsync got return value of %d\n",
@@ -669,7 +669,7 @@ static int orangefs_fsync(struct file *file,
 
        op_release(new_op);
 
-       orangefs_flush_inode(file->f_path.dentry->d_inode);
+       orangefs_flush_inode(file_inode(file));
        return ret;
 }
 
index d15d3d2..a290ff6 100644 (file)
@@ -72,7 +72,7 @@ static int orangefs_create(struct inode *dir,
 
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);
-       dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+       orangefs_set_timeout(dentry);
        ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
        gossip_debug(GOSSIP_NAME_DEBUG,
@@ -183,7 +183,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
                goto out;
        }
 
-       dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+       orangefs_set_timeout(dentry);
 
        inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
        if (IS_ERR(inode)) {
@@ -322,7 +322,7 @@ static int orangefs_symlink(struct inode *dir,
 
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);
-       dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+       orangefs_set_timeout(dentry);
        ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
        gossip_debug(GOSSIP_NAME_DEBUG,
@@ -386,7 +386,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);
-       dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+       orangefs_set_timeout(dentry);
        ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
        gossip_debug(GOSSIP_NAME_DEBUG,
index 0a82048..3bf803d 100644 (file)
@@ -580,4 +580,11 @@ static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
 #endif
 }
 
+static inline void orangefs_set_timeout(struct dentry *dentry)
+{
+       unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+
+       dentry->d_fsdata = (void *) time;
+}
+
 #endif /* __ORANGEFSKERNEL_H */
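
orangefs now keeps the dcache expiry in `d_fsdata` as an integer stuffed into the pointer, and the revalidate path compares it against jiffies with the wrap-safe `time_before()`. A userspace sketch of the same pattern (the jiffies/HZ stand-ins here are hypothetical; the kernel supplies its own):

    #include <stdio.h>
    #include <time.h>

    #define HZ 1000UL
    #define TIMEOUT_MSECS 5000UL

    /* wrap-safe "is a before b", same idea as the kernel's time_before() */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    static unsigned long fake_jiffies(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * HZ + ts.tv_nsec / (1000000000UL / HZ);
    }

    int main(void)
    {
            void *d_fsdata;         /* stands in for dentry->d_fsdata */
            unsigned long expiry = fake_jiffies() + TIMEOUT_MSECS * HZ / 1000;

            d_fsdata = (void *)expiry;      /* what orangefs_set_timeout() does */
            printf("still valid: %d\n",
                   time_before(fake_jiffies(), (unsigned long)d_fsdata));
            return 0;
    }
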
index 89600fd..81818ad 100644 (file)
@@ -412,10 +412,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        mm = get_task_mm(task);
        if (mm) {
                vsize = task_vsize(mm);
-               if (permitted) {
-                       eip = KSTK_EIP(task);
-                       esp = KSTK_ESP(task);
-               }
+               /*
+                * esp and eip are intentionally zeroed out.  There is no
+                * non-racy way to read them without freezing the task.
+                * Programs that need reliable values can use ptrace(2).
+                */
        }
 
        get_task_comm(tcomm, task);
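
For callers that really need these values, the comment points at ptrace(2). A hedged x86-64 userspace sketch of that route, reading a target's instruction and stack pointers after attaching (PID taken from argv):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/user.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            struct user_regs_struct regs;
            pid_t pid;

            if (argc < 2)
                    return 1;
            pid = (pid_t)atoi(argv[1]);

            if (ptrace(PTRACE_ATTACH, pid, 0, 0) == -1) {
                    perror("PTRACE_ATTACH");
                    return 1;
            }
            waitpid(pid, NULL, 0);          /* wait for the attach stop */

            if (ptrace(PTRACE_GETREGS, pid, 0, &regs) == 0)
                    printf("eip=%llx esp=%llx\n",
                           (unsigned long long)regs.rip,
                           (unsigned long long)regs.rsp);

            ptrace(PTRACE_DETACH, pid, 0, 0);
            return 0;
    }
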
index c2964d8..ca651ac 100644 (file)
@@ -832,6 +832,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        unsigned long addr = *ppos;
        ssize_t copied;
        char *page;
+       unsigned int flags;
 
        if (!mm)
                return 0;
@@ -844,6 +845,11 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        if (!atomic_inc_not_zero(&mm->mm_users))
                goto free;
 
+       /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
+       flags = FOLL_FORCE;
+       if (write)
+               flags |= FOLL_WRITE;
+
        while (count > 0) {
                int this_len = min_t(int, count, PAGE_SIZE);
 
@@ -852,7 +858,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
                        break;
                }
 
-               this_len = access_remote_vm(mm, addr, page, this_len, write);
+               this_len = access_remote_vm(mm, addr, page, this_len, flags);
                if (!this_len) {
                        if (!copied)
                                copied = -EIO;
@@ -964,8 +970,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                max_len = min_t(size_t, PAGE_SIZE, count);
                this_len = min(max_len, this_len);
 
-               retval = access_remote_vm(mm, (env_start + src),
-                       page, this_len, 0);
+               retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
 
                if (retval <= 0) {
                        ret = retval;
@@ -1007,6 +1012,9 @@ static ssize_t auxv_read(struct file *file, char __user *buf,
 {
        struct mm_struct *mm = file->private_data;
        unsigned int nwords = 0;
+
+       if (!mm)
+               return 0;
        do {
                nwords += 2;
        } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
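
mem_rw() above is the backend for /proc/PID/mem. From userspace the patched path is exercised like this (a sketch; it requires ptrace permission over the target, and the PID and virtual address are arguments):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            unsigned char buf[16];
            char path[64];
            ssize_t n, i;
            int fd;

            if (argc < 3)
                    return 1;
            snprintf(path, sizeof(path), "/proc/%s/mem", argv[1]);

            fd = open(path, O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* pread() at the virtual address lands in mem_rw() above */
            n = pread(fd, buf, sizeof(buf), (off_t)strtoul(argv[2], NULL, 0));
            for (i = 0; i < n; i++)
                    printf("%02x ", buf[i]);
            putchar('\n');
            close(fd);
            return 0;
    }
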
index 6909582..35b92d8 100644 (file)
@@ -266,24 +266,15 @@ static int do_maps_open(struct inode *inode, struct file *file,
  * /proc/PID/maps that is the stack of the main task.
  */
 static int is_stack(struct proc_maps_private *priv,
-                   struct vm_area_struct *vma, int is_pid)
+                   struct vm_area_struct *vma)
 {
-       int stack = 0;
-
-       if (is_pid) {
-               stack = vma->vm_start <= vma->vm_mm->start_stack &&
-                       vma->vm_end >= vma->vm_mm->start_stack;
-       } else {
-               struct inode *inode = priv->inode;
-               struct task_struct *task;
-
-               rcu_read_lock();
-               task = pid_task(proc_pid(inode), PIDTYPE_PID);
-               if (task)
-                       stack = vma_is_stack_for_task(vma, task);
-               rcu_read_unlock();
-       }
-       return stack;
+       /*
+        * We make no effort to guess what a given thread considers to be
+        * its "stack".  It's not even well-defined for programs written
+        * in languages like Go.
+        */
+       return vma->vm_start <= vma->vm_mm->start_stack &&
+               vma->vm_end >= vma->vm_mm->start_stack;
 }
 
 static void
@@ -354,7 +345,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                        goto done;
                }
 
-               if (is_stack(priv, vma, is_pid))
+               if (is_stack(priv, vma))
                        name = "[stack]";
        }
 
@@ -1669,7 +1660,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
                seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
-       } else if (is_stack(proc_priv, vma, is_pid)) {
+       } else if (is_stack(proc_priv, vma)) {
                seq_puts(m, " stack");
        }
 
index faacb0c..3717562 100644 (file)
@@ -124,25 +124,17 @@ unsigned long task_statm(struct mm_struct *mm,
 }
 
 static int is_stack(struct proc_maps_private *priv,
-                   struct vm_area_struct *vma, int is_pid)
+                   struct vm_area_struct *vma)
 {
        struct mm_struct *mm = vma->vm_mm;
-       int stack = 0;
-
-       if (is_pid) {
-               stack = vma->vm_start <= mm->start_stack &&
-                       vma->vm_end >= mm->start_stack;
-       } else {
-               struct inode *inode = priv->inode;
-               struct task_struct *task;
-
-               rcu_read_lock();
-               task = pid_task(proc_pid(inode), PIDTYPE_PID);
-               if (task)
-                       stack = vma_is_stack_for_task(vma, task);
-               rcu_read_unlock();
-       }
-       return stack;
+
+       /*
+        * We make no effort to guess what a given thread considers to be
+        * its "stack".  It's not even well-defined for programs written
+        * in languages like Go.
+        */
+       return vma->vm_start <= mm->start_stack &&
+               vma->vm_end >= mm->start_stack;
 }
 
 /*
@@ -184,7 +176,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "");
-       } else if (mm && is_stack(priv, vma, is_pid)) {
+       } else if (mm && is_stack(priv, vma)) {
                seq_pad(m, ' ');
                seq_printf(m, "[stack]");
        }
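
After this simplification (in both the MMU and no-MMU variants), `[stack]` in /proc/PID/maps annotates exactly the VMA containing `mm->start_stack`; per-thread stacks are no longer guessed at. A quick userspace check:

    /* Print the VMA(s) the kernel now labels "[stack]" for this process. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[512];
            FILE *f = fopen("/proc/self/maps", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (strstr(line, "[stack]"))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }
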
index c8f60df..bd4a5e8 100644 (file)
@@ -439,7 +439,7 @@ static unsigned int vfs_dent_type(uint8_t type)
  */
 static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 {
-       int err;
+       int err = 0;
        struct qstr nm;
        union ubifs_key key;
        struct ubifs_dent_node *dent;
@@ -541,14 +541,12 @@ out:
        kfree(file->private_data);
        file->private_data = NULL;
 
-       if (err != -ENOENT) {
+       if (err != -ENOENT)
                ubifs_err(c, "cannot find next direntry, error %d", err);
-               return err;
-       }
 
        /* 2 is a special value indicating that there are no more direntries */
        ctx->pos = 2;
-       return 0;
+       return err;
 }
 
 /* Free saved readdir() state when the directory is closed */
@@ -1060,9 +1058,9 @@ static void unlock_4_inodes(struct inode *inode1, struct inode *inode2,
        mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
 }
 
-static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
-                       struct inode *new_dir, struct dentry *new_dentry,
-                       unsigned int flags)
+static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+                    struct inode *new_dir, struct dentry *new_dentry,
+                    unsigned int flags)
 {
        struct ubifs_info *c = old_dir->i_sb->s_fs_info;
        struct inode *old_inode = d_inode(old_dentry);
@@ -1323,7 +1321,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
        return err;
 }
 
-static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry,
                        unsigned int flags)
 {
@@ -1336,7 +1334,7 @@ static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
        if (flags & RENAME_EXCHANGE)
                return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
 
-       return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+       return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
 }
 
 int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -1387,7 +1385,7 @@ const struct inode_operations ubifs_dir_inode_operations = {
        .mkdir       = ubifs_mkdir,
        .rmdir       = ubifs_rmdir,
        .mknod       = ubifs_mknod,
-       .rename      = ubifs_rename2,
+       .rename      = ubifs_rename,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
        .listxattr   = ubifs_listxattr,
index 6c2f4d4..d9f9615 100644 (file)
@@ -172,6 +172,7 @@ out_cancel:
        host_ui->xattr_cnt -= 1;
        host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
        host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+       host_ui->xattr_names -= nm->len;
        mutex_unlock(&host_ui->ui_mutex);
 out_free:
        make_bad_inode(inode);
@@ -478,6 +479,7 @@ out_cancel:
        host_ui->xattr_cnt += 1;
        host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
        host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+       host_ui->xattr_names += nm->len;
        mutex_unlock(&host_ui->ui_mutex);
        ubifs_release_budget(c, &req);
        make_bad_inode(inode);
index c27344c..c6eb219 100644 (file)
@@ -3974,9 +3974,6 @@ xfs_bmap_remap_alloc(
         * allocating, so skip that check by pretending to be freeing.
         */
        error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
-       if (error)
-               goto error0;
-error0:
        xfs_perag_put(args.pag);
        if (error)
                trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
@@ -3999,6 +3996,39 @@ xfs_bmap_alloc(
        return xfs_bmap_btalloc(ap);
 }
 
+/* Trim extent to fit a logical block range. */
+void
+xfs_trim_extent(
+       struct xfs_bmbt_irec    *irec,
+       xfs_fileoff_t           bno,
+       xfs_filblks_t           len)
+{
+       xfs_fileoff_t           distance;
+       xfs_fileoff_t           end = bno + len;
+
+       if (irec->br_startoff + irec->br_blockcount <= bno ||
+           irec->br_startoff >= end) {
+               irec->br_blockcount = 0;
+               return;
+       }
+
+       if (irec->br_startoff < bno) {
+               distance = bno - irec->br_startoff;
+               if (isnullstartblock(irec->br_startblock))
+                       irec->br_startblock = DELAYSTARTBLOCK;
+               if (irec->br_startblock != DELAYSTARTBLOCK &&
+                   irec->br_startblock != HOLESTARTBLOCK)
+                       irec->br_startblock += distance;
+               irec->br_startoff += distance;
+               irec->br_blockcount -= distance;
+       }
+
+       if (end < irec->br_startoff + irec->br_blockcount) {
+               distance = irec->br_startoff + irec->br_blockcount - end;
+               irec->br_blockcount -= distance;
+       }
+}
+
 /*
  * Trim the returned map to the required bounds
  */
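
xfs_trim_extent() clips an extent record to the logical block range [bno, bno + len). A standalone mirror of the arithmetic on plain integers (hypothetical values; the DELAYSTARTBLOCK/HOLESTARTBLOCK special-casing is omitted):

    #include <stdio.h>
    #include <stdint.h>

    struct irec { uint64_t startoff, startblock, blockcount; };

    static void trim_extent(struct irec *irec, uint64_t bno, uint64_t len)
    {
            uint64_t end = bno + len, distance;

            if (irec->startoff + irec->blockcount <= bno ||
                irec->startoff >= end) {
                    irec->blockcount = 0;   /* no overlap at all */
                    return;
            }
            if (irec->startoff < bno) {     /* clip the front */
                    distance = bno - irec->startoff;
                    irec->startblock += distance;
                    irec->startoff += distance;
                    irec->blockcount -= distance;
            }
            if (end < irec->startoff + irec->blockcount)    /* clip the tail */
                    irec->blockcount = end - irec->startoff;
    }

    int main(void)
    {
            struct irec r = { 10, 1000, 20 };       /* blocks 10..29 */

            trim_extent(&r, 15, 5);                 /* keep 15..19 */
            printf("startoff=%llu startblock=%llu blockcount=%llu\n",
                   (unsigned long long)r.startoff,
                   (unsigned long long)r.startblock,
                   (unsigned long long)r.blockcount);
            return 0;
    }
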
@@ -4829,6 +4859,219 @@ xfs_bmap_split_indlen(
        return stolen;
 }
 
+int
+xfs_bmap_del_extent_delay(
+       struct xfs_inode        *ip,
+       int                     whichfork,
+       xfs_extnum_t            *idx,
+       struct xfs_bmbt_irec    *got,
+       struct xfs_bmbt_irec    *del)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
+       struct xfs_bmbt_irec    new;
+       int64_t                 da_old, da_new, da_diff = 0;
+       xfs_fileoff_t           del_endoff, got_endoff;
+       xfs_filblks_t           got_indlen, new_indlen, stolen;
+       int                     error = 0, state = 0;
+       bool                    isrt;
+
+       XFS_STATS_INC(mp, xs_del_exlist);
+
+       isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+       del_endoff = del->br_startoff + del->br_blockcount;
+       got_endoff = got->br_startoff + got->br_blockcount;
+       da_old = startblockval(got->br_startblock);
+       da_new = 0;
+
+       ASSERT(*idx >= 0);
+       ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+       ASSERT(del->br_blockcount > 0);
+       ASSERT(got->br_startoff <= del->br_startoff);
+       ASSERT(got_endoff >= del_endoff);
+
+       if (isrt) {
+               int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
+
+               do_div(rtexts, mp->m_sb.sb_rextsize);
+               xfs_mod_frextents(mp, rtexts);
+       }
+
+       /*
+        * Update the inode delalloc counter now and wait to update the
+        * sb counters as we might have to borrow some blocks for the
+        * indirect block accounting.
+        */
+       xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,
+                       isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+       ip->i_delayed_blks -= del->br_blockcount;
+
+       if (whichfork == XFS_COW_FORK)
+               state |= BMAP_COWFORK;
+
+       if (got->br_startoff == del->br_startoff)
+               state |= BMAP_LEFT_CONTIG;
+       if (got_endoff == del_endoff)
+               state |= BMAP_RIGHT_CONTIG;
+
+       switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+       case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+               /*
+                * Matches the whole extent.  Delete the entry.
+                */
+               xfs_iext_remove(ip, *idx, 1, state);
+               --*idx;
+               break;
+       case BMAP_LEFT_CONTIG:
+               /*
+                * Deleting the first part of the extent.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               got->br_startoff = del_endoff;
+               got->br_blockcount -= del->br_blockcount;
+               da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
+                               got->br_blockcount), da_old);
+               got->br_startblock = nullstartblock((int)da_new);
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               break;
+       case BMAP_RIGHT_CONTIG:
+               /*
+                * Deleting the last part of the extent.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               got->br_blockcount = got->br_blockcount - del->br_blockcount;
+               da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
+                               got->br_blockcount), da_old);
+               got->br_startblock = nullstartblock((int)da_new);
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               break;
+       case 0:
+               /*
+                * Deleting the middle of the extent.
+                *
+                * Distribute the original indlen reservation across the two new
+                * extents.  Steal blocks from the deleted extent if necessary.
+                * Stealing blocks simply fudges the fdblocks accounting below.
+                * Warn if either of the new indlen reservations is zero as this
+                * can lead to delalloc problems.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+
+               got->br_blockcount = del->br_startoff - got->br_startoff;
+               got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
+
+               new.br_blockcount = got_endoff - del_endoff;
+               new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
+
+               WARN_ON_ONCE(!got_indlen || !new_indlen);
+               stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
+                                                      del->br_blockcount);
+
+               got->br_startblock = nullstartblock((int)got_indlen);
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+               trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
+
+               new.br_startoff = del_endoff;
+               new.br_state = got->br_state;
+               new.br_startblock = nullstartblock((int)new_indlen);
+
+               ++*idx;
+               xfs_iext_insert(ip, *idx, 1, &new, state);
+
+               da_new = got_indlen + new_indlen - stolen;
+               del->br_blockcount -= stolen;
+               break;
+       }
+
+       ASSERT(da_old >= da_new);
+       da_diff = da_old - da_new;
+       if (!isrt)
+               da_diff += del->br_blockcount;
+       if (da_diff)
+               xfs_mod_fdblocks(mp, da_diff, false);
+       return error;
+}
+
+void
+xfs_bmap_del_extent_cow(
+       struct xfs_inode        *ip,
+       xfs_extnum_t            *idx,
+       struct xfs_bmbt_irec    *got,
+       struct xfs_bmbt_irec    *del)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+       struct xfs_bmbt_irec    new;
+       xfs_fileoff_t           del_endoff, got_endoff;
+       int                     state = BMAP_COWFORK;
+
+       XFS_STATS_INC(mp, xs_del_exlist);
+
+       del_endoff = del->br_startoff + del->br_blockcount;
+       got_endoff = got->br_startoff + got->br_blockcount;
+
+       ASSERT(*idx >= 0);
+       ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+       ASSERT(del->br_blockcount > 0);
+       ASSERT(got->br_startoff <= del->br_startoff);
+       ASSERT(got_endoff >= del_endoff);
+       ASSERT(!isnullstartblock(got->br_startblock));
+
+       if (got->br_startoff == del->br_startoff)
+               state |= BMAP_LEFT_CONTIG;
+       if (got_endoff == del_endoff)
+               state |= BMAP_RIGHT_CONTIG;
+
+       switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+       case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+               /*
+                * Matches the whole extent.  Delete the entry.
+                */
+               xfs_iext_remove(ip, *idx, 1, state);
+               --*idx;
+               break;
+       case BMAP_LEFT_CONTIG:
+               /*
+                * Deleting the first part of the extent.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               got->br_startoff = del_endoff;
+               got->br_blockcount -= del->br_blockcount;
+               got->br_startblock = del->br_startblock + del->br_blockcount;
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               break;
+       case BMAP_RIGHT_CONTIG:
+               /*
+                * Deleting the last part of the extent.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               got->br_blockcount -= del->br_blockcount;
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               break;
+       case 0:
+               /*
+                * Deleting the middle of the extent.
+                */
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               got->br_blockcount = del->br_startoff - got->br_startoff;
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               new.br_startoff = del_endoff;
+               new.br_blockcount = got_endoff - del_endoff;
+               new.br_state = got->br_state;
+               new.br_startblock = del->br_startblock + del->br_blockcount;
+
+               ++*idx;
+               xfs_iext_insert(ip, *idx, 1, &new, state);
+               break;
+       }
+}
+
 /*
  * Called by xfs_bmapi to update file extent records and the btree
  * after removing space (or undoing a delayed allocation).
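
Both new helpers classify the deletion against the existing extent with two bits, BMAP_LEFT_CONTIG (same start) and BMAP_RIGHT_CONTIG (same end), which yields the four cases in their switch statements. A minimal interval-only version of that classification (the indlen, quota and fork accounting is deliberately dropped):

    #include <stdio.h>
    #include <stdint.h>

    #define LEFT_CONTIG  (1 << 0)
    #define RIGHT_CONTIG (1 << 1)

    int main(void)
    {
            uint64_t got_start = 100, got_count = 50;  /* existing extent */
            uint64_t del_start = 110, del_count = 10;  /* range to delete */
            uint64_t got_end = got_start + got_count;
            uint64_t del_end = del_start + del_count;
            int state = 0;

            if (got_start == del_start)
                    state |= LEFT_CONTIG;
            if (got_end == del_end)
                    state |= RIGHT_CONTIG;

            switch (state) {
            case LEFT_CONTIG | RIGHT_CONTIG:
                    puts("whole extent: remove the record");
                    break;
            case LEFT_CONTIG:
                    puts("front: bump startoff, shrink blockcount");
                    break;
            case RIGHT_CONTIG:
                    puts("tail: shrink blockcount");
                    break;
            case 0:
                    puts("middle: shrink in place, insert a new right half");
                    break;
            }
            return 0;
    }
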
@@ -5171,175 +5414,6 @@ done:
        return error;
 }
 
-/* Remove an extent from the CoW fork.  Similar to xfs_bmap_del_extent. */
-int
-xfs_bunmapi_cow(
-       struct xfs_inode                *ip,
-       struct xfs_bmbt_irec            *del)
-{
-       xfs_filblks_t                   da_new;
-       xfs_filblks_t                   da_old;
-       xfs_fsblock_t                   del_endblock = 0;
-       xfs_fileoff_t                   del_endoff;
-       int                             delay;
-       struct xfs_bmbt_rec_host        *ep;
-       int                             error;
-       struct xfs_bmbt_irec            got;
-       xfs_fileoff_t                   got_endoff;
-       struct xfs_ifork                *ifp;
-       struct xfs_mount                *mp;
-       xfs_filblks_t                   nblks;
-       struct xfs_bmbt_irec            new;
-       /* REFERENCED */
-       uint                            qfield;
-       xfs_filblks_t                   temp;
-       xfs_filblks_t                   temp2;
-       int                             state = BMAP_COWFORK;
-       int                             eof;
-       xfs_extnum_t                    eidx;
-
-       mp = ip->i_mount;
-       XFS_STATS_INC(mp, xs_del_exlist);
-
-       ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,
-                       &eidx, &got, &new);
-
-       ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;
-       ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /
-               (uint)sizeof(xfs_bmbt_rec_t)));
-       ASSERT(del->br_blockcount > 0);
-       ASSERT(got.br_startoff <= del->br_startoff);
-       del_endoff = del->br_startoff + del->br_blockcount;
-       got_endoff = got.br_startoff + got.br_blockcount;
-       ASSERT(got_endoff >= del_endoff);
-       delay = isnullstartblock(got.br_startblock);
-       ASSERT(isnullstartblock(del->br_startblock) == delay);
-       qfield = 0;
-       error = 0;
-       /*
-        * If deleting a real allocation, must free up the disk space.
-        */
-       if (!delay) {
-               nblks = del->br_blockcount;
-               qfield = XFS_TRANS_DQ_BCOUNT;
-               /*
-                * Set up del_endblock and cur for later.
-                */
-               del_endblock = del->br_startblock + del->br_blockcount;
-               da_old = da_new = 0;
-       } else {
-               da_old = startblockval(got.br_startblock);
-               da_new = 0;
-               nblks = 0;
-       }
-       qfield = qfield;
-       nblks = nblks;
-
-       /*
-        * Set flag value to use in switch statement.
-        * Left-contig is 2, right-contig is 1.
-        */
-       switch (((got.br_startoff == del->br_startoff) << 1) |
-               (got_endoff == del_endoff)) {
-       case 3:
-               /*
-                * Matches the whole extent.  Delete the entry.
-                */
-               xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);
-               --eidx;
-               break;
-
-       case 2:
-               /*
-                * Deleting the first part of the extent.
-                */
-               trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
-               xfs_bmbt_set_startoff(ep, del_endoff);
-               temp = got.br_blockcount - del->br_blockcount;
-               xfs_bmbt_set_blockcount(ep, temp);
-               if (delay) {
-                       temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
-                               da_old);
-                       xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-                       trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
-                       da_new = temp;
-                       break;
-               }
-               xfs_bmbt_set_startblock(ep, del_endblock);
-               trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
-               break;
-
-       case 1:
-               /*
-                * Deleting the last part of the extent.
-                */
-               temp = got.br_blockcount - del->br_blockcount;
-               trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(ep, temp);
-               if (delay) {
-                       temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
-                               da_old);
-                       xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-                       trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
-                       da_new = temp;
-                       break;
-               }
-               trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
-               break;
-
-       case 0:
-               /*
-                * Deleting the middle of the extent.
-                */
-               temp = del->br_startoff - got.br_startoff;
-               trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(ep, temp);
-               new.br_startoff = del_endoff;
-               temp2 = got_endoff - del_endoff;
-               new.br_blockcount = temp2;
-               new.br_state = got.br_state;
-               if (!delay) {
-                       new.br_startblock = del_endblock;
-               } else {
-                       temp = xfs_bmap_worst_indlen(ip, temp);
-                       xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-                       temp2 = xfs_bmap_worst_indlen(ip, temp2);
-                       new.br_startblock = nullstartblock((int)temp2);
-                       da_new = temp + temp2;
-                       while (da_new > da_old) {
-                               if (temp) {
-                                       temp--;
-                                       da_new--;
-                                       xfs_bmbt_set_startblock(ep,
-                                               nullstartblock((int)temp));
-                               }
-                               if (da_new == da_old)
-                                       break;
-                               if (temp2) {
-                                       temp2--;
-                                       da_new--;
-                                       new.br_startblock =
-                                               nullstartblock((int)temp2);
-                               }
-                       }
-               }
-               trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
-               xfs_iext_insert(ip, eidx + 1, 1, &new, state);
-               ++eidx;
-               break;
-       }
-
-       /*
-        * Account for change in delayed indirect blocks.
-        * Nothing to do for disk quota accounting here.
-        */
-       ASSERT(da_old >= da_new);
-       if (da_old > da_new)
-               xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
-
-       return error;
-}
-
 /*
  * Unmap (remove) blocks from a file.
  * If nexts is nonzero then the number of extents to remove is limited to
index f97db71..7cae6ec 100644 (file)
@@ -190,6 +190,8 @@ void        xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 #define        XFS_BMAP_TRACE_EXLIST(ip,c,w)
 #endif
 
+void   xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
+               xfs_filblks_t len);
 int    xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void   xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void   xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
@@ -221,7 +223,11 @@ int        xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t bno, xfs_filblks_t len, int flags,
                xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
                struct xfs_defer_ops *dfops, int *done);
-int    xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del);
+int    xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
+               xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
+               struct xfs_bmbt_irec *del);
+void   xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
+               struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
 int    xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
                xfs_extnum_t num);
 uint   xfs_default_attroffset(struct xfs_inode *ip);
index 5c8e6f2..0e80993 100644 (file)
@@ -4826,7 +4826,7 @@ xfs_btree_calc_size(
        return rval;
 }
 
-int
+static int
 xfs_btree_count_blocks_helper(
        struct xfs_btree_cur    *cur,
        int                     level,
index 3cc3cf7..ac9a003 100644 (file)
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
-               ndquots = xfs_calc_dquots_per_chunk(
-                                       XFS_BB_TO_FSB(mp, bp->b_length));
+               ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
 
        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
index f6547fc..6b7579e 100644 (file)
@@ -865,7 +865,6 @@ typedef struct xfs_timestamp {
  * padding field for v3 inodes.
  */
 #define        XFS_DINODE_MAGIC                0x494e  /* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v)     ((v) >= 1 && (v) <= 3)
 typedef struct xfs_dinode {
        __be16          di_magic;       /* inode magic # = XFS_DINODE_MAGIC */
        __be16          di_mode;        /* mode and type of file */
index 8de9a3a..134424f 100644 (file)
@@ -57,6 +57,17 @@ xfs_inobp_check(
 }
 #endif
 
+bool
+xfs_dinode_good_version(
+       struct xfs_mount *mp,
+       __u8            version)
+{
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               return version == 3;
+
+       return version == 1 || version == 2;
+}
+
 /*
  * If we are doing readahead on an inode buffer, we might be in log recovery
  * reading an inode allocation buffer that hasn't yet been replayed, and hence
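
The new helper replaces the fixed 1..3 range of XFS_DINODE_GOOD_VERSION() with a mount-dependent check: CRC-enabled (v5) filesystems accept only v3 inodes. A tiny sketch of the predicate (the bool parameter stands in for xfs_sb_version_hascrc()):

    #include <stdio.h>
    #include <stdbool.h>

    static bool dinode_good_version(bool has_crc, unsigned version)
    {
            if (has_crc)
                    return version == 3;    /* v5 filesystems: v3 only */
            return version == 1 || version == 2;
    }

    int main(void)
    {
            printf("crc fs, v2 inode: %d\n", dinode_good_version(true, 2));
            printf("non-crc fs, v2 inode: %d\n", dinode_good_version(false, 2));
            return 0;
    }
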
@@ -91,7 +102,7 @@ xfs_inode_buf_verify(
 
                dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
                di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
-                           XFS_DINODE_GOOD_VERSION(dip->di_version);
+                       xfs_dinode_good_version(mp, dip->di_version);
                if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
                                                XFS_ERRTAG_ITOBP_INOTOBP,
                                                XFS_RANDOM_ITOBP_INOTOBP))) {
index 62d9d46..3cfe12a 100644 (file)
@@ -74,6 +74,8 @@ void  xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
 void   xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
                               struct xfs_dinode *to);
 
+bool   xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
+
 #if defined(DEBUG)
 void   xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
 #else
index a314fc7..6e4f7f9 100644 (file)
@@ -249,6 +249,7 @@ xfs_file_dio_aio_read(
        struct xfs_inode        *ip = XFS_I(inode);
        loff_t                  isize = i_size_read(inode);
        size_t                  count = iov_iter_count(to);
+       loff_t                  end = iocb->ki_pos + count - 1;
        struct iov_iter         data;
        struct xfs_buftarg      *target;
        ssize_t                 ret = 0;
@@ -272,49 +273,21 @@ xfs_file_dio_aio_read(
 
        file_accessed(iocb->ki_filp);
 
-       /*
-        * Locking is a bit tricky here. If we take an exclusive lock for direct
-        * IO, we effectively serialise all new concurrent read IO to this file
-        * and block it behind IO that is currently in progress because IO in
-        * progress holds the IO lock shared. We only need to hold the lock
-        * exclusive to blow away the page cache, so only take lock exclusively
-        * if the page cache needs invalidation. This allows the normal direct
-        * IO case of no page cache pages to proceeed concurrently without
-        * serialisation.
-        */
        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
        if (mapping->nrpages) {
-               xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-               xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+               ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+               if (ret)
+                       goto out_unlock;
 
                /*
-                * The generic dio code only flushes the range of the particular
-                * I/O. Because we take an exclusive lock here, this whole
-                * sequence is considerably more expensive for us. This has a
-                * noticeable performance impact for any file with cached pages,
-                * even when outside of the range of the particular I/O.
-                *
-                * Hence, amortize the cost of the lock against a full file
-                * flush and reduce the chances of repeated iolock cycles going
-                * forward.
+                * Invalidate whole pages. This can return an error if we fail
+                * to invalidate a page, but this should never happen on XFS.
+                * Warn if it does fail.
                 */
-               if (mapping->nrpages) {
-                       ret = filemap_write_and_wait(mapping);
-                       if (ret) {
-                               xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
-                               return ret;
-                       }
-
-                       /*
-                        * Invalidate whole pages. This can return an error if
-                        * we fail to invalidate a page, but this should never
-                        * happen on XFS. Warn if it does fail.
-                        */
-                       ret = invalidate_inode_pages2(mapping);
-                       WARN_ON_ONCE(ret);
-                       ret = 0;
-               }
-               xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+               ret = invalidate_inode_pages2_range(mapping,
+                               iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+               WARN_ON_ONCE(ret);
+               ret = 0;
        }
 
        data = *to;
@@ -324,8 +297,9 @@ xfs_file_dio_aio_read(
                iocb->ki_pos += ret;
                iov_iter_advance(to, ret);
        }
-       xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
+out_unlock:
+       xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
 }
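
The rewritten read path flushes and invalidates only the I/O's own byte range rather than the whole file; the window handed to invalidate_inode_pages2_range() spans the page indexes of ki_pos through end = ki_pos + count - 1, inclusive. The index arithmetic, standalone (hypothetical pos/count):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint64_t pos = 5000, count = 10000;     /* hypothetical direct I/O */
            uint64_t end = pos + count - 1;         /* inclusive last byte */

            printf("invalidate pages %llu..%llu\n",
                   (unsigned long long)(pos >> PAGE_SHIFT),
                   (unsigned long long)(end >> PAGE_SHIFT));
            return 0;
    }
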
 
@@ -570,61 +544,49 @@ xfs_file_dio_aio_write(
        if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
                return -EINVAL;
 
-       /* "unaligned" here means not aligned to a filesystem block */
-       if ((iocb->ki_pos & mp->m_blockmask) ||
-           ((iocb->ki_pos + count) & mp->m_blockmask))
-               unaligned_io = 1;
-
        /*
-        * We don't need to take an exclusive lock unless there page cache needs
-        * to be invalidated or unaligned IO is being executed. We don't need to
-        * consider the EOF extension case here because
-        * xfs_file_aio_write_checks() will relock the inode as necessary for
-        * EOF zeroing cases and fill out the new inode size as appropriate.
+        * Don't take the exclusive iolock here unless the I/O is unaligned to
+        * the file system block size.  We don't need to consider the EOF
+        * extension case here because xfs_file_aio_write_checks() will relock
+        * the inode as necessary for EOF zeroing cases and fill out the new
+        * inode size as appropriate.
         */
-       if (unaligned_io || mapping->nrpages)
+       if ((iocb->ki_pos & mp->m_blockmask) ||
+           ((iocb->ki_pos + count) & mp->m_blockmask)) {
+               unaligned_io = 1;
                iolock = XFS_IOLOCK_EXCL;
-       else
+       } else {
                iolock = XFS_IOLOCK_SHARED;
-       xfs_rw_ilock(ip, iolock);
-
-       /*
-        * Recheck if there are cached pages that need invalidate after we got
-        * the iolock to protect against other threads adding new pages while
-        * we were waiting for the iolock.
-        */
-       if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
-               xfs_rw_iunlock(ip, iolock);
-               iolock = XFS_IOLOCK_EXCL;
-               xfs_rw_ilock(ip, iolock);
        }
 
+       xfs_rw_ilock(ip, iolock);
+
        ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
        count = iov_iter_count(from);
        end = iocb->ki_pos + count - 1;
 
-       /*
-        * See xfs_file_dio_aio_read() for why we do a full-file flush here.
-        */
        if (mapping->nrpages) {
-               ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+               ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
                if (ret)
                        goto out;
+
                /*
                 * Invalidate whole pages. This can return an error if we fail
                 * to invalidate a page, but this should never happen on XFS.
                 * Warn if it does fail.
                 */
-               ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+               ret = invalidate_inode_pages2_range(mapping,
+                               iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
                WARN_ON_ONCE(ret);
                ret = 0;
        }
 
        /*
         * If we are doing unaligned IO, wait for all other IO to drain,
-        * otherwise demote the lock if we had to flush cached pages
+        * otherwise demote the lock if we had to take the exclusive lock
+        * for other reasons in xfs_file_aio_write_checks.
         */
        if (unaligned_io)
                inode_dio_wait(inode);
@@ -947,134 +909,6 @@ out_unlock:
        return error;
 }
 
-/*
- * Flush all file writes out to disk.
- */
-static int
-xfs_file_wait_for_io(
-       struct inode    *inode,
-       loff_t          offset,
-       size_t          len)
-{
-       loff_t          rounding;
-       loff_t          ioffset;
-       loff_t          iendoffset;
-       loff_t          bs;
-       int             ret;
-
-       bs = inode->i_sb->s_blocksize;
-       inode_dio_wait(inode);
-
-       rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
-       ioffset = round_down(offset, rounding);
-       iendoffset = round_up(offset + len, rounding) - 1;
-       ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
-                                          iendoffset);
-       return ret;
-}
-
-/* Hook up to the VFS reflink function */
-STATIC int
-xfs_file_share_range(
-       struct file     *file_in,
-       loff_t          pos_in,
-       struct file     *file_out,
-       loff_t          pos_out,
-       u64             len,
-       bool            is_dedupe)
-{
-       struct inode    *inode_in;
-       struct inode    *inode_out;
-       ssize_t         ret;
-       loff_t          bs;
-       loff_t          isize;
-       int             same_inode;
-       loff_t          blen;
-       unsigned int    flags = 0;
-
-       inode_in = file_inode(file_in);
-       inode_out = file_inode(file_out);
-       bs = inode_out->i_sb->s_blocksize;
-
-       /* Don't touch certain kinds of inodes */
-       if (IS_IMMUTABLE(inode_out))
-               return -EPERM;
-       if (IS_SWAPFILE(inode_in) ||
-           IS_SWAPFILE(inode_out))
-               return -ETXTBSY;
-
-       /* Reflink only works within this filesystem. */
-       if (inode_in->i_sb != inode_out->i_sb)
-               return -EXDEV;
-       same_inode = (inode_in->i_ino == inode_out->i_ino);
-
-       /* Don't reflink dirs, pipes, sockets... */
-       if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
-               return -EISDIR;
-       if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
-               return -EINVAL;
-       if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
-               return -EINVAL;
-
-       /* Don't share DAX file data for now. */
-       if (IS_DAX(inode_in) || IS_DAX(inode_out))
-               return -EINVAL;
-
-       /* Are we going all the way to the end? */
-       isize = i_size_read(inode_in);
-       if (isize == 0)
-               return 0;
-       if (len == 0)
-               len = isize - pos_in;
-
-       /* Ensure offsets don't wrap and the input is inside i_size */
-       if (pos_in + len < pos_in || pos_out + len < pos_out ||
-           pos_in + len > isize)
-               return -EINVAL;
-
-       /* Don't allow dedupe past EOF in the dest file */
-       if (is_dedupe) {
-               loff_t  disize;
-
-               disize = i_size_read(inode_out);
-               if (pos_out >= disize || pos_out + len > disize)
-                       return -EINVAL;
-       }
-
-       /* If we're linking to EOF, continue to the block boundary. */
-       if (pos_in + len == isize)
-               blen = ALIGN(isize, bs) - pos_in;
-       else
-               blen = len;
-
-       /* Only reflink if we're aligned to block boundaries */
-       if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
-           !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
-               return -EINVAL;
-
-       /* Don't allow overlapped reflink within the same file */
-       if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
-               return -EINVAL;
-
-       /* Wait for the completion of any pending IOs on srcfile */
-       ret = xfs_file_wait_for_io(inode_in, pos_in, len);
-       if (ret)
-               goto out;
-       ret = xfs_file_wait_for_io(inode_out, pos_out, len);
-       if (ret)
-               goto out;
-
-       if (is_dedupe)
-               flags |= XFS_REFLINK_DEDUPE;
-       ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
-                       pos_out, len, flags);
-       if (ret < 0)
-               goto out;
-
-out:
-       return ret;
-}
-
 STATIC ssize_t
 xfs_file_copy_range(
        struct file     *file_in,
@@ -1086,7 +920,7 @@ xfs_file_copy_range(
 {
        int             error;
 
-       error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+       error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
                                     len, false);
        if (error)
                return error;
@@ -1101,7 +935,7 @@ xfs_file_clone_range(
        loff_t          pos_out,
        u64             len)
 {
-       return xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+       return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
                                     len, false);
 }
 
@@ -1124,7 +958,7 @@ xfs_file_dedupe_range(
        if (len > XFS_MAX_DEDUPE_LEN)
                len = XFS_MAX_DEDUPE_LEN;
 
-       error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,
+       error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
                                     len, true);
        if (error)
                return error;
index 14796b7..f295049 100644 (file)
@@ -1656,9 +1656,9 @@ void
 xfs_inode_set_cowblocks_tag(
        xfs_inode_t     *ip)
 {
-       trace_xfs_inode_set_eofblocks_tag(ip);
+       trace_xfs_inode_set_cowblocks_tag(ip);
        return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
-                       trace_xfs_perag_set_eofblocks,
+                       trace_xfs_perag_set_cowblocks,
                        XFS_ICI_COWBLOCKS_TAG);
 }
 
@@ -1666,7 +1666,7 @@ void
 xfs_inode_clear_cowblocks_tag(
        xfs_inode_t     *ip)
 {
-       trace_xfs_inode_clear_eofblocks_tag(ip);
+       trace_xfs_inode_clear_cowblocks_tag(ip);
        return __xfs_inode_clear_eofblocks_tag(ip,
-                       trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG);
+                       trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
index d907eb9..436e109 100644 (file)
@@ -566,6 +566,17 @@ xfs_file_iomap_begin_delay(
        xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
                        &got, &prev);
        if (!eof && got.br_startoff <= offset_fsb) {
+               if (xfs_is_reflink_inode(ip)) {
+                       bool            shared;
+
+                       end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
+                                       maxbytes_fsb);
+                       xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
+                       error = xfs_reflink_reserve_cow(ip, &got, &shared);
+                       if (error)
+                               goto out_unlock;
+               }
+
                trace_xfs_iomap_found(ip, offset, count, 0, &got);
                goto done;
        }
@@ -961,19 +972,13 @@ xfs_file_iomap_begin(
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        xfs_fileoff_t           offset_fsb, end_fsb;
-       bool                    shared, trimmed;
        int                     nimaps = 1, error = 0;
+       bool                    shared = false, trimmed = false;
        unsigned                lockmode;
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;
 
-       if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
-               error = xfs_reflink_reserve_cow_range(ip, offset, length);
-               if (error < 0)
-                       return error;
-       }
-
        if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
                   !xfs_get_extsz_hint(ip)) {
                /* Reserve delalloc blocks for regular writeback. */
@@ -981,7 +986,16 @@ xfs_file_iomap_begin(
                                iomap);
        }
 
-       lockmode = xfs_ilock_data_map_shared(ip);
+       /*
+        * COW writes will allocate delalloc space, so we need to make sure
+        * to take the lock exclusively here.
+        */
+       if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+               lockmode = XFS_ILOCK_EXCL;
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+       } else {
+               lockmode = xfs_ilock_data_map_shared(ip);
+       }
 
        ASSERT(offset <= mp->m_super->s_maxbytes);
        if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -991,16 +1005,24 @@ xfs_file_iomap_begin(
 
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
-       if (error) {
-               xfs_iunlock(ip, lockmode);
-               return error;
+       if (error)
+               goto out_unlock;
+
+       if (flags & IOMAP_REPORT) {
+               /* Trim the mapping to the nearest shared extent boundary. */
+               error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
+                               &trimmed);
+               if (error)
+                       goto out_unlock;
        }
 
-       /* Trim the mapping to the nearest shared extent boundary. */
-       error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
-       if (error) {
-               xfs_iunlock(ip, lockmode);
-               return error;
+       if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+               error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+               if (error)
+                       goto out_unlock;
+
+               end_fsb = imap.br_startoff + imap.br_blockcount;
+               length = XFS_FSB_TO_B(mp, end_fsb) - offset;
        }
 
        if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
@@ -1039,6 +1061,9 @@ xfs_file_iomap_begin(
        if (shared)
                iomap->flags |= IOMAP_F_SHARED;
        return 0;
+out_unlock:
+       xfs_iunlock(ip, lockmode);
+       return error;
 }
 
 static int
index fc78739..b341f10 100644 (file)
@@ -1009,6 +1009,7 @@ xfs_mountfs(
  out_quota:
        xfs_qm_unmount_quotas(mp);
  out_rtunmount:
+       mp->m_super->s_flags &= ~MS_ACTIVE;
        xfs_rtunmount_inodes(mp);
  out_rele_rip:
        IRELE(rip);
index 5965e94..a279b4e 100644 (file)
@@ -182,7 +182,8 @@ xfs_reflink_trim_around_shared(
        if (!xfs_is_reflink_inode(ip) ||
            ISUNWRITTEN(irec) ||
            irec->br_startblock == HOLESTARTBLOCK ||
-           irec->br_startblock == DELAYSTARTBLOCK) {
+           irec->br_startblock == DELAYSTARTBLOCK ||
+           isnullstartblock(irec->br_startblock)) {
                *shared = false;
                return 0;
        }
@@ -227,50 +228,54 @@ xfs_reflink_trim_around_shared(
        }
 }
 
-/* Create a CoW reservation for a range of blocks within a file. */
-static int
-__xfs_reflink_reserve_cow(
+/*
+ * Trim the passed-in imap to the next shared/unshared extent boundary, and
+ * if imap->br_startoff points to a shared extent, reserve space for it in the
+ * COW fork.  In this case *shared is set to true, else to false.
+ *
+ * Note that imap will always contain the block numbers for the existing blocks
+ * in the data fork, as the upper layers need them for read-modify-write
+ * operations.
+ */
+int
+xfs_reflink_reserve_cow(
        struct xfs_inode        *ip,
-       xfs_fileoff_t           *offset_fsb,
-       xfs_fileoff_t           end_fsb,
-       bool                    *skipped)
+       struct xfs_bmbt_irec    *imap,
+       bool                    *shared)
 {
-       struct xfs_bmbt_irec    got, prev, imap;
-       xfs_fileoff_t           orig_end_fsb;
-       int                     nimaps, eof = 0, error = 0;
-       bool                    shared = false, trimmed = false;
+       struct xfs_bmbt_irec    got, prev;
+       xfs_fileoff_t           end_fsb, orig_end_fsb;
+       int                     eof = 0, error = 0;
+       bool                    trimmed;
        xfs_extnum_t            idx;
        xfs_extlen_t            align;
 
-       /* Already reserved?  Skip the refcount btree access. */
-       xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx,
+       /*
+        * Search the COW fork extent list first.  This serves two purposes:
+        * first this implements the speculative preallocation using cowextsize,
+        * so that we also unshare blocks adjacent to shared blocks instead
+        * of just the shared blocks themselves.  Second, the lookup in the
+        * extent list is generally faster than going out to the shared extent
+        * tree.
+        */
+       xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx,
                        &got, &prev);
-       if (!eof && got.br_startoff <= *offset_fsb) {
-               end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount;
-               trace_xfs_reflink_cow_found(ip, &got);
-               goto done;
-       }
+       if (!eof && got.br_startoff <= imap->br_startoff) {
+               trace_xfs_reflink_cow_found(ip, imap);
+               xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
 
-       /* Read extent from the source file. */
-       nimaps = 1;
-       error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
-                       &imap, &nimaps, 0);
-       if (error)
-               goto out_unlock;
-       ASSERT(nimaps == 1);
+               *shared = true;
+               return 0;
+       }
 
        /* Trim the mapping to the nearest shared extent boundary. */
-       error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
+       error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
        if (error)
-               goto out_unlock;
-
-       end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount;
+               return error;
 
        /* Not shared?  Just report the (potentially capped) extent. */
-       if (!shared) {
-               *skipped = true;
-               goto done;
-       }
+       if (!*shared)
+               return 0;
 
        /*
         * Fork all the shared blocks from our write offset until the end of
@@ -278,72 +283,38 @@ __xfs_reflink_reserve_cow(
         */
        error = xfs_qm_dqattach_locked(ip, 0);
        if (error)
-               goto out_unlock;
+               return error;
+
+       end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount;
 
        align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip));
        if (align)
                end_fsb = roundup_64(end_fsb, align);
 
 retry:
-       error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb,
-                       end_fsb - *offset_fsb, &got,
-                       &prev, &idx, eof);
+       error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
+                       end_fsb - imap->br_startoff, &got, &prev, &idx, eof);
        switch (error) {
        case 0:
                break;
        case -ENOSPC:
        case -EDQUOT:
                /* retry without any preallocation */
-               trace_xfs_reflink_cow_enospc(ip, &imap);
+               trace_xfs_reflink_cow_enospc(ip, imap);
                if (end_fsb != orig_end_fsb) {
                        end_fsb = orig_end_fsb;
                        goto retry;
                }
                /*FALLTHRU*/
        default:
-               goto out_unlock;
+               return error;
        }
 
        if (end_fsb != orig_end_fsb)
                xfs_inode_set_cowblocks_tag(ip);
 
        trace_xfs_reflink_cow_alloc(ip, &got);
-done:
-       *offset_fsb = end_fsb;
-out_unlock:
-       return error;
-}
-
-/* Create a CoW reservation for part of a file. */
-int
-xfs_reflink_reserve_cow_range(
-       struct xfs_inode        *ip,
-       xfs_off_t               offset,
-       xfs_off_t               count)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       xfs_fileoff_t           offset_fsb, end_fsb;
-       bool                    skipped = false;
-       int                     error;
-
-       trace_xfs_reflink_reserve_cow_range(ip, offset, count);
-
-       offset_fsb = XFS_B_TO_FSBT(mp, offset);
-       end_fsb = XFS_B_TO_FSB(mp, offset + count);
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       while (offset_fsb < end_fsb) {
-               error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb,
-                               &skipped);
-               if (error) {
-                       trace_xfs_reflink_reserve_cow_range_error(ip, error,
-                               _RET_IP_);
-                       break;
-               }
-       }
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-       return error;
+       return 0;
 }
 
 /* Allocate all CoW reservations covering a range of blocks in a file. */
@@ -358,9 +329,8 @@ __xfs_reflink_allocate_cow(
        struct xfs_defer_ops    dfops;
        struct xfs_trans        *tp;
        xfs_fsblock_t           first_block;
-       xfs_fileoff_t           next_fsb;
        int                     nimaps = 1, error;
-       bool                    skipped = false;
+       bool                    shared;
 
        xfs_defer_init(&dfops, &first_block);
 
@@ -371,33 +341,38 @@ __xfs_reflink_allocate_cow(
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
 
-       next_fsb = *offset_fsb;
-       error = __xfs_reflink_reserve_cow(ip, &next_fsb, end_fsb, &skipped);
+       /* Read extent from the source file. */
+       nimaps = 1;
+       error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
+                       &imap, &nimaps, 0);
+       if (error)
+               goto out_unlock;
+       ASSERT(nimaps == 1);
+
+       error = xfs_reflink_reserve_cow(ip, &imap, &shared);
        if (error)
                goto out_trans_cancel;
 
-       if (skipped) {
-               *offset_fsb = next_fsb;
+       if (!shared) {
+               *offset_fsb = imap.br_startoff + imap.br_blockcount;
                goto out_trans_cancel;
        }
 
        xfs_trans_ijoin(tp, ip, 0);
-       error = xfs_bmapi_write(tp, ip, *offset_fsb, next_fsb - *offset_fsb,
+       error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
                        XFS_BMAPI_COWFORK, &first_block,
                        XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
                        &imap, &nimaps, &dfops);
        if (error)
                goto out_trans_cancel;
 
-       /* We might not have been able to map the whole delalloc extent */
-       *offset_fsb = min(*offset_fsb + imap.br_blockcount, next_fsb);
-
        error = xfs_defer_finish(&tp, &dfops, NULL);
        if (error)
                goto out_trans_cancel;
 
        error = xfs_trans_commit(tp);
 
+       *offset_fsb = imap.br_startoff + imap.br_blockcount;
 out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
@@ -536,58 +511,49 @@ xfs_reflink_cancel_cow_blocks(
        xfs_fileoff_t                   offset_fsb,
        xfs_fileoff_t                   end_fsb)
 {
-       struct xfs_bmbt_irec            irec;
-       xfs_filblks_t                   count_fsb;
+       struct xfs_ifork                *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+       struct xfs_bmbt_irec            got, prev, del;
+       xfs_extnum_t                    idx;
        xfs_fsblock_t                   firstfsb;
        struct xfs_defer_ops            dfops;
-       int                             error = 0;
-       int                             nimaps;
+       int                             error = 0, eof = 0;
 
        if (!xfs_is_reflink_inode(ip))
                return 0;
 
-       /* Go find the old extent in the CoW fork. */
-       while (offset_fsb < end_fsb) {
-               nimaps = 1;
-               count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
-               error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
-                               &nimaps, XFS_BMAPI_COWFORK);
-               if (error)
-                       break;
-               ASSERT(nimaps == 1);
-
-               trace_xfs_reflink_cancel_cow(ip, &irec);
+       xfs_bmap_search_extents(ip, offset_fsb, XFS_COW_FORK, &eof, &idx,
+                       &got, &prev);
+       if (eof)
+               return 0;
 
-               if (irec.br_startblock == DELAYSTARTBLOCK) {
-                       /* Free a delayed allocation. */
-                       xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount,
-                                       false);
-                       ip->i_delayed_blks -= irec.br_blockcount;
+       while (got.br_startoff < end_fsb) {
+               del = got;
+               xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+               trace_xfs_reflink_cancel_cow(ip, &del);
 
-                       /* Remove the mapping from the CoW fork. */
-                       error = xfs_bunmapi_cow(ip, &irec);
+               if (isnullstartblock(del.br_startblock)) {
+                       error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
+                                       &idx, &got, &del);
                        if (error)
                                break;
-               } else if (irec.br_startblock == HOLESTARTBLOCK) {
-                       /* empty */
                } else {
                        xfs_trans_ijoin(*tpp, ip, 0);
                        xfs_defer_init(&dfops, &firstfsb);
 
                        /* Free the CoW orphan record. */
                        error = xfs_refcount_free_cow_extent(ip->i_mount,
-                                       &dfops, irec.br_startblock,
-                                       irec.br_blockcount);
+                                       &dfops, del.br_startblock,
+                                       del.br_blockcount);
                        if (error)
                                break;
 
                        xfs_bmap_add_free(ip->i_mount, &dfops,
-                                       irec.br_startblock, irec.br_blockcount,
+                                       del.br_startblock, del.br_blockcount,
                                        NULL);
 
                        /* Update quota accounting */
                        xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
-                                       -(long)irec.br_blockcount);
+                                       -(long)del.br_blockcount);
 
                        /* Roll the transaction */
                        error = xfs_defer_finish(tpp, &dfops, ip);
@@ -597,15 +563,18 @@ xfs_reflink_cancel_cow_blocks(
                        }
 
                        /* Remove the mapping from the CoW fork. */
-                       error = xfs_bunmapi_cow(ip, &irec);
-                       if (error)
-                               break;
+                       xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
                }
 
-               /* Roll on... */
-               offset_fsb = irec.br_startoff + irec.br_blockcount;
+               if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec))
+                       break;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
        }
 
+       /* clear tag if cow fork is emptied */
+       if (!ifp->if_bytes)
+               xfs_inode_clear_cowblocks_tag(ip);
+
        return error;
 }
 
@@ -668,25 +637,26 @@ xfs_reflink_end_cow(
        xfs_off_t                       offset,
        xfs_off_t                       count)
 {
-       struct xfs_bmbt_irec            irec;
-       struct xfs_bmbt_irec            uirec;
+       struct xfs_ifork                *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+       struct xfs_bmbt_irec            got, prev, del;
        struct xfs_trans                *tp;
        xfs_fileoff_t                   offset_fsb;
        xfs_fileoff_t                   end_fsb;
-       xfs_filblks_t                   count_fsb;
        xfs_fsblock_t                   firstfsb;
        struct xfs_defer_ops            dfops;
-       int                             error;
+       int                             error, eof = 0;
        unsigned int                    resblks;
-       xfs_filblks_t                   ilen;
        xfs_filblks_t                   rlen;
-       int                             nimaps;
+       xfs_extnum_t                    idx;
 
        trace_xfs_reflink_end_cow(ip, offset, count);
 
+       /* No COW extents?  That's easy! */
+       if (ifp->if_bytes == 0)
+               return 0;
+
        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
-       count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
 
        /* Start a rolling transaction to switch the mappings */
        resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
@@ -698,72 +668,65 @@ xfs_reflink_end_cow(
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);
 
-       /* Go find the old extent in the CoW fork. */
-       while (offset_fsb < end_fsb) {
-               /* Read extent from the source file */
-               nimaps = 1;
-               count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
-               error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
-                               &nimaps, XFS_BMAPI_COWFORK);
-               if (error)
-                       goto out_cancel;
-               ASSERT(nimaps == 1);
+       xfs_bmap_search_extents(ip, end_fsb - 1, XFS_COW_FORK, &eof, &idx,
+                       &got, &prev);
 
-               ASSERT(irec.br_startblock != DELAYSTARTBLOCK);
-               trace_xfs_reflink_cow_remap(ip, &irec);
+       /* If there is a hole at end_fsb - 1, go to the previous extent */
+       if (eof || got.br_startoff > end_fsb) {
+               ASSERT(idx > 0);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
+       }
 
-               /*
-                * We can have a hole in the CoW fork if part of a directio
-                * write is CoW but part of it isn't.
-                */
-               rlen = ilen = irec.br_blockcount;
-               if (irec.br_startblock == HOLESTARTBLOCK)
+       /* Walk backwards until we're out of the I/O range... */
+       while (got.br_startoff + got.br_blockcount > offset_fsb) {
+               del = got;
+               xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+
+               /* Extent delete may have bumped idx forward */
+               if (!del.br_blockcount) {
+                       idx--;
                        goto next_extent;
+               }
+
+               ASSERT(!isnullstartblock(got.br_startblock));
 
                /* Unmap the old blocks in the data fork. */
-               while (rlen) {
-                       xfs_defer_init(&dfops, &firstfsb);
-                       error = __xfs_bunmapi(tp, ip, irec.br_startoff,
-                                       &rlen, 0, 1, &firstfsb, &dfops);
-                       if (error)
-                               goto out_defer;
-
-                       /*
-                        * Trim the extent to whatever got unmapped.
-                        * Remember, bunmapi works backwards.
-                        */
-                       uirec.br_startblock = irec.br_startblock + rlen;
-                       uirec.br_startoff = irec.br_startoff + rlen;
-                       uirec.br_blockcount = irec.br_blockcount - rlen;
-                       irec.br_blockcount = rlen;
-                       trace_xfs_reflink_cow_remap_piece(ip, &uirec);
+               xfs_defer_init(&dfops, &firstfsb);
+               rlen = del.br_blockcount;
+               error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
+                               &firstfsb, &dfops);
+               if (error)
+                       goto out_defer;
 
-                       /* Free the CoW orphan record. */
-                       error = xfs_refcount_free_cow_extent(tp->t_mountp,
-                                       &dfops, uirec.br_startblock,
-                                       uirec.br_blockcount);
-                       if (error)
-                               goto out_defer;
+               /* Trim the extent to whatever got unmapped. */
+               if (rlen) {
+                       xfs_trim_extent(&del, del.br_startoff + rlen,
+                               del.br_blockcount - rlen);
+               }
+               trace_xfs_reflink_cow_remap(ip, &del);
 
-                       /* Map the new blocks into the data fork. */
-                       error = xfs_bmap_map_extent(tp->t_mountp, &dfops,
-                                       ip, &uirec);
-                       if (error)
-                               goto out_defer;
+               /* Free the CoW orphan record. */
+               error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
+                               del.br_startblock, del.br_blockcount);
+               if (error)
+                       goto out_defer;
 
-                       /* Remove the mapping from the CoW fork. */
-                       error = xfs_bunmapi_cow(ip, &uirec);
-                       if (error)
-                               goto out_defer;
+               /* Map the new blocks into the data fork. */
+               error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
+               if (error)
+                       goto out_defer;
 
-                       error = xfs_defer_finish(&tp, &dfops, ip);
-                       if (error)
-                               goto out_defer;
-               }
+               /* Remove the mapping from the CoW fork. */
+               xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
+
+               error = xfs_defer_finish(&tp, &dfops, ip);
+               if (error)
+                       goto out_defer;
 
 next_extent:
-               /* Roll on... */
-               offset_fsb = irec.br_startoff + ilen;
+               if (idx < 0)
+                       break;
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
        }
 
        error = xfs_trans_commit(tp);
@@ -774,7 +737,6 @@ next_extent:
 
 out_defer:
        xfs_defer_cancel(&dfops);
-out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
@@ -1312,19 +1274,26 @@ out_error:
  */
 int
 xfs_reflink_remap_range(
-       struct xfs_inode        *src,
-       xfs_off_t               srcoff,
-       struct xfs_inode        *dest,
-       xfs_off_t               destoff,
-       xfs_off_t               len,
-       unsigned int            flags)
+       struct file             *file_in,
+       loff_t                  pos_in,
+       struct file             *file_out,
+       loff_t                  pos_out,
+       u64                     len,
+       bool                    is_dedupe)
 {
+       struct inode            *inode_in = file_inode(file_in);
+       struct xfs_inode        *src = XFS_I(inode_in);
+       struct inode            *inode_out = file_inode(file_out);
+       struct xfs_inode        *dest = XFS_I(inode_out);
        struct xfs_mount        *mp = src->i_mount;
+       loff_t                  bs = inode_out->i_sb->s_blocksize;
+       bool                    same_inode = (inode_in == inode_out);
        xfs_fileoff_t           sfsbno, dfsbno;
        xfs_filblks_t           fsblen;
-       int                     error;
        xfs_extlen_t            cowextsize;
-       bool                    is_same;
+       loff_t                  isize;
+       ssize_t                 ret;
+       loff_t                  blen;
 
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return -EOPNOTSUPP;
@@ -1332,17 +1301,8 @@ xfs_reflink_remap_range(
        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;
 
-       /* Don't reflink realtime inodes */
-       if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
-               return -EINVAL;
-
-       if (flags & ~XFS_REFLINK_ALL)
-               return -EINVAL;
-
-       trace_xfs_reflink_remap_range(src, srcoff, len, dest, destoff);
-
        /* Lock both files against IO */
-       if (src->i_ino == dest->i_ino) {
+       if (same_inode) {
                xfs_ilock(src, XFS_IOLOCK_EXCL);
                xfs_ilock(src, XFS_MMAPLOCK_EXCL);
        } else {
@@ -1350,39 +1310,126 @@ xfs_reflink_remap_range(
                xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
        }
 
+       /* Don't touch certain kinds of inodes */
+       ret = -EPERM;
+       if (IS_IMMUTABLE(inode_out))
+               goto out_unlock;
+
+       ret = -ETXTBSY;
+       if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
+               goto out_unlock;
+
+       /* Don't reflink dirs, pipes, sockets... */
+       ret = -EISDIR;
+       if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+               goto out_unlock;
+       ret = -EINVAL;
+       if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
+               goto out_unlock;
+       if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+               goto out_unlock;
+
+       /* Don't reflink realtime inodes */
+       if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
+               goto out_unlock;
+
+       /* Don't share DAX file data for now. */
+       if (IS_DAX(inode_in) || IS_DAX(inode_out))
+               goto out_unlock;
+
+       /* Are we going all the way to the end? */
+       isize = i_size_read(inode_in);
+       if (isize == 0) {
+               ret = 0;
+               goto out_unlock;
+       }
+
+       if (len == 0)
+               len = isize - pos_in;
+
+       /* Ensure offsets don't wrap and the input is inside i_size */
+       if (pos_in + len < pos_in || pos_out + len < pos_out ||
+           pos_in + len > isize)
+               goto out_unlock;
+
+       /* Don't allow dedupe past EOF in the dest file */
+       if (is_dedupe) {
+               loff_t  disize;
+
+               disize = i_size_read(inode_out);
+               if (pos_out >= disize || pos_out + len > disize)
+                       goto out_unlock;
+       }
+
+       /* If we're linking to EOF, continue to the block boundary. */
+       if (pos_in + len == isize)
+               blen = ALIGN(isize, bs) - pos_in;
+       else
+               blen = len;
+
+       /* Only reflink if we're aligned to block boundaries */
+       if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
+           !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
+               goto out_unlock;
+
+       /* Don't allow overlapped reflink within the same file */
+       if (same_inode) {
+               if (pos_out + blen > pos_in && pos_out < pos_in + blen)
+                       goto out_unlock;
+       }
+
+       /* Wait for the completion of any pending IOs on both files */
+       inode_dio_wait(inode_in);
+       if (!same_inode)
+               inode_dio_wait(inode_out);
+
+       ret = filemap_write_and_wait_range(inode_in->i_mapping,
+                       pos_in, pos_in + len - 1);
+       if (ret)
+               goto out_unlock;
+
+       ret = filemap_write_and_wait_range(inode_out->i_mapping,
+                       pos_out, pos_out + len - 1);
+       if (ret)
+               goto out_unlock;
+
+       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
        /*
         * Check that the extents are the same.
         */
-       if (flags & XFS_REFLINK_DEDUPE) {
-               is_same = false;
-               error = xfs_compare_extents(VFS_I(src), srcoff, VFS_I(dest),
-                               destoff, len, &is_same);
-               if (error)
-                       goto out_error;
+       if (is_dedupe) {
+               bool            is_same = false;
+
+               ret = xfs_compare_extents(inode_in, pos_in, inode_out, pos_out,
+                               len, &is_same);
+               if (ret)
+                       goto out_unlock;
                if (!is_same) {
-                       error = -EBADE;
-                       goto out_error;
+                       ret = -EBADE;
+                       goto out_unlock;
                }
        }
 
-       error = xfs_reflink_set_inode_flag(src, dest);
-       if (error)
-               goto out_error;
+       ret = xfs_reflink_set_inode_flag(src, dest);
+       if (ret)
+               goto out_unlock;
 
        /*
         * Invalidate the page cache so that we can clear any CoW mappings
         * in the destination file.
         */
-       truncate_inode_pages_range(&VFS_I(dest)->i_data, destoff,
-                                  PAGE_ALIGN(destoff + len) - 1);
+       truncate_inode_pages_range(&inode_out->i_data, pos_out,
+                                  PAGE_ALIGN(pos_out + len) - 1);
 
-       dfsbno = XFS_B_TO_FSBT(mp, destoff);
-       sfsbno = XFS_B_TO_FSBT(mp, srcoff);
+       dfsbno = XFS_B_TO_FSBT(mp, pos_out);
+       sfsbno = XFS_B_TO_FSBT(mp, pos_in);
        fsblen = XFS_B_TO_FSB(mp, len);
-       error = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
-                       destoff + len);
-       if (error)
-               goto out_error;
+       ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
+                       pos_out + len);
+       if (ret)
+               goto out_unlock;
 
        /*
         * Carry the cowextsize hint from src to dest if we're sharing the
@@ -1390,26 +1437,24 @@ xfs_reflink_remap_range(
         * has a cowextsize hint, and the destination file does not.
         */
        cowextsize = 0;
-       if (srcoff == 0 && len == i_size_read(VFS_I(src)) &&
+       if (pos_in == 0 && len == i_size_read(inode_in) &&
            (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
-           destoff == 0 && len >= i_size_read(VFS_I(dest)) &&
+           pos_out == 0 && len >= i_size_read(inode_out) &&
            !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
                cowextsize = src->i_d.di_cowextsize;
 
-       error = xfs_reflink_update_dest(dest, destoff + len, cowextsize);
-       if (error)
-               goto out_error;
+       ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize);
 
-out_error:
+out_unlock:
        xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
        xfs_iunlock(src, XFS_IOLOCK_EXCL);
        if (src->i_ino != dest->i_ino) {
                xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
                xfs_iunlock(dest, XFS_IOLOCK_EXCL);
        }
-       if (error)
-               trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_);
-       return error;
+       if (ret)
+               trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+       return ret;
 }
 
 /*
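
A minimal sketch of how the new file-based prototype is expected to plug into the VFS clone/dedupe file operations (the wrapper name and layout here are assumptions; the actual glue lives in the XFS file ops and may differ):

	static int
	xfs_file_clone_range(
		struct file	*file_in,
		loff_t		pos_in,
		struct file	*file_out,
		loff_t		pos_out,
		u64		len)
	{
		return xfs_reflink_remap_range(file_in, pos_in, file_out,
				pos_out, len, false);
	}

The dedupe file operation would call the same helper with is_dedupe == true, which enables the xfs_compare_extents() check shown above.
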
index 5dc3c8a..fad1160 100644 (file)
@@ -26,8 +26,8 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
                struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
 
-extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip,
-               xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
+               struct xfs_bmbt_irec *imap, bool *shared);
 extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
                xfs_off_t offset, xfs_off_t count);
 extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
@@ -43,11 +43,8 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
                xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-#define XFS_REFLINK_DEDUPE     1       /* only reflink if contents match */
-#define XFS_REFLINK_ALL                (XFS_REFLINK_DEDUPE)
-extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
-               struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
-               unsigned int flags);
+extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+               struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
 extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
                struct xfs_trans **tpp);
 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
index 5f8d55d..276d302 100644 (file)
@@ -512,13 +512,13 @@ static struct attribute *xfs_error_attrs[] = {
 };
 
 
-struct kobj_type xfs_error_cfg_ktype = {
+static struct kobj_type xfs_error_cfg_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
        .default_attrs = xfs_error_attrs,
 };
 
-struct kobj_type xfs_error_ktype = {
+static struct kobj_type xfs_error_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
 };
index ad188d3..0907752 100644 (file)
@@ -3346,7 +3346,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
 
-DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range);
+DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
 DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
 
 DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
@@ -3356,9 +3356,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
 
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
index 17a940a..8caa79c 100644 (file)
@@ -21,7 +21,7 @@ extern void pcc_mbox_free_channel(struct mbox_chan *chan);
 static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
                                                         int subspace_id)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
 #endif
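
With the stub returning ERR_PTR(-ENODEV) instead of NULL, callers that test the result with IS_ERR() now behave the same whether or not PCC is built in. A hedged caller sketch (mailbox client setup elided):

	struct mbox_chan *chan;

	chan = pcc_mbox_request_channel(&cl, subspace_id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENODEV when PCC is compiled out */
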
index 43199a0..63554e9 100644 (file)
@@ -70,7 +70,7 @@ KSYM(__kcrctab_\name):
 #include <generated/autoksyms.h>
 
 #define __EXPORT_SYMBOL(sym, val, sec)                         \
-       __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+       __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
 #define __cond_export_sym(sym, val, sec, conf)                 \
        ___cond_export_sym(sym, val, sec, conf)
 #define ___cond_export_sym(sym, val, sec, enabled)             \
index ddbeda6..689a8b9 100644 (file)
@@ -326,6 +326,7 @@ struct pci_dev;
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);
index af59638..a428aec 100644 (file)
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
  * routines, one at of_clk_init(), and one at platform device probe
  */
 #define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
-       static void name##_of_clk_init_driver(struct device_node *np)   \
+       static void __init name##_of_clk_init_driver(struct device_node *np) \
        {                                                               \
                of_node_clear_flag(np, OF_POPULATED);                   \
                fn(np);                                                 \
index 5fa55fc..32dc0cb 100644 (file)
@@ -677,10 +677,10 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
                if (best == table - 1)
                        return pos - table;
 
-               return best - pos;
+               return best - table;
        }
 
-       return best - pos;
+       return best - table;
 }
 
 /* Works only on sorted freq-tables */
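
The fix matters because the function must return an index relative to the start of the frequency table, not a distance from the scan cursor. A worked illustration (values assumed):

	/* with best == &table[3] and the cursor at pos == &table[7]:
	 *	best - table == 3	valid index into table[]
	 *	best - pos   == -4	not an index at all
	 */
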
index 9b207a8..afe641c 100644 (file)
@@ -81,6 +81,7 @@ enum cpuhp_state {
        CPUHP_AP_ARM_ARCH_TIMER_STARTING,
        CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
        CPUHP_AP_DUMMY_TIMER_STARTING,
+       CPUHP_AP_JCORE_TIMER_STARTING,
        CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
        CPUHP_AP_ARM_TWD_STARTING,
        CPUHP_AP_METAG_TIMER_STARTING,
index e2c8419..82ef36e 100644 (file)
@@ -141,4 +141,26 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);
 
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and want to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+                                            resource_size_t size)
+{
+       return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+                                          resource_size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_IO_H */
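
A minimal usage sketch, assuming the driver-side pattern this API was introduced for (the region variables are illustrative):

	void __iomem *regs;
	int ret;

	ret = arch_io_reserve_memtype_wc(vram_base, vram_size);
	if (ret)
		return ret;
	regs = ioremap_wc(vram_base, vram_size);
	/* ... hand pages out to userspace as write-combined ... */
	iounmap(regs);
	arch_io_free_memtype_wc(vram_base, vram_size);
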
index e63e288..7892f55 100644 (file)
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN        0x04    /* blocks allocated @blkno in unwritten state */
 
 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
  */
-#define IOMAP_F_MERGED 0x01    /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x02    /* block shared with another file */
-#define IOMAP_F_NEW    0x04    /* blocks have been newly allocated */
+#define IOMAP_F_NEW    0x01    /* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED 0x10    /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20    /* block shared with another file */
 
 /*
  * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
 /*
  * Flags for iomap_begin / iomap_end.  No flag implies a read.
  */
-#define IOMAP_WRITE            (1 << 0)
-#define IOMAP_ZERO             (1 << 1)
+#define IOMAP_WRITE            (1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO             (1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT           (1 << 2) /* report extent status, e.g. FIEMAP */
 
 struct iomap_ops {
        /*
index 8361c8d..b7e3431 100644 (file)
 #define GITS_BASER_TYPE_SHIFT                  (56)
 #define GITS_BASER_TYPE(r)             (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
 #define GITS_BASER_ENTRY_SIZE_SHIFT            (48)
-#define GITS_BASER_ENTRY_SIZE(r)       ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r)       ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
 #define GITS_BASER_SHAREABILITY_SHIFT  (10)
 #define GITS_BASER_InnerShareable                                      \
        GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
index d600303..820c0ad 100644 (file)
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
 void kasan_unpoison_shadow(const void *address, size_t size);
 
 void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
 
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
 
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
index 15ec117..8f2e059 100644 (file)
@@ -31,7 +31,6 @@
  * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
  * the last step cherry picks the 2nd arg, we get a zero.
  */
-#define config_enabled(cfg)            ___is_defined(cfg)
 #define __is_defined(x)                        ___is_defined(x)
 #define ___is_defined(val)             ____is_defined(__ARG_PLACEHOLDER_##val)
 #define ____is_defined(arg1_or_junk)   __take_second_arg(arg1_or_junk 1, 0)
  * otherwise. For boolean options, this is equivalent to
  * IS_ENABLED(CONFIG_FOO).
  */
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
 
 /*
  * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
  * otherwise.
  */
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
 
 /*
  * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
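
For reference, the expansion chain behind __is_defined() (worked example; __KSYM_foo stands in for any such symbol):

	__is_defined(__KSYM_foo)
	  -> ___is_defined(__KSYM_foo)
	  -> ____is_defined(__ARG_PLACEHOLDER___KSYM_foo 1, 0)
	/* if __KSYM_foo is #defined to 1, the placeholder expands to "0,",
	 * producing (0, 1, 0), and taking the second argument yields 1;
	 * if it is undefined, the placeholder stays a single token, producing
	 * (junk 1, 0), and the second argument is 0. */
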
index e9caec6..a92c8d7 100644 (file)
@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+               unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write);
+               void *buf, int len, unsigned int gup_flags);
 
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                     unsigned long start, unsigned long nr_pages,
-                     unsigned int foll_flags, struct page **pages,
-                     struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
                            unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages,
+                           unsigned int gup_flags, struct page **pages,
                            struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages,
+                           unsigned int gup_flags, struct page **pages,
                            struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-                   int write, int force, struct page **pages, int *locked);
+                   unsigned int gup_flags, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
-                              int write, int force, struct page **pages,
-                              unsigned int gup_flags);
+                              struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-                   int write, int force, struct page **pages);
+                   struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
 
@@ -1306,7 +1302,7 @@ struct frame_vector {
 struct frame_vector *frame_vector_create(unsigned int nr_frames);
 void frame_vector_destroy(struct frame_vector *vec);
 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
-                    bool write, bool force, struct frame_vector *vec);
+                    unsigned int gup_flags, struct frame_vector *vec);
 void put_vaddr_frames(struct frame_vector *vec);
 int frame_vector_to_pages(struct frame_vector *vec);
 void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
                !vma_growsup(vma->vm_next, addr);
 }
 
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_TRIED     0x800   /* a retry, previous pass started an IO */
 #define FOLL_MLOCK     0x1000  /* lock present pages */
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
+#define FOLL_COW       0x4000  /* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
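
Caller-side effect of the gup_flags conversion, sketched with illustrative arguments (the ptrace and uprobes hunks later in this patch follow the same pattern):

	struct page *page;
	long ret;

	/* before: separate write/force ints */
	ret = get_user_pages(start, 1, 1 /* write */, 0 /* force */, &page, NULL);

	/* after: a single gup_flags bitmask */
	ret = get_user_pages(start, 1, FOLL_WRITE, &page, NULL);

	/* former force=1 callers now say so explicitly */
	ret = get_user_pages(start, 1, FOLL_WRITE | FOLL_FORCE, &page, NULL);
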
index 7f2ae99..0f088f3 100644 (file)
@@ -440,33 +440,7 @@ struct zone {
        seqlock_t               span_seqlock;
 #endif
 
-       /*
-        * wait_table           -- the array holding the hash table
-        * wait_table_hash_nr_entries   -- the size of the hash table array
-        * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
-        *
-        * The purpose of all these is to keep track of the people
-        * waiting for a page to become available and make them
-        * runnable again when possible. The trouble is that this
-        * consumes a lot of space, especially when so few things
-        * wait on pages at a given time. So instead of using
-        * per-page waitqueues, we use a waitqueue hash table.
-        *
-        * The bucket discipline is to sleep on the same queue when
-        * colliding and wake all in that wait queue when removing.
-        * When something wakes, it must check to be sure its page is
-        * truly available, a la thundering herd. The cost of a
-        * collision is great, but given the expected load of the
-        * table, they should be so rare as to be outweighed by the
-        * benefits from the saved space.
-        *
-        * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-        * primary users of these fields, and in mm/page_alloc.c
-        * free_area_init_core() performs the initialization of them.
-        */
-       wait_queue_head_t       *wait_table;
-       unsigned long           wait_table_hash_nr_entries;
-       unsigned long           wait_table_bits;
+       int initialized;
 
        /* Write-intensive fields used from the page allocator */
        ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
 
 static inline bool zone_is_initialized(struct zone *zone)
 {
-       return !!zone->wait_table;
+       return zone->initialized;
 }
 
 static inline bool zone_is_empty(struct zone *zone)
index 7676557..fc3c242 100644 (file)
@@ -16,7 +16,6 @@
 #define _LINUX_NVME_H
 
 #include <linux/types.h>
-#include <linux/uuid.h>
 
 /* NQN names in command fields are specified as one fixed size */
 #define NVMF_NQN_FIELD_LEN     256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
        char                    fr[8];
        __u8                    rab;
        __u8                    ieee[3];
-       __u8                    mic;
+       __u8                    cmic;
        __u8                    mdts;
        __le16                  cntlid;
        __le32                  ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
        __u8                    apsta;
        __le16                  wctemp;
        __le16                  cctemp;
-       __u8                    rsvd270[50];
+       __le16                  mtfa;
+       __le32                  hmpre;
+       __le32                  hmmin;
+       __u8                    tnvmcap[16];
+       __u8                    unvmcap[16];
+       __le32                  rpmbs;
+       __u8                    rsvd316[4];
        __le16                  kas;
        __u8                    rsvd322[190];
        __u8                    sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
        __le16                  nabo;
        __le16                  nabspf;
        __u16                   rsvd46;
-       __le64                  nvmcap[2];
+       __u8                    nvmcap[16];
        __u8                    rsvd64[40];
        __u8                    nguid[16];
        __u8                    eui64[8];
@@ -276,6 +281,16 @@ struct nvme_id_ns {
        __u8                    vs[3712];
 };
 
+enum {
+       NVME_ID_CNS_NS                  = 0x00,
+       NVME_ID_CNS_CTRL                = 0x01,
+       NVME_ID_CNS_NS_ACTIVE_LIST      = 0x02,
+       NVME_ID_CNS_NS_PRESENT_LIST     = 0x10,
+       NVME_ID_CNS_NS_PRESENT          = 0x11,
+       NVME_ID_CNS_CTRL_NS_LIST        = 0x12,
+       NVME_ID_CNS_CTRL_LIST           = 0x13,
+};
+
 enum {
        NVME_NS_FEAT_THIN       = 1 << 0,
        NVME_NS_FLBAS_LBA_MASK  = 0xf,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
        nvme_admin_set_features         = 0x09,
        nvme_admin_get_features         = 0x0a,
        nvme_admin_async_event          = 0x0c,
+       nvme_admin_ns_mgmt              = 0x0d,
        nvme_admin_activate_fw          = 0x10,
        nvme_admin_download_fw          = 0x11,
+       nvme_admin_ns_attach            = 0x15,
        nvme_admin_keep_alive           = 0x18,
        nvme_admin_format_nvm           = 0x80,
        nvme_admin_security_send        = 0x81,
@@ -583,6 +600,7 @@ enum {
        NVME_FEAT_WRITE_ATOMIC  = 0x0a,
        NVME_FEAT_ASYNC_EVENT   = 0x0b,
        NVME_FEAT_AUTO_PST      = 0x0c,
+       NVME_FEAT_HOST_MEM_BUF  = 0x0d,
        NVME_FEAT_KATO          = 0x0f,
        NVME_FEAT_SW_PROGRESS   = 0x80,
        NVME_FEAT_HOST_ID       = 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
 struct nvmf_disc_rsp_page_entry {
        __u8            trtype;
        __u8            adrfam;
-       __u8            nqntype;
+       __u8            subtype;
        __u8            treq;
        __le16          portid;
        __le16          cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       uuid_be         hostid;
+       __u8            hostid[16];
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
        NVME_SC_INVALID_VECTOR          = 0x108,
        NVME_SC_INVALID_LOG_PAGE        = 0x109,
        NVME_SC_INVALID_FORMAT          = 0x10a,
-       NVME_SC_FIRMWARE_NEEDS_RESET    = 0x10b,
+       NVME_SC_FW_NEEDS_CONV_RESET     = 0x10b,
        NVME_SC_INVALID_QUEUE           = 0x10c,
        NVME_SC_FEATURE_NOT_SAVEABLE    = 0x10d,
        NVME_SC_FEATURE_NOT_CHANGEABLE  = 0x10e,
        NVME_SC_FEATURE_NOT_PER_NS      = 0x10f,
-       NVME_SC_FW_NEEDS_RESET_SUBSYS   = 0x110,
+       NVME_SC_FW_NEEDS_SUBSYS_RESET   = 0x110,
+       NVME_SC_FW_NEEDS_RESET          = 0x111,
+       NVME_SC_FW_NEEDS_MAX_TIME       = 0x112,
+       NVME_SC_FW_ACIVATE_PROHIBITED   = 0x113,
+       NVME_SC_OVERLAPPING_RANGE       = 0x114,
+       NVME_SC_NS_INSUFFICENT_CAP      = 0x115,
+       NVME_SC_NS_ID_UNAVAILABLE       = 0x116,
+       NVME_SC_NS_ALREADY_ATTACHED     = 0x118,
+       NVME_SC_NS_IS_PRIVATE           = 0x119,
+       NVME_SC_NS_NOT_ATTACHED         = 0x11a,
+       NVME_SC_THIN_PROV_NOT_SUPP      = 0x11b,
+       NVME_SC_CTRL_LIST_INVALID       = 0x11c,
 
        /*
         * I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
        NVME_SC_REFTAG_CHECK            = 0x284,
        NVME_SC_COMPARE_FAILED          = 0x285,
        NVME_SC_ACCESS_DENIED           = 0x286,
+       NVME_SC_UNWRITTEN_BLOCK         = 0x287,
 
        NVME_SC_DNR                     = 0x4000,
 };
@@ -960,6 +990,7 @@ struct nvme_completion {
        __le16  status;         /* did the command fail, and if so, why? */
 };
 
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+       (((major) << 16) | ((minor) << 8) | (tertiary))
 
 #endif /* _LINUX_NVME_H */
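
Worked example of the widened version macro:

	NVME_VS(1, 2)		/* old form: 0x00010200 */
	NVME_VS(1, 2, 1)	/* new form: 0x00010201, i.e. NVMe 1.2.1 */
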
index 060d0ed..4741ecd 100644 (file)
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
index 0d7abb8..91a740f 100644 (file)
@@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
                                  unsigned long prot, int pkey);
 asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
 asmlinkage long sys_pkey_free(int pkey);
-//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
-//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
-//                          unsigned long flags);
 
 #endif
index 45f004e..2873baf 100644 (file)
 struct timespec;
 struct compat_timespec;
 
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-struct thread_info {
-       unsigned long           flags;          /* low level flags */
-};
-
-#define INIT_THREAD_INFO(tsk)                  \
-{                                              \
-       .flags          = 0,                    \
-}
-#endif
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 #define current_thread_info() ((struct thread_info *)current)
 #endif
index fb8e3b6..c211900 100644 (file)
@@ -177,6 +177,7 @@ enum tcm_sense_reason_table {
        TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED    = R(0x15),
        TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED  = R(0x16),
        TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED  = R(0x17),
+       TCM_COPY_TARGET_DEVICE_NOT_REACHABLE    = R(0x18),
 #undef R
 };
 
index dbfee7e..9b1462e 100644 (file)
@@ -730,10 +730,6 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
 __SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
 #define __NR_pkey_free 290
 __SYSCALL(__NR_pkey_free,     sys_pkey_free)
-#define __NR_pkey_get 291
-//__SYSCALL(__NR_pkey_get,      sys_pkey_get)
-#define __NR_pkey_set 292
-//__SYSCALL(__NR_pkey_set,      sys_pkey_set)
 
 #undef __NR_syscalls
 #define __NR_syscalls 291
index 6965d09..cd2be1c 100644 (file)
@@ -75,6 +75,7 @@ header-y += bpf_perf_event.h
 header-y += bpf.h
 header-y += bpqether.h
 header-y += bsg.h
+header-y += bt-bmc.h
 header-y += btrfs.h
 header-y += can.h
 header-y += capability.h
diff --git a/include/uapi/linux/bt-bmc.h b/include/uapi/linux/bt-bmc.h
new file mode 100644 (file)
index 0000000..d9ec766
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_BT_BMC_H
+#define _UAPI_LINUX_BT_BMC_H
+
+#include <linux/ioctl.h>
+
+#define __BT_BMC_IOCTL_MAGIC   0xb1
+#define BT_BMC_IOCTL_SMS_ATN   _IO(__BT_BMC_IOCTL_MAGIC, 0x00)
+
+#endif /* _UAPI_LINUX_BT_BMC_H */
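
A hedged userspace sketch of the new ioctl (the device node name is an assumption based on the BT BMC driver added in this series):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/bt-bmc.h>

	int fd = open("/dev/ipmi-bt-host", O_RDWR);	/* assumed node name */
	if (fd >= 0)
		ioctl(fd, BT_BMC_IOCTL_SMS_ATN);	/* signal SMS_ATN to the host */
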
index a521999..bf74eaa 100644 (file)
@@ -53,7 +53,7 @@ static struct msg_msg *alloc_msg(size_t len)
        size_t alen;
 
        alen = min(len, DATALEN_MSG);
-       msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+       msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT);
        if (msg == NULL)
                return NULL;
 
@@ -65,7 +65,7 @@ static struct msg_msg *alloc_msg(size_t len)
        while (len > 0) {
                struct msg_msgseg *seg;
                alen = min(len, DATALEN_SEG);
-               seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
+               seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
                if (seg == NULL)
                        goto out_err;
                *pseg = seg;
index 5df20d6..29de1a9 100644 (file)
@@ -228,7 +228,7 @@ static struct {
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-       .dep_map = {.name = "cpu_hotplug.lock" },
+       .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
 #endif
 };
 
index c6e47e9..0e29213 100644 (file)
@@ -1960,6 +1960,12 @@ void perf_event_disable(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(perf_event_disable);
 
+void perf_event_disable_inatomic(struct perf_event *event)
+{
+       event->pending_disable = 1;
+       irq_work_queue(&event->pending);
+}
+
 static void perf_set_shadow_time(struct perf_event *event,
                                 struct perf_event_context *ctx,
                                 u64 tstamp)
@@ -7075,8 +7081,8 @@ static int __perf_event_overflow(struct perf_event *event,
        if (events && atomic_dec_and_test(&event->event_limit)) {
                ret = 1;
                event->pending_kill = POLL_HUP;
-               event->pending_disable = 1;
-               irq_work_queue(&event->pending);
+
+               perf_event_disable_inatomic(event);
        }
 
        READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -8855,7 +8861,10 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
+       int remove_device;
+
        mutex_lock(&pmus_lock);
+       remove_device = pmu_bus_running;
        list_del_rcu(&pmu->entry);
        mutex_unlock(&pmus_lock);
 
@@ -8869,10 +8878,12 @@ void perf_pmu_unregister(struct pmu *pmu)
        free_percpu(pmu->pmu_disable_count);
        if (pmu->type >= PERF_TYPE_MAX)
                idr_remove(&pmu_idr, pmu->type);
-       if (pmu->nr_addr_filters)
-               device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
-       device_del(pmu->dev);
-       put_device(pmu->dev);
+       if (remove_device) {
+               if (pmu->nr_addr_filters)
+                       device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+               device_del(pmu->dev);
+               put_device(pmu->dev);
+       }
        free_pmu_context(pmu);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
index d4129bb..f9ec9ad 100644 (file)
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 
 retry:
        /* Read the page with vaddr into memory */
-       ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+       ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
+                       &vma);
        if (ret <= 0)
                return ret;
 
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
         * but we treat this as a 'remote' access since it is
         * essentially a kernel access to the memory.
         */
-       result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+       result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+                       NULL);
        if (result < 0)
                return result;
 
index 0c5f1a5..9c4d304 100644 (file)
@@ -721,6 +721,7 @@ int irq_set_parent(int irq, int parent_irq)
        irq_put_desc_unlock(desc, flags);
        return 0;
 }
+EXPORT_SYMBOL_GPL(irq_set_parent);
 #endif
 
 /*
index 8d44b3f..30e6d05 100644 (file)
@@ -53,8 +53,15 @@ void notrace __sanitizer_cov_trace_pc(void)
        /*
         * We are interested in code coverage as a function of syscall inputs,
         * so we ignore code executed in interrupts.
+        * The checks for whether we are in an interrupt are open-coded, because
+        * 1. We can't use in_interrupt() here, since it also returns true
+        *    when we are inside local_bh_disable() section.
+        * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
+        *    since that leads to slower generated code (three separate tests,
+        *    one for each of the flags).
         */
-       if (!t || in_interrupt())
+       if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
+                                                       | NMI_MASK)))
                return;
        mode = READ_ONCE(t->kcov_mode);
        if (mode == KCOV_MODE_TRACE) {
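
For reference, the distinction being open-coded here (standard preempt_count() layout assumed):

	/* in_interrupt() tests HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK, and
	 * SOFTIRQ_MASK also counts local_bh_disable() sections, so it would
	 * suppress coverage of plain task code running under bh-disable.
	 * SOFTIRQ_OFFSET is raised only while a softirq is actually being
	 * served, which is exactly what kcov wants to skip. */
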
index 1e7f5da..6ccb08f 100644 (file)
@@ -498,9 +498,9 @@ static int enter_state(suspend_state_t state)
 
 #ifndef CONFIG_SUSPEND_SKIP_SYNC
        trace_suspend_resume(TPS("sync_filesystems"), 0, true);
-       printk(KERN_INFO "PM: Syncing filesystems ... ");
+       pr_info("PM: Syncing filesystems ... ");
        sys_sync();
-       printk("done.\n");
+       pr_cont("done.\n");
        trace_suspend_resume(TPS("sync_filesystems"), 0, false);
 #endif
 
index d5e3973..de08fc9 100644 (file)
@@ -1769,6 +1769,10 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
                cont_flush();
        }
 
+       /* Skip empty continuation lines that couldn't be added - they just flush */
+       if (!text_len && (lflags & LOG_CONT))
+               return 0;
+
        /* If it doesn't end in a newline, try to buffer the current line */
        if (!(lflags & LOG_NEWLINE)) {
                if (cont_add(facility, level, lflags, text, text_len))
index 2a99027..e6474f7 100644 (file)
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
                int this_len, retval;
 
                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-               retval = access_process_vm(tsk, src, buf, this_len, 0);
+               retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
                if (!retval) {
                        if (copied)
                                break;
@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
-               retval = access_process_vm(tsk, dst, buf, this_len, 1);
+               retval = access_process_vm(tsk, dst, buf, this_len,
+                               FOLL_FORCE | FOLL_WRITE);
                if (!retval) {
                        if (copied)
                                break;
@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
        unsigned long tmp;
        int copied;
 
-       copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+       copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
 {
        int copied;
 
-       copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+       copied = access_process_vm(tsk, addr, &data, sizeof(data),
+                       FOLL_FORCE | FOLL_WRITE);
        return (copied == sizeof(data)) ? 0 : -EIO;
 }
 
@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
-               ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+               ret = access_process_vm(child, addr, &word, sizeof(word),
+                               FOLL_FORCE);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
-               ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+               ret = access_process_vm(child, addr, &data, sizeof(data),
+                               FOLL_FORCE | FOLL_WRITE);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;
 
index 94732d1..42d4027 100644 (file)
@@ -7515,11 +7515,27 @@ static struct kmem_cache *task_group_cache __read_mostly;
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
 
+#define WAIT_TABLE_BITS 8
+#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
+static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
+
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
+{
+       const int shift = BITS_PER_LONG == 32 ? 5 : 6;
+       unsigned long val = (unsigned long)word << shift | bit;
+
+       return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
+}
+EXPORT_SYMBOL(bit_waitqueue);
+
 void __init sched_init(void)
 {
        int i, j;
        unsigned long alloc_size = 0, ptr;
 
+       for (i = 0; i < WAIT_TABLE_SIZE; i++)
+               init_waitqueue_head(bit_wait_table + i);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
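
This is the replacement for the per-zone wait_table removed from struct zone earlier in this diff: one global 256-entry hash shared by all bit waiters. A worked sketch of a lookup (64-bit kernel, so shift == 6; the key is illustrative):

	void *word = &page->flags;
	unsigned long val = ((unsigned long)word << 6) | PG_locked;
	wait_queue_head_t *wq = bit_wait_table + hash_long(val, 8 /* WAIT_TABLE_BITS */);
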
index 2d4ad72..c242944 100644 (file)
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
         * will definitely be updated (after enqueue).
         */
        sa->period_contrib = 1023;
-       sa->load_avg = scale_load_down(se->load.weight);
+       /*
+        * Tasks are initialized with full load to be seen as heavy tasks until
+        * they get a chance to stabilize to their real load level.
+        * Group entities are initialized with zero load to reflect the fact that
+        * nothing has been attached to the task group yet.
+        */
+       if (entity_is_task(se))
+               sa->load_avg = scale_load_down(se->load.weight);
        sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
        /*
         * At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
-       struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
-       u64 avg_idle = this_rq()->avg_idle;
-       u64 avg_cost = this_sd->avg_scan_cost;
+       struct sched_domain *this_sd;
+       u64 avg_cost, avg_idle = this_rq()->avg_idle;
        u64 time, cost;
        s64 delta;
        int cpu, wrap;
 
+       this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+       if (!this_sd)
+               return -1;
+
+       avg_cost = this_sd->avg_scan_cost;
+
        /*
         * Due to large variance we need a large fuzz factor; hackbench in
         * particular is sensitive here.
@@ -8827,7 +8839,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
        struct sched_entity *se;
        struct cfs_rq *cfs_rq;
-       struct rq *rq;
        int i;
 
        tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8842,8 +8853,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
        for_each_possible_cpu(i) {
-               rq = cpu_rq(i);
-
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
index 4f70535..9453efe 100644 (file)
@@ -480,16 +480,6 @@ void wake_up_bit(void *word, int bit)
 }
 EXPORT_SYMBOL(wake_up_bit);
 
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
-       const int shift = BITS_PER_LONG == 32 ? 5 : 6;
-       const struct zone *zone = page_zone(virt_to_page(word));
-       unsigned long val = (unsigned long)word << shift | bit;
-
-       return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
 /*
  * Manipulate the atomic_t address to produce a better bit waitqueue table hash
  * index (we're keying off bit -1, but that would produce a horrible hash
index 1bf81ef..744fa61 100644 (file)
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
-       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
index c3aad68..12dd190 100644 (file)
@@ -542,7 +542,6 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 static int alarm_timer_create(struct k_itimer *new_timer)
 {
        enum  alarmtimer_type type;
-       struct alarm_base *base;
 
        if (!alarmtimer_get_rtcdev())
                return -ENOTSUPP;
@@ -551,7 +550,6 @@ static int alarm_timer_create(struct k_itimer *new_timer)
                return -EPERM;
 
        type = clock2alarm(new_timer->it_clock);
-       base = &alarm_bases[type];
        alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
        return 0;
 }
index 2d47980..c611c47 100644 (file)
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 
 #ifdef CONFIG_NO_HZ_COMMON
 static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
 {
 #ifdef CONFIG_SMP
        if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
 
 static inline void forward_timer_base(struct timer_base *base)
 {
+       unsigned long jnow = READ_ONCE(jiffies);
+
        /*
         * We only forward the base when it's idle and we have a delta between
         * base clock and jiffies.
         */
-       if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+       if (!base->is_idle || (long) (jnow - base->clk) < 2)
                return;
 
        /*
         * If the next expiry value is > jiffies, then we fast forward to
         * jiffies otherwise we forward to the next expiry value.
         */
-       if (time_after(base->next_expiry, jiffies))
-               base->clk = jiffies;
+       if (time_after(base->next_expiry, jnow))
+               base->clk = jnow;
        else
                base->clk = base->next_expiry;
 }
 #else
 static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
 {
        return get_timer_this_cpu_base(tflags);
 }
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
 static inline void forward_timer_base(struct timer_base *base) { }
 #endif
 
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-       struct timer_base *target = __get_target_base(base, tflags);
-
-       forward_timer_base(target);
-       return target;
-}
 
 /*
  * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
 {
        for (;;) {
                struct timer_base *base;
-               u32 tf = timer->flags;
+               u32 tf;
+
+               /*
+                * We need to use READ_ONCE() here, otherwise the compiler
+                * might re-read @tf between the check for TIMER_MIGRATING
+                * and spin_lock().
+                */
+               tf = READ_ONCE(timer->flags);
 
                if (!(tf & TIMER_MIGRATING)) {
                        base = get_timer_base(tf);
@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
        unsigned long clk = 0, flags;
        int ret = 0;
 
+       BUG_ON(!timer->function);
+
        /*
         * This is a common optimization triggered by the networking code - if
         * the timer is re-modified to have the same timeout or ends up in the
@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
        if (timer_pending(timer)) {
                if (timer->expires == expires)
                        return 1;
+
                /*
-                * Take the current timer_jiffies of base, but without holding
-                * the lock!
+                * We lock timer base and calculate the bucket index right
+                * here. If the timer ends up in the same bucket, then we
+                * just update the expiry time and avoid the whole
+                * dequeue/enqueue dance.
                 */
-               base = get_timer_base(timer->flags);
-               clk = base->clk;
+               base = lock_timer_base(timer, &flags);
 
+               clk = base->clk;
                idx = calc_wheel_index(expires, clk);
 
                /*
@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
                 */
                if (idx == timer_get_idx(timer)) {
                        timer->expires = expires;
-                       return 1;
+                       ret = 1;
+                       goto out_unlock;
                }
+       } else {
+               base = lock_timer_base(timer, &flags);
        }
 
        timer_stats_timer_set_start_info(timer);
-       BUG_ON(!timer->function);
-
-       base = lock_timer_base(timer, &flags);
 
        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
                }
        }
 
+       /* Try to forward a stale timer base clock */
+       forward_timer_base(base);
+
        timer->expires = expires;
        /*
         * If 'idx' was calculated above and the base time did not advance
-        * between calculating 'idx' and taking the lock, only enqueue_timer()
-        * and trigger_dyntick_cpu() is required. Otherwise we need to
-        * (re)calculate the wheel index via internal_add_timer().
+        * between calculating 'idx' and possibly switching the base, only
+        * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
+        * we need to (re)calculate the wheel index via
+        * internal_add_timer().
         */
        if (idx != UINT_MAX && clk == base->clk) {
                enqueue_timer(base, timer, idx);
@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
        base->next_expiry = nextevt;
        /*
-        * We have a fresh next event. Check whether we can forward the base:
+        * We have a fresh next event. Check whether we can forward the
+        * base. We can only do that when @basej is past base->clk
+        * otherwise we might rewind base->clk.
         */
-       if (time_after(nextevt, jiffies))
-               base->clk = jiffies;
-       else if (time_after(nextevt, base->clk))
-               base->clk = nextevt;
+       if (time_after(basej, base->clk)) {
+               if (time_after(nextevt, basej))
+                       base->clk = basej;
+               else if (time_after(nextevt, base->clk))
+                       base->clk = nextevt;
+       }
 
        if (time_before_eq(nextevt, basej)) {
                expires = basem;
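Aside: forward_timer_base() now snapshots jiffies once into jnow, so the idle-delta check and the clock update cannot observe two different values of a variable that ticks concurrently; the same reasoning motivates the READ_ONCE() on timer->flags in lock_timer_base(). A runnable userspace model of the snapshot pattern (illustrative only; the real code uses time_after() to survive jiffies wrap):

/* Userspace model of the jiffies snapshot in forward_timer_base().
 * Illustrative only: the real code uses time_after() to survive wrap. */
#include <stdio.h>

static volatile unsigned long jiffies;

static void forward(unsigned long *clk, unsigned long next_expiry)
{
        unsigned long jnow = jiffies;   /* one snapshot, used consistently */

        if ((long)(jnow - *clk) < 2)
                return;                 /* base clock is current enough */
        *clk = next_expiry > jnow ? jnow : next_expiry;
}

int main(void)
{
        unsigned long clk = 100;

        jiffies = 105;
        forward(&clk, 110);
        printf("clk forwarded to %lu\n", clk);  /* 105 */
        return 0;
}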
index 33bc56c..b01e547 100644 (file)
@@ -198,6 +198,7 @@ config FRAME_WARN
        int "Warn for stack frames larger than (needs gcc 4.4)"
        range 0 8192
        default 0 if KASAN
+       default 2048 if GCC_PLUGIN_LATENT_ENTROPY
        default 1024 if !64BIT
        default 2048 if 64BIT
        help
index 0a11396..144fe6b 100644 (file)
@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
-       int nbits, start_bit = 0, end_bit, remain;
+       int nbits, start_bit, end_bit, remain;
 
 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
@@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
                if (size > atomic_read(&chunk->avail))
                        continue;
 
+               start_bit = 0;
                end_bit = chunk_size(chunk) >> order;
 retry:
                start_bit = algo(chunk->bits, end_bit, start_bit,
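Aside: the lib/genalloc.c hunk fixes a stale-offset bug. start_bit was initialized once outside the chunk loop, so after scanning one chunk the search in the next chunk resumed from the leftover offset and could miss free space at its start. A toy model of the fix (illustrative, plain C; '0' marks a free bit):

/* Toy model of the per-chunk start_bit reset. */
#include <stdio.h>

static int find_free_from(const char *bits, int len, int start)
{
        for (int i = start; i < len; i++)
                if (bits[i] == '0')
                        return i;
        return -1;
}

int main(void)
{
        const char *chunks[] = { "1111", "0111" };  /* chunk 1 free at bit 0 */
        int start_bit;

        for (int c = 0; c < 2; c++) {
                start_bit = 0;  /* the fix: without this, the offset left by
                                 * chunk 0's failed scan would skip bit 0 */
                printf("chunk %d: free at %d\n", c,
                       find_free_from(chunks[c], 4, start_bit));
        }
        return 0;
}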
index 60f77f1..4d830e2 100644 (file)
@@ -50,7 +50,7 @@
                                        STACK_ALLOC_ALIGN)
 #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_SLABS_CAP 8192
 #define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
         (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
index be0ee11..86e3e0e 100644 (file)
@@ -187,7 +187,7 @@ config MEMORY_HOTPLUG
        bool "Allow for memory hot-add"
        depends on SPARSEMEM || X86_64_ACPI_NUMA
        depends on ARCH_ENABLE_MEMORY_HOTPLUG
-       depends on !KASAN
+       depends on COMPILE_TEST || !KASAN
 
 config MEMORY_HOTPLUG_SPARSE
        def_bool y
index 849f459..c7fe2f1 100644 (file)
@@ -790,9 +790,7 @@ EXPORT_SYMBOL(__page_cache_alloc);
  */
 wait_queue_head_t *page_waitqueue(struct page *page)
 {
-       const struct zone *zone = page_zone(page);
-
-       return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
+       return bit_waitqueue(page, 0);
 }
 EXPORT_SYMBOL(page_waitqueue);
 
index 381bb07..db77dcb 100644 (file)
  * get_vaddr_frames() - map virtual addresses to pfns
  * @start:     starting user address
  * @nr_frames: number of pages / pfns from start to map
- * @write:     whether pages will be written to by the caller
- * @force:     whether to force write access even if user mapping is
- *             readonly. See description of the same argument of
-               get_user_pages().
+ * @gup_flags: flags modifying lookup behaviour
  * @vec:       structure which receives pages / pfns of the addresses mapped.
  *             It should have space for at least nr_frames entries.
  *
@@ -34,7 +31,7 @@
  * This function takes care of grabbing mmap_sem as necessary.
  */
 int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
-                    bool write, bool force, struct frame_vector *vec)
+                    unsigned int gup_flags, struct frame_vector *vec)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
                vec->got_ref = true;
                vec->is_pfns = false;
                ret = get_user_pages_locked(start, nr_frames,
-                       write, force, (struct page **)(vec->ptrs), &locked);
+                       gup_flags, (struct page **)(vec->ptrs), &locked);
                goto out;
        }
 
index 96b2b2f..ec4f827 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
        return -EEXIST;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+       return pte_write(pte) ||
+               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -95,7 +105,7 @@ retry:
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
-       if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+       if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
                pte_unmap_unlock(ptep, ptl);
                return NULL;
        }
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
         * reCOWed by userspace write).
         */
        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-               *flags &= ~FOLL_WRITE;
+               *flags |= FOLL_COW;
        return 0;
 }
 
@@ -516,7 +526,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *nonblocking)
@@ -621,7 +631,6 @@ next_page:
        } while (nr_pages);
        return i;
 }
-EXPORT_SYMBOL(__get_user_pages);
 
 bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
 {
@@ -729,7 +738,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                                                struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
-                                               int write, int force,
                                                struct page **pages,
                                                struct vm_area_struct **vmas,
                                                int *locked, bool notify_drop,
@@ -747,10 +755,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 
        if (pages)
                flags |= FOLL_GET;
-       if (write)
-               flags |= FOLL_WRITE;
-       if (force)
-               flags |= FOLL_FORCE;
 
        pages_done = 0;
        lock_dropped = false;
@@ -843,12 +847,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  *          up_read(&mm->mmap_sem);
  */
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-                          int write, int force, struct page **pages,
+                          unsigned int gup_flags, struct page **pages,
                           int *locked)
 {
        return __get_user_pages_locked(current, current->mm, start, nr_pages,
-                                      write, force, pages, NULL, locked, true,
-                                      FOLL_TOUCH);
+                                      pages, NULL, locked, true,
+                                      gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
@@ -864,14 +868,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                               unsigned long start, unsigned long nr_pages,
-                                              int write, int force, struct page **pages,
-                                              unsigned int gup_flags)
+                                              struct page **pages, unsigned int gup_flags)
 {
        long ret;
        int locked = 1;
+
        down_read(&mm->mmap_sem);
-       ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                     pages, NULL, &locked, false, gup_flags);
+       ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+                                     &locked, false, gup_flags);
        if (locked)
                up_read(&mm->mmap_sem);
        return ret;
@@ -896,10 +900,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  * "force" parameter).
  */
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-                            int write, int force, struct page **pages)
+                            struct page **pages, unsigned int gup_flags)
 {
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-                                        write, force, pages, FOLL_TOUCH);
+                                        pages, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -910,9 +914,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
- * @write:     whether pages will be written to by the caller
- * @force:     whether to force access even when user mapping is currently
- *             protected (but never forces write access to shared mapping).
+ * @gup_flags: flags modifying lookup behaviour
  * @pages:     array that receives pointers to the pages pinned.
  *             Should be at least nr_pages long. Or NULL, if caller
  *             only intends to ensure the pages are faulted in.
@@ -941,9 +943,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * or similar operation cannot guarantee anything stronger anyway because
  * locks can't be held over the syscall boundary.
  *
- * If write=0, the page must not be written to. If the page is written to,
- * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
- * after the page is finished with, and before put_page is called.
+ * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
+ * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
+ * be called after the page is finished with, and before put_page is called.
  *
  * get_user_pages is typically used for fewer-copy IO operations, to get a
  * handle on the memory by some means other than accesses via the user virtual
@@ -960,12 +962,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  */
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages,
+               unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                      pages, vmas, NULL, false,
-                                      FOLL_TOUCH | FOLL_REMOTE);
+       return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+                                      NULL, false,
+                                      gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
 
@@ -976,12 +978,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-               int write, int force, struct page **pages,
+               unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas)
 {
        return __get_user_pages_locked(current, current->mm, start, nr_pages,
-                                      write, force, pages, vmas, NULL, false,
-                                      FOLL_TOUCH);
+                                      pages, vmas, NULL, false,
+                                      gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -1505,7 +1507,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                start += nr << PAGE_SHIFT;
                pages += nr;
 
-               ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+               ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+                               write ? FOLL_WRITE : 0);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
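Aside: all the mm/gup.c hunks above are one API conversion. The write/force int pair becomes a single gup_flags argument, __get_user_pages() becomes static, and the new FOLL_COW flag (set after a forced COW fault) lets follow_page_pte() accept a write through an unwritable-but-dirty PTE instead of silently clearing FOLL_WRITE. A hypothetical driver-style caller on the new signature (sketch only; the helper name is illustrative):

/* Hypothetical driver-style caller, sketch only: FOLL_WRITE replaces the
 * old write=1 argument, FOLL_FORCE would replace force=1. */
static long pin_user_buffer(unsigned long uaddr, struct page **pages)
{
        long got = get_user_pages_unlocked(uaddr, 16, pages, FOLL_WRITE);

        /* on success: use pages[0..got-1], then set_page_dirty_lock() and
         * put_page() each page when finished */
        return got;
}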
index 88af13c..70c0097 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
+#include <linux/bug.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -62,7 +63,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
        }
 }
 
-static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
 {
        void *base = task_stack_page(task);
        size_t size = sp - base;
@@ -77,9 +78,24 @@ void kasan_unpoison_task_stack(struct task_struct *task)
 }
 
 /* Unpoison the stack for the current task beyond a watermark sp value. */
-asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 {
-       __kasan_unpoison_stack(current, sp);
+       __kasan_unpoison_stack(current, watermark);
+}
+
+/*
+ * Clear all poison for the region between the current SP and a provided
+ * watermark value, as is sometimes required prior to hand-crafted asm function
+ * returns in the middle of functions.
+ */
+void kasan_unpoison_stack_above_sp_to(const void *watermark)
+{
+       const void *sp = __builtin_frame_address(0);
+       size_t size = watermark - sp;
+
+       if (WARN_ON(sp > watermark))
+               return;
+       kasan_unpoison_shadow(sp, size);
 }
 
 /*
index a5e453c..e5355a5 100644 (file)
@@ -1453,8 +1453,11 @@ static void kmemleak_scan(void)
 
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
-                       scan_block(task_stack_page(p), task_stack_page(p) +
-                                  THREAD_SIZE, NULL);
+                       void *stack = try_get_task_stack(p);
+                       if (stack) {
+                               scan_block(stack, stack + THREAD_SIZE, NULL);
+                               put_task_stack(p);
+                       }
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }
index 1d05cb9..234676e 100644 (file)
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
+               /* Do this so a list_lru_destroy() doesn't crash: */
+               lru->node = NULL;
                goto out;
        }
 
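Aside: the list_lru hunk NULLs the node array after freeing it so that a subsequent list_lru_destroy() sees NULL instead of a dangling pointer. A runnable userspace model of the pattern:

/* Userspace model of the NULL-after-free pattern (illustrative). */
#include <stdlib.h>

struct lru { int *node; };

static void lru_destroy(struct lru *l)
{
        free(l->node);          /* safe on NULL, double-free on dangling */
        l->node = NULL;
}

int main(void)
{
        struct lru l = { .node = malloc(4) };

        free(l.node);
        l.node = NULL;          /* the fix; omit it and lru_destroy() double-frees */
        lru_destroy(&l);
        return 0;
}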
index ae052b5..0f870ba 100644 (file)
@@ -1917,6 +1917,15 @@ retry:
                     current->flags & PF_EXITING))
                goto force;
 
+       /*
+        * Prevent unbounded recursion when reclaim operations need to
+        * allocate memory. This might exceed the limits temporarily,
+        * but we prefer facilitating memory reclaim and getting back
+        * under the limit over triggering OOM kills in these cases.
+        */
+       if (unlikely(current->flags & PF_MEMALLOC))
+               goto force;
+
        if (unlikely(task_in_memcg_oom(current)))
                goto nomem;
 
index fc1987d..e18c57b 100644 (file)
@@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
  * given task for page fault accounting.
  */
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long addr, void *buf, int len, int write)
+               unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
        struct vm_area_struct *vma;
        void *old_buf = buf;
+       int write = gup_flags & FOLL_WRITE;
 
        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
@@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                struct page *page = NULL;
 
                ret = get_user_pages_remote(tsk, mm, addr, 1,
-                               write, 1, &page, &vma);
+                               gup_flags, &page, &vma);
                if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
                        break;
@@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  * @addr:      start address to access
  * @buf:       source or destination buffer
  * @len:       number of bytes to transfer
- * @write:     whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
  *
  * The caller must hold a reference on @mm.
  */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write)
+               void *buf, int len, unsigned int gup_flags)
 {
-       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+       return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*
@@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
  * Do not walk the page table directly, use get_user_pages
  */
 int access_process_vm(struct task_struct *tsk, unsigned long addr,
-               void *buf, int len, int write)
+               void *buf, int len, unsigned int gup_flags)
 {
        struct mm_struct *mm;
        int ret;
@@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
        if (!mm)
                return 0;
 
-       ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+       ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+
        mmput(mm);
 
        return ret;
index 9629273..cad4b91 100644 (file)
@@ -268,7 +268,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;
-       struct zone *zone;
 
        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);
@@ -276,19 +275,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);
 
-       zone = &pgdat->node_zones[0];
-       for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
-               if (zone_is_initialized(zone)) {
-                       nr_pages = zone->wait_table_hash_nr_entries
-                               * sizeof(wait_queue_head_t);
-                       nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
-                       page = virt_to_page(zone->wait_table);
-
-                       for (i = 0; i < nr_pages; i++, page++)
-                               get_page_bootmem(node, page, NODE_INFO);
-               }
-       }
-
        pfn = pgdat->node_start_pfn;
        end_pfn = pgdat_end_pfn(pgdat);
 
@@ -2131,7 +2117,6 @@ void try_offline_node(int nid)
        unsigned long start_pfn = pgdat->node_start_pfn;
        unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
        unsigned long pfn;
-       int i;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
@@ -2158,20 +2143,6 @@ void try_offline_node(int nid)
         */
        node_set_offline(nid);
        unregister_one_node(nid);
-
-       /* free waittable in each zone */
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               struct zone *zone = pgdat->node_zones + i;
-
-               /*
-                * wait_table may be allocated from boot memory,
-                * here only free if it's allocated by vmalloc.
-                */
-               if (is_vmalloc_addr(zone->wait_table)) {
-                       vfree(zone->wait_table);
-                       zone->wait_table = NULL;
-               }
-       }
 }
 EXPORT_SYMBOL(try_offline_node);
 
index ad1c96a..0b859af 100644 (file)
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
        struct page *p;
        int err;
 
-       err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+       err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
index bcdbe62..1193652 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/perf_event.h>
 #include <linux/pkeys.h>
 #include <linux/ksm.h>
-#include <linux/pkeys.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
index 95daf81..8b8faaf 100644 (file)
@@ -109,7 +109,7 @@ unsigned int kobjsize(const void *objp)
        return PAGE_SIZE << compound_order(page);
 }
 
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, unsigned long nr_pages,
                      unsigned int foll_flags, struct page **pages,
                      struct vm_area_struct **vmas, int *nonblocking)
@@ -160,33 +160,25 @@ finish_or_fault:
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-                   int write, int force, struct page **pages,
+                   unsigned int gup_flags, struct page **pages,
                    struct vm_area_struct **vmas)
 {
-       int flags = 0;
-
-       if (write)
-               flags |= FOLL_WRITE;
-       if (force)
-               flags |= FOLL_FORCE;
-
-       return __get_user_pages(current, current->mm, start, nr_pages, flags,
-                               pages, vmas, NULL);
+       return __get_user_pages(current, current->mm, start, nr_pages,
+                               gup_flags, pages, vmas, NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-                           int write, int force, struct page **pages,
+                           unsigned int gup_flags, struct page **pages,
                            int *locked)
 {
-       return get_user_pages(start, nr_pages, write, force, pages, NULL);
+       return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
-                              int write, int force, struct page **pages,
-                              unsigned int gup_flags)
+                              struct page **pages, unsigned int gup_flags)
 {
        long ret;
        down_read(&mm->mmap_sem);
@@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-                            int write, int force, struct page **pages)
+                            struct page **pages, unsigned int gup_flags)
 {
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-                                        write, force, pages, 0);
+                                        pages, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe,
 EXPORT_SYMBOL(filemap_map_pages);
 
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long addr, void *buf, int len, int write)
+               unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
        struct vm_area_struct *vma;
+       int write = gup_flags & FOLL_WRITE;
 
        down_read(&mm->mmap_sem);
 
@@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  * @addr:      start address to access
  * @buf:       source or destination buffer
  * @len:       number of bytes to transfer
- * @write:     whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
  *
  * The caller must hold a reference on @mm.
  */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write)
+               void *buf, int len, unsigned int gup_flags)
 {
-       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+       return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*
  * Access another process' address space.
  * - source/target buffer must be kernel space
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+               unsigned int gup_flags)
 {
        struct mm_struct *mm;
 
@@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
        if (!mm)
                return 0;
 
-       len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+       len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
 
        mmput(mm);
        return len;
index 2b3bf67..8fd42aa 100644 (file)
@@ -4224,7 +4224,7 @@ static void show_migration_types(unsigned char type)
        }
 
        *p = '\0';
-       printk("(%s) ", tmp);
+       printk(KERN_CONT "(%s) ", tmp);
 }
 
 /*
@@ -4335,7 +4335,8 @@ void show_free_areas(unsigned int filter)
                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
 
                show_node(zone);
-               printk("%s"
+               printk(KERN_CONT
+                       "%s"
                        " free:%lukB"
                        " min:%lukB"
                        " low:%lukB"
@@ -4382,8 +4383,8 @@ void show_free_areas(unsigned int filter)
                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
-                       printk(" %ld", zone->lowmem_reserve[i]);
-               printk("\n");
+                       printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
+               printk(KERN_CONT "\n");
        }
 
        for_each_populated_zone(zone) {
@@ -4394,7 +4395,7 @@ void show_free_areas(unsigned int filter)
                if (skip_free_areas_node(filter, zone_to_nid(zone)))
                        continue;
                show_node(zone);
-               printk("%s: ", zone->name);
+               printk(KERN_CONT "%s: ", zone->name);
 
                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
@@ -4412,11 +4413,12 @@ void show_free_areas(unsigned int filter)
                }
                spin_unlock_irqrestore(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
-                       printk("%lu*%lukB ", nr[order], K(1UL) << order);
+                       printk(KERN_CONT "%lu*%lukB ",
+                              nr[order], K(1UL) << order);
                        if (nr[order])
                                show_migration_types(types[order]);
                }
-               printk("= %lukB\n", K(total));
+               printk(KERN_CONT "= %lukB\n", K(total));
        }
 
        hugetlb_show_meminfo();
@@ -4976,72 +4978,6 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
 #endif
 }
 
-/*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE    256
-
-#ifndef CONFIG_MEMORY_HOTPLUG
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
-       unsigned long size = 1;
-
-       pages /= PAGES_PER_WAITQUEUE;
-
-       while (size < pages)
-               size <<= 1;
-
-       /*
-        * Once we have dozens or even hundreds of threads sleeping
-        * on IO we've got bigger problems than wait queue collision.
-        * Limit the size of the wait table to a reasonable size.
-        */
-       size = min(size, 4096UL);
-
-       return max(size, 4UL);
-}
-#else
-/*
- * A zone's size might be changed by hot-add, so it is not possible to determine
- * a suitable size for its wait_table.  So we use the maximum size now.
- *
- * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
- *
- *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
- *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
- *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
- *
- * The maximum entries are prepared when a zone's memory is (512K + 256) pages
- * or more by the traditional way. (See above).  It equals:
- *
- *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
- *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
- *    powerpc (64K page size)             : =  (32G +16M)byte.
- */
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
-       return 4096UL;
-}
-#endif
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
-       return ffz(~size);
-}
-
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
@@ -5304,49 +5240,6 @@ void __init setup_per_cpu_pageset(void)
                        alloc_percpu(struct per_cpu_nodestat);
 }
 
-static noinline __ref
-int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
-{
-       int i;
-       size_t alloc_size;
-
-       /*
-        * The per-page waitqueue mechanism uses hashed waitqueues
-        * per zone.
-        */
-       zone->wait_table_hash_nr_entries =
-                wait_table_hash_nr_entries(zone_size_pages);
-       zone->wait_table_bits =
-               wait_table_bits(zone->wait_table_hash_nr_entries);
-       alloc_size = zone->wait_table_hash_nr_entries
-                                       * sizeof(wait_queue_head_t);
-
-       if (!slab_is_available()) {
-               zone->wait_table = (wait_queue_head_t *)
-                       memblock_virt_alloc_node_nopanic(
-                               alloc_size, zone->zone_pgdat->node_id);
-       } else {
-               /*
-                * This case means that a zone whose size was 0 gets new memory
-                * via memory hot-add.
-                * But it may be the case that a new node was hot-added.  In
-                * this case vmalloc() will not be able to use this new node's
-                * memory - this wait_table must be initialized to use this new
-                * node itself as well.
-                * To use this new node's memory, further consideration will be
-                * necessary.
-                */
-               zone->wait_table = vmalloc(alloc_size);
-       }
-       if (!zone->wait_table)
-               return -ENOMEM;
-
-       for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
-               init_waitqueue_head(zone->wait_table + i);
-
-       return 0;
-}
-
 static __meminit void zone_pcp_init(struct zone *zone)
 {
        /*
@@ -5367,10 +5260,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
                                        unsigned long size)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
-       int ret;
-       ret = zone_wait_table_init(zone, size);
-       if (ret)
-               return ret;
+
        pgdat->nr_zones = zone_idx(zone) + 1;
 
        zone->zone_start_pfn = zone_start_pfn;
@@ -5382,6 +5272,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
                        zone_start_pfn, (zone_start_pfn + size));
 
        zone_init_free_lists(zone);
+       zone->initialized = 1;
 
        return 0;
 }
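Aside: the remaining page_alloc.c hunks delete the per-zone wait-table machinery that page_waitqueue() used to key off, adding a zone->initialized flag in its place. Per the removed comment, each zone's table could reach 4096 entries of 16-24 bytes, versus one 256-entry static table now. A back-of-envelope comparison (sizes taken from that removed comment):

/* Back-of-envelope sizes, taken from the removed comment above. */
#include <stdio.h>

int main(void)
{
        unsigned long whead = 24;       /* x86-64 with preemption, per comment */

        printf("old per-zone max: %lu KiB\n", 4096 * whead / 1024);  /* 96 */
        printf("new global table: %lu KiB\n",  256 * whead / 1024);  /*  6 */
        return 0;
}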
index 07514d4..be8dc8d 100644 (file)
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
        ssize_t rc = 0;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct pages *);
+       unsigned int flags = FOLL_REMOTE;
 
        /* Work out address and page range required */
        if (len == 0)
                return 0;
        nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
+       if (vm_write)
+               flags |= FOLL_WRITE;
+
        while (!rc && nr_pages && iov_iter_count(iter)) {
                int pages = min(nr_pages, max_pages_per_loop);
                size_t bytes;
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
                 * current/current->mm
                 */
                pages = __get_user_pages_unlocked(task, mm, pa, pages,
-                                                 vm_write, 0, process_pages,
-                                                 FOLL_REMOTE);
+                                                 process_pages, flags);
                if (pages <= 0)
                        return -EFAULT;
 
index 090fb26..0b0550c 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -233,6 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
        spin_lock_init(&parent->list_lock);
        parent->free_objects = 0;
        parent->free_touched = 0;
+       parent->num_slabs = 0;
 }
 
 #define MAKE_LIST(cachep, listp, slab, nodeid)                         \
@@ -966,7 +967,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
         * guaranteed to be valid until irq is re-enabled, because it will be
         * freed after synchronize_sched().
         */
-       if (force_change)
+       if (old_shared && force_change)
                synchronize_sched();
 
 fail:
@@ -1382,24 +1383,27 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
        for_each_kmem_cache_node(cachep, node, n) {
                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
                unsigned long active_slabs = 0, num_slabs = 0;
+               unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+               unsigned long num_slabs_full;
 
                spin_lock_irqsave(&n->list_lock, flags);
-               list_for_each_entry(page, &n->slabs_full, lru) {
-                       active_objs += cachep->num;
-                       active_slabs++;
-               }
+               num_slabs = n->num_slabs;
                list_for_each_entry(page, &n->slabs_partial, lru) {
                        active_objs += page->active;
-                       active_slabs++;
+                       num_slabs_partial++;
                }
                list_for_each_entry(page, &n->slabs_free, lru)
-                       num_slabs++;
+                       num_slabs_free++;
 
                free_objects += n->free_objects;
                spin_unlock_irqrestore(&n->list_lock, flags);
 
-               num_slabs += active_slabs;
                num_objs = num_slabs * cachep->num;
+               active_slabs = num_slabs - num_slabs_free;
+               num_slabs_full = num_slabs -
+                       (num_slabs_partial + num_slabs_free);
+               active_objs += (num_slabs_full * cachep->num);
+
                pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
                        node, active_slabs, num_slabs, active_objs, num_objs,
                        free_objects);
@@ -2314,6 +2318,7 @@ static int drain_freelist(struct kmem_cache *cache,
 
                page = list_entry(p, struct page, lru);
                list_del(&page->lru);
+               n->num_slabs--;
                /*
                 * Safe to drop the lock. The slab is no longer linked
                 * to the cache.
@@ -2752,6 +2757,8 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
                list_add_tail(&page->lru, &(n->slabs_free));
        else
                fixup_slab_list(cachep, n, page, &list);
+
+       n->num_slabs++;
        STATS_INC_GROWN(cachep);
        n->free_objects += cachep->num - page->active;
        spin_unlock(&n->list_lock);
@@ -3443,6 +3450,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 
                page = list_last_entry(&n->slabs_free, struct page, lru);
                list_move(&page->lru, list);
+               n->num_slabs--;
        }
 }
 
@@ -4099,6 +4107,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
        unsigned long num_objs;
        unsigned long active_slabs = 0;
        unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+       unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+       unsigned long num_slabs_full = 0;
        const char *name;
        char *error = NULL;
        int node;
@@ -4111,33 +4121,34 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
                check_irq_on();
                spin_lock_irq(&n->list_lock);
 
-               list_for_each_entry(page, &n->slabs_full, lru) {
-                       if (page->active != cachep->num && !error)
-                               error = "slabs_full accounting error";
-                       active_objs += cachep->num;
-                       active_slabs++;
-               }
+               num_slabs += n->num_slabs;
+
                list_for_each_entry(page, &n->slabs_partial, lru) {
                        if (page->active == cachep->num && !error)
                                error = "slabs_partial accounting error";
                        if (!page->active && !error)
                                error = "slabs_partial accounting error";
                        active_objs += page->active;
-                       active_slabs++;
+                       num_slabs_partial++;
                }
+
                list_for_each_entry(page, &n->slabs_free, lru) {
                        if (page->active && !error)
                                error = "slabs_free accounting error";
-                       num_slabs++;
+                       num_slabs_free++;
                }
+
                free_objects += n->free_objects;
                if (n->shared)
                        shared_avail += n->shared->avail;
 
                spin_unlock_irq(&n->list_lock);
        }
-       num_slabs += active_slabs;
        num_objs = num_slabs * cachep->num;
+       active_slabs = num_slabs - num_slabs_free;
+       num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+       active_objs += (num_slabs_full * cachep->num);
+
        if (num_objs - active_objs != free_objects && !error)
                error = "free_objects accounting error";
 
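Aside: with the new per-node num_slabs counter, the full-slab list no longer needs to be walked; the counts are derived as active_slabs = num_slabs - num_slabs_free and num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free). A worked example with made-up numbers:

/* Worked example of the derived counts (made-up numbers). */
#include <stdio.h>

int main(void)
{
        unsigned long num_slabs = 10, partial = 3, free_slabs = 2, per = 8;
        unsigned long full = num_slabs - (partial + free_slabs);    /* 5 */
        unsigned long active_slabs = num_slabs - free_slabs;        /* 8 */

        printf("full=%lu active_slabs=%lu full_objs=%lu\n",
               full, active_slabs, full * per);
        return 0;
}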
index 9653f2e..bc05fdc 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -432,6 +432,7 @@ struct kmem_cache_node {
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
+       unsigned long num_slabs;
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
index 662cddf..1a41553 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,8 +230,10 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
+int vma_is_stack_for_current(struct vm_area_struct *vma)
 {
+       struct task_struct * __maybe_unused t = current;
+
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
@@ -283,7 +285,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
 {
-       return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
+       return get_user_pages_unlocked(start, nr_pages, pages,
+                                      write ? FOLL_WRITE : 0);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
@@ -623,7 +626,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
        if (len > buflen)
                len = buflen;
 
-       res = access_process_vm(task, arg_start, buffer, len, 0);
+       res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
 
        /*
         * If the nul at the end of args has been overwritten, then
@@ -638,7 +641,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
-                                                buffer+res, len, 0);
+                                                buffer+res, len,
+                                                FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
index 744f926..76fda22 100644 (file)
@@ -3043,7 +3043,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                            sc.gfp_mask,
                                            sc.reclaim_idx);
 
+       current->flags |= PF_MEMALLOC;
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       current->flags &= ~PF_MEMALLOC;
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
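Aside: this vmscan.c hunk is the second half of the mm/memcontrol.c change earlier. Reclaim marks itself with PF_MEMALLOC, and try_charge() lets PF_MEMALLOC tasks breach the limit instead of recursing back into reclaim or triggering OOM kills. A runnable userspace model of the re-entrancy guard (illustrative; _Thread_local stands in for the per-task flag):

/* Userspace model of the PF_MEMALLOC re-entrancy guard (illustrative). */
#include <stdio.h>

static _Thread_local int in_reclaim;

static int alloc_page_model(void);

static int reclaim(void)
{
        int freed;

        in_reclaim = 1;                 /* plays the role of PF_MEMALLOC */
        freed = alloc_page_model();     /* reclaim itself may need memory */
        in_reclaim = 0;
        return freed;
}

static int alloc_page_model(void)
{
        if (in_reclaim)
                return 1;               /* breach the limit, don't recurse */
        return reclaim();
}

int main(void)
{
        printf("%d\n", alloc_page_model());
        return 0;
}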
index 00d2601..1a7c9a7 100644 (file)
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
        while (got < num_pages) {
                rc = get_user_pages_unlocked(
                    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-                   num_pages - got, write_page, 0, pages + got);
+                   num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
                if (rc < 0)
                        break;
                BUG_ON(rc == 0);
index f826e87..d942c7c 100644 (file)
@@ -41,7 +41,7 @@ config BIG_KEYS
        bool "Large payload keys"
        depends on KEYS
        depends on TMPFS
-       select CRYPTO
+       depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
        select CRYPTO_AES
        select CRYPTO_ECB
        select CRYPTO_RNG
index c0b3030..835c1ab 100644 (file)
@@ -9,6 +9,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) "big_key: "fmt
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/file.h>
@@ -341,44 +342,48 @@ error:
  */
 static int __init big_key_init(void)
 {
-       return register_key_type(&key_type_big_key);
-}
-
-/*
- * Initialize big_key crypto and RNG algorithms
- */
-static int __init big_key_crypto_init(void)
-{
-       int ret = -EINVAL;
+       struct crypto_skcipher *cipher;
+       struct crypto_rng *rng;
+       int ret;
 
-       /* init RNG */
-       big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
-       if (IS_ERR(big_key_rng)) {
-               big_key_rng = NULL;
-               return -EFAULT;
+       rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+       if (IS_ERR(rng)) {
+               pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+               return PTR_ERR(rng);
        }
 
+       big_key_rng = rng;
+
        /* seed RNG */
-       ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
-       if (ret)
-               goto error;
+       ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+       if (ret) {
+               pr_err("Can't reset rng: %d\n", ret);
+               goto error_rng;
+       }
 
        /* init block cipher */
-       big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
-                                                0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(big_key_skcipher)) {
-               big_key_skcipher = NULL;
-               ret = -EFAULT;
-               goto error;
+       cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(cipher)) {
+               ret = PTR_ERR(cipher);
+               pr_err("Can't alloc crypto: %d\n", ret);
+               goto error_rng;
+       }
+
+       big_key_skcipher = cipher;
+
+       ret = register_key_type(&key_type_big_key);
+       if (ret < 0) {
+               pr_err("Can't register type: %d\n", ret);
+               goto error_cipher;
        }
 
        return 0;
 
-error:
+error_cipher:
+       crypto_free_skcipher(big_key_skcipher);
+error_rng:
        crypto_free_rng(big_key_rng);
-       big_key_rng = NULL;
        return ret;
 }
 
-device_initcall(big_key_init);
-late_initcall(big_key_crypto_init);
+late_initcall(big_key_init);
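Aside: the big_key rework folds the two initcalls into one so the key type is only registered after the RNG and cipher are ready, unwinding in reverse order of setup on failure. A runnable userspace model of that goto-unwind shape (malloc() and fake_register() stand in for the crypto allocations and registration):

/* Userspace model of the goto-unwind shape; fake_register forces the
 * error path. */
#include <stdio.h>
#include <stdlib.h>

static int fake_register(void) { return -1; }

int main(void)
{
        void *rng, *cipher;
        int ret;

        rng = malloc(16);
        if (!rng)
                return 1;

        cipher = malloc(16);
        if (!cipher) {
                ret = 1;
                goto err_rng;
        }

        ret = fake_register();
        if (ret < 0)
                goto err_cipher;
        return 0;

err_cipher:
        free(cipher);                   /* undo in reverse order of setup */
err_rng:
        free(rng);
        return ret;
}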
index f0611a6..b9f531c 100644 (file)
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
        struct timespec now;
        unsigned long timo;
        key_ref_t key_ref, skey_ref;
-       char xbuf[12];
+       char xbuf[16];
        int rc;
 
        struct keyring_search_context ctx = {
index 0850579..09fd610 100644 (file)
@@ -3557,7 +3557,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
                } else if (!vma->vm_file &&
                           ((vma->vm_start <= vma->vm_mm->start_stack &&
                             vma->vm_end >= vma->vm_mm->start_stack) ||
-                           vma_is_stack_for_task(vma, current))) {
+                           vma_is_stack_for_current(vma))) {
                        rc = current_has_perm(current, PROCESS__EXECSTACK);
                } else if (vma->vm_file && vma->anon_vma) {
                        /*
index ade7c6c..682b73a 100644 (file)
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
         * the execve().
         */
        if (get_user_pages_remote(current, bprm->mm, pos, 1,
-                               0, 1, &page, NULL) <= 0)
+                               FOLL_FORCE, &page, NULL) <= 0)
                return false;
 #else
        page = bprm->page[pos / PAGE_SIZE];
index dcc1028..37d9cfb 100644 (file)
@@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 
                ktime_get_ts64(&tm);
                tm = timespec64_sub(tm, tmr->last_update);
-               cur_time.tv_nsec = tm.tv_nsec;
-               cur_time.tv_sec = tm.tv_sec;
+               cur_time.tv_nsec += tm.tv_nsec;
+               cur_time.tv_sec += tm.tv_sec;
                snd_seq_sanity_real_time(&cur_time);
        }
        spin_unlock_irqrestore(&tmr->lock, flags);
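Aside: the sound/core/seq hunk fixes an = vs += bug: the queue's current time must be the stored time plus the delta since last_update, not the delta alone. A worked model (illustrative, plain C; the carry step plays the role of snd_seq_sanity_real_time()):

/* Worked model of the accumulate fix. */
#include <stdio.h>

int main(void)
{
        long cur_sec = 10, cur_nsec = 500000000;  /* stored queue time */
        long d_sec = 0, d_nsec = 600000000;       /* elapsed since last_update */

        cur_sec += d_sec;                         /* was: cur_sec = d_sec */
        cur_nsec += d_nsec;
        if (cur_nsec >= 1000000000) {
                cur_sec++;
                cur_nsec -= 1000000000;
        }
        printf("%ld.%09ld\n", cur_sec, cur_nsec); /* 11.100000000, not 0.6 */
        return 0;
}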
index d17937b..7e3aa50 100644 (file)
@@ -111,7 +111,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return -EINVAL;
 
        hm = kmalloc(sizeof(*hm), GFP_KERNEL);
-       hr = kmalloc(sizeof(*hr), GFP_KERNEL);
+       hr = kzalloc(sizeof(*hr), GFP_KERNEL);
        if (!hm || !hr) {
                err = -ENOMEM;
                goto out;
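hm is fully overwritten by the subsequent copy_from_user(), but hr is a response buffer that can be copied back to user space before the handler has written every field, so it must start zeroed or stale kernel heap bytes leak out. The userspace analogue of the kmalloc-to-kzalloc switch, with calloc() standing in for kzalloc():

    #include <stdlib.h>
    #include <string.h>

    struct reply { unsigned int size; char data[256]; };

    /* calloc() zeroes the allocation: bytes the handler never writes read
     * back as zeros rather than old heap contents */
    static struct reply *make_reply(const void *payload, unsigned int len)
    {
            struct reply *r = calloc(1, sizeof(*r));

            if (!r || len > sizeof(r->data)) {
                    free(r);
                    return NULL;
            }
            r->size = len;
            memcpy(r->data, payload, len);  /* data[len..255] stays zeroed */
            return r;
    }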
index c3469f7..c64d986 100644 (file)
@@ -341,8 +341,7 @@ enum {
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
-       (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
-        AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+       (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
         AZX_DCAPS_SNOOP_TYPE(NVIDIA))
 
 #define AZX_DCAPS_PRESET_CTHDA \
@@ -1716,6 +1715,10 @@ static int azx_first_init(struct azx *chip)
                }
        }
 
+       /* NVidia hardware normally only supports up to 40 bits of DMA */
+       if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+               dma_bits = 40;
+
        /* disable 64bit DMA address on some devices */
        if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
                dev_dbg(card->dev, "Disabling 64bit DMA\n");
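Nvidia's HDA controllers can address 40 bits, so the blanket AZX_DCAPS_NO_64BIT (which forced 32-bit DMA and bounce buffering) was stricter than the hardware requires; the new code caps the mask at 40 bits instead. How such a cap typically reaches the DMA API, as a sketch (the driver's exact call sequence is abridged; dma_set_mask_and_coherent() is the generic helper):

    int dma_bits = 64;

    if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
            dma_bits = 40;          /* hardware limit, not a quirk */
    if (chip->driver_caps & AZX_DCAPS_NO_64BIT)
            dma_bits = 32;          /* genuinely 32-bit-only devices */

    if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits)))
            dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));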
index b58e8c7..2f909dd 100644 (file)
@@ -5811,8 +5811,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 #define ALC295_STANDARD_PINS \
        {0x12, 0xb7a60130}, \
        {0x14, 0x90170110}, \
-       {0x17, 0x21014020}, \
-       {0x18, 0x21a19030}, \
        {0x21, 0x04211020}
 
 #define ALC298_STANDARD_PINS \
@@ -5858,10 +5856,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x02011020},
                {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x14, 0x90170130},
                {0x1b, 0x01014020},
                {0x21, 0x0221103f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170130},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221103f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x14, 0x90170130},
                {0x1b, 0x02011020},
@@ -6039,7 +6045,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC292_STANDARD_PINS,
                {0x13, 0x90a60140}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-               ALC295_STANDARD_PINS),
+               ALC295_STANDARD_PINS,
+               {0x17, 0x21014020},
+               {0x18, 0x21a19030}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC295_STANDARD_PINS,
+               {0x17, 0x21014040},
+               {0x18, 0x21a19050}),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x17, 0x90170110}),
@@ -6613,6 +6625,7 @@ enum {
        ALC891_FIXUP_HEADSET_MODE,
        ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
        ALC662_FIXUP_ACER_VERITON,
+       ALC892_FIXUP_ASROCK_MOBO,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6889,6 +6902,16 @@ static const struct hda_fixup alc662_fixups[] = {
                        { }
                }
        },
+       [ALC892_FIXUP_ASROCK_MOBO] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x15, 0x40f000f0 }, /* disabled */
+                       { 0x16, 0x40f000f0 }, /* disabled */
+                       { 0x18, 0x01014011 }, /* LO */
+                       { 0x1a, 0x01014012 }, /* LO */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6926,6 +6949,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+       SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
        SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
        SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
        SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
index c60a776..8a59d47 100644 (file)
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 
+/* Syntek STK1160 */
+{
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+                      USB_DEVICE_ID_MATCH_INT_CLASS |
+                      USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+       .idVendor = 0x05e1,
+       .idProduct = 0x0408,
+       .bInterfaceClass = USB_CLASS_AUDIO,
+       .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Syntek",
+               .product_name = "STK1160",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_AUDIO_ALIGN_TRANSFER
+       }
+},
+
 /* Digidesign Mbox */
 {
        /* Thanks to Clemens Ladisch <clemens@ladisch.de> */
index 1188bc8..a396292 100644 (file)
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
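The ( 7*32+16 ) arithmetic encodes where a feature lives: word 7, bit 16 of the kernel's array of 32-bit capability words. Decoding one back, as a runnable sketch:

    #include <stdio.h>

    #define X86_FEATURE_AVX512_4VNNIW (7 * 32 + 16)

    int main(void)
    {
            unsigned int f = X86_FEATURE_AVX512_4VNNIW;

            /* cpu_has()-style tests index a u32 array exactly this way */
            printf("word %u, bit %u\n", f / 32, f % 32);  /* word 7, bit 16 */
            return 0;
    }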
index c0c0b26..b63a31b 100644 (file)
@@ -98,6 +98,15 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                        *type = INSN_FP_SETUP;
                break;
 
+       case 0x8d:
+               if (insn.rex_prefix.bytes &&
+                   insn.rex_prefix.bytes[0] == 0x48 &&
+                   insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
+                   insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
+                       /* lea (%rsp), %rbp */
+                       *type = INSN_FP_SETUP;
+               break;
+
        case 0x90:
                *type = INSN_NOP;
                break;
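The new 0x8d case teaches the decoder a second frame-pointer setup idiom: "lea (%rsp), %rbp", which compilers sometimes emit instead of "mov %rsp, %rbp". The bytes being matched are REX.W (0x48), opcode 0x8d, ModRM 0x2c (destination %rbp, SIB byte follows) and SIB 0x24 (base %rsp, no index). A standalone matcher for that pattern:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_lea_rsp_rbp(const uint8_t *b, unsigned int len)
    {
            /* 48 8d 2c 24 == lea (%rsp), %rbp */
            return len >= 4 &&
                   b[0] == 0x48 && b[1] == 0x8d &&
                   b[2] == 0x2c && b[3] == 0x24;
    }

    int main(void)
    {
            const uint8_t insn[] = { 0x48, 0x8d, 0x2c, 0x24 };

            printf("%s\n", is_lea_rsp_rbp(insn, sizeof(insn))
                           ? "match" : "no match");
            return 0;
    }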
index 143b6cd..e8a1f69 100644 (file)
@@ -97,6 +97,19 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
        return next;
 }
 
+static bool gcov_enabled(struct objtool_file *file)
+{
+       struct section *sec;
+       struct symbol *sym;
+
+       list_for_each_entry(sec, &file->elf->sections, list)
+               list_for_each_entry(sym, &sec->symbol_list, list)
+                       if (!strncmp(sym->name, "__gcov_.", 8))
+                               return true;
+
+       return false;
+}
+
 #define for_each_insn(file, insn)                                      \
        list_for_each_entry(insn, &file->insn_list, list)
 
@@ -713,6 +726,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
                                      struct instruction *insn)
 {
        struct rela *text_rela, *rodata_rela;
+       struct instruction *orig_insn = insn;
 
        text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
        if (text_rela && text_rela->sym == file->rodata->sym) {
@@ -733,10 +747,16 @@ static struct rela *find_switch_table(struct objtool_file *file,
 
        /* case 3 */
        func_for_each_insn_continue_reverse(file, func, insn) {
-               if (insn->type == INSN_JUMP_UNCONDITIONAL ||
-                   insn->type == INSN_JUMP_DYNAMIC)
+               if (insn->type == INSN_JUMP_DYNAMIC)
                        break;
 
+               /* allow small jumps within the range */
+               if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+                   insn->jump_dest &&
+                   (insn->jump_dest->offset <= insn->offset ||
+                    insn->jump_dest->offset > orig_insn->offset))
+                       break;
+
                text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
                                                    insn->len);
                if (text_rela && text_rela->sym == file->rodata->sym)
@@ -1034,34 +1054,6 @@ static int validate_branch(struct objtool_file *file,
        return 0;
 }
 
-static bool is_gcov_insn(struct instruction *insn)
-{
-       struct rela *rela;
-       struct section *sec;
-       struct symbol *sym;
-       unsigned long offset;
-
-       rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-       if (!rela)
-               return false;
-
-       if (rela->sym->type != STT_SECTION)
-               return false;
-
-       sec = rela->sym->sec;
-       offset = rela->addend + insn->offset + insn->len - rela->offset;
-
-       list_for_each_entry(sym, &sec->symbol_list, list) {
-               if (sym->type != STT_OBJECT)
-                       continue;
-
-               if (offset >= sym->offset && offset < sym->offset + sym->len)
-                       return (!memcmp(sym->name, "__gcov0.", 8));
-       }
-
-       return false;
-}
-
 static bool is_kasan_insn(struct instruction *insn)
 {
        return (insn->type == INSN_CALL &&
@@ -1083,9 +1075,6 @@ static bool ignore_unreachable_insn(struct symbol *func,
        if (insn->type == INSN_NOP)
                return true;
 
-       if (is_gcov_insn(insn))
-               return true;
-
        /*
         * Check if this (or a subsequent) instruction is related to
         * CONFIG_UBSAN or CONFIG_KASAN.
@@ -1146,6 +1135,19 @@ static int validate_functions(struct objtool_file *file)
                                    ignore_unreachable_insn(func, insn))
                                        continue;
 
+                               /*
+                                * gcov produces a lot of unreachable
+                                * instructions.  If we get an unreachable
+                                * warning and the file has gcov enabled, just
+                                * ignore it, and all other such warnings for
+                                * the file.
+                                */
+                               if (!file->ignore_unreachables &&
+                                   gcov_enabled(file)) {
+                                       file->ignore_unreachables = true;
+                                       continue;
+                               }
+
                                WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
                                warnings++;
                        }
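Instead of proving each unreachable instruction gcov-generated (the deleted is_gcov_insn() relocation walk), objtool now flips one per-file switch: if any "__gcov_."-prefixed symbol exists, the first unreachable hit mutes all such warnings for that object. The warn-once-then-suppress shape, as a runnable sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct file_ctx { bool ignore_unreachables; bool gcov; };

    /* with gcov instrumentation present, the first hit sets a per-file
     * flag and every later warning is swallowed */
    static void report_unreachable(struct file_ctx *f, const char *where)
    {
            if (f->ignore_unreachables)
                    return;
            if (f->gcov) {
                    f->ignore_unreachables = true;
                    return;
            }
            fprintf(stderr, "unreachable instruction at %s\n", where);
    }

    int main(void)
    {
            struct file_ctx f = { .gcov = true };

            report_unreachable(&f, "foo+0x10");     /* muted: gcov file */
            report_unreachable(&f, "bar+0x20");     /* muted: flag set */
            return 0;
    }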
index 5ce61a1..df14e6b 100644 (file)
@@ -36,7 +36,7 @@ SOLIBEXT=so
 # The following works at least on fedora 23, you may need the next
 # line for other distros.
 ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
 else
   ifneq (,$(wildcard /usr/sbin/alternatives))
     JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
index fb8e42c..4ffff7b 100644 (file)
@@ -601,7 +601,8 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
                        u64 nr_entries;
                        hbt->timer(hbt->arg);
 
-                       if (hist_browser__has_filter(browser))
+                       if (hist_browser__has_filter(browser) ||
+                           symbol_conf.report_hierarchy)
                                hist_browser__update_nr_entries(browser);
 
                        nr_entries = hist_browser__nr_entries(browser);
index 85dd0db..2f3eded 100644 (file)
@@ -1895,7 +1895,6 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unused,
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
-       ph->env.nr_numa_nodes = nr;
        nodes = zalloc(sizeof(*nodes) * nr);
        if (!nodes)
                return -ENOMEM;
@@ -1932,6 +1931,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unused,
 
                free(str);
        }
+       ph->env.nr_numa_nodes = nr;
        ph->env.numa_nodes = nodes;
        return 0;
 
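Moving the ph->env.nr_numa_nodes store below the loop means a mid-parse failure can no longer leave the env advertising a node count whose array was never installed. The publish-only-on-success shape, sketched with stand-in types and parser:

    #include <errno.h>
    #include <stdlib.h>

    struct node { int id; };
    struct env  { int nr_nodes; struct node *nodes; };

    static int parse_node(struct node *n)  /* stand-in for the real parser */
    {
            n->id = 0;
            return 0;
    }

    static int load_nodes(struct env *env, int nr)
    {
            struct node *nodes = calloc(nr, sizeof(*nodes));

            if (!nodes)
                    return -ENOMEM;

            for (int i = 0; i < nr; i++) {
                    if (parse_node(&nodes[i]) < 0) {
                            free(nodes);
                            return -1;      /* env still reports 0 nodes */
                    }
            }

            env->nr_nodes = nr;     /* publish the count only on success */
            env->nodes = nodes;
            return 0;
    }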
index 9f43fda..660fca0 100644 (file)
@@ -136,8 +136,8 @@ do {                                                        \
 group          [^,{}/]*[{][^}]*[}][^,{}/]*
 event_pmu      [^,{}/]+[/][^/]*[/][^,{}/]*
 event          [^,{}/]+
-bpf_object     .*\.(o|bpf)
-bpf_source     .*\.c
+bpf_object     [^,{}]+\.(o|bpf)
+bpf_source     [^,{}]+\.c
 
 num_dec                [0-9]+
 num_hex                0x[a-fA-F0-9]+
index db96688..8035cc1 100644 (file)
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
         * mm and might be done in another context, so we must
         * use FOLL_REMOTE.
         */
-       __get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+       __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+                       FOLL_WRITE | FOLL_REMOTE);
 
        kvm_async_page_present_sync(vcpu, apf);
 
index 81dfc73..2907b7b 100644 (file)
@@ -1346,21 +1346,19 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 static int get_user_page_nowait(unsigned long start, int write,
                struct page **page)
 {
-       int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+       int flags = FOLL_NOWAIT | FOLL_HWPOISON;
 
        if (write)
                flags |= FOLL_WRITE;
 
-       return __get_user_pages(current, current->mm, start, 1, flags, page,
-                       NULL, NULL);
+       return get_user_pages(start, 1, flags, page, NULL);
 }
 
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
-       int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+       int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
 
-       rc = __get_user_pages(current, current->mm, addr, 1,
-                             flags, NULL, NULL, NULL);
+       rc = get_user_pages(addr, 1, flags, NULL, NULL);
        return rc == -EHWPOISON;
 }
 
@@ -1416,10 +1414,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
                down_read(&current->mm->mmap_sem);
                npages = get_user_page_nowait(addr, write_fault, page);
                up_read(&current->mm->mmap_sem);
-       } else
+       } else {
+               unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+               if (write_fault)
+                       flags |= FOLL_WRITE;
+
                npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-                                                  write_fault, 0, page,
-                                                  FOLL_TOUCH|FOLL_HWPOISON);
+                                                  page, flags);
+       }
        if (npages != 1)
                return npages;
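These KVM hunks are the same write/force-to-gup_flags conversion seen in the tomoyo change: get_user_page_nowait() and check_user_page_hwpoison() drop FOLL_TOUCH and FOLL_GET because the short-form get_user_pages() (which implicitly targets current->mm) layers those on itself, while the slow path now spells its flags out. The layering, demonstrated with illustrative flag values (not the kernel's):

    #include <stdio.h>

    #define FOLL_WRITE      0x01
    #define FOLL_TOUCH      0x02
    #define FOLL_GET        0x04
    #define FOLL_HWPOISON   0x08
    #define FOLL_NOWAIT     0x10

    /* the caller states per-site semantics; the gup core adds the
     * mechanical bits, so callers no longer pass them explicitly */
    static unsigned int gup_core_flags(unsigned int caller, int has_pages)
    {
            unsigned int flags = caller | FOLL_TOUCH;

            if (has_pages)
                    flags |= FOLL_GET;      /* pin pages handed back */
            return flags;
    }

    int main(void)
    {
            unsigned int f = gup_core_flags(FOLL_NOWAIT | FOLL_HWPOISON |
                                            FOLL_WRITE, 1);

            printf("effective flags: %#x\n", f);    /* 0x1f */
            return 0;
    }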