Merge tag 'xtensa-next-20131015' of git://github.com/czankel/xtensa-linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 28 Oct 2013 23:58:05 +0000 (16:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 28 Oct 2013 23:58:05 +0000 (16:58 -0700)
Pull Xtensa patchset from Chris Zankel:
 "The main patch fixes a bug that can cause a kernel panic, and was
  introduced in rc1.  The other two have been discovered by a uclibc
  test and 'coccinelle'"

* tag 'xtensa-next-20131015' of git://github.com/czankel/xtensa-linux:
  xtensa: Cocci spatch "noderef"
  xtensa: don't use alternate signal stack on threads
  xtensa: fix fast_syscall_spill_registers_fixup

388 files changed:
Documentation/ABI/stable/sysfs-bus-usb
Documentation/ABI/testing/sysfs-devices-power
Documentation/ABI/testing/sysfs-power
Documentation/acpi/dsdt-override.txt
Documentation/connector/ucon.c
Documentation/devicetree/bindings/memory.txt [deleted file]
MAINTAINERS
Makefile
arch/arm/boot/dts/integratorcp.dts
arch/arm/common/mcpm_entry.c
arch/arm/common/sharpsl_param.c
arch/arm/include/asm/Kbuild
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/syscall.h
arch/arm/kernel/head.S
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm/net/bpf_jit_32.c
arch/parisc/configs/712_defconfig
arch/parisc/configs/a500_defconfig
arch/parisc/configs/b180_defconfig
arch/parisc/configs/c3000_defconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/configs/default_defconfig
arch/parisc/kernel/cache.c
arch/parisc/kernel/head.S
arch/powerpc/net/bpf_jit_comp.c
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/timex.h
arch/s390/kernel/compat_signal.c
arch/s390/kernel/debug.c
arch/s390/kvm/interrupt.c
arch/s390/lib/delay.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp.c
arch/x86/Kconfig
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/kvm.c
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/smp.c
block/partitions/efi.c
drivers/acpi/Kconfig
drivers/acpi/device_pm.c
drivers/acpi/power.c
drivers/acpi/scan.c
drivers/ata/ahci.c
drivers/ata/ahci_platform.c
drivers/ata/libahci.c
drivers/ata/libata-acpi.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_isapnp.c
drivers/base/memory.c
drivers/char/tpm/xen-tpmfront.c
drivers/connector/cn_proc.c
drivers/connector/connector.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/dma/edma.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-wiimote-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/iio/frequency/adf4350.c
drivers/iio/industrialio-buffer.c
drivers/infiniband/Kconfig
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/amso1100/c2_ae.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/md/bcache/request.c
drivers/md/dm-snap-persistent.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-frontends/tda10071.c
drivers/media/i2c/ad9389b.c
drivers/media/i2c/adv7511.c
drivers/media/i2c/adv7842.c
drivers/media/i2c/ths8200.c
drivers/media/pci/saa7134/saa7134-video.c
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/sh_vou.c
drivers/media/platform/soc_camera/mx3_camera.c
drivers/media/tuners/e4000.c
drivers/media/usb/stkwebcam/stk-webcam.c
drivers/media/usb/uvc/uvc_driver.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-dma-contig.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/net/can/at91_can.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/hamradio/yam.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wan/farsync.c
drivers/net/wan/wanxl.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/xen-netback/xenbus.c
drivers/of/Kconfig
drivers/of/Makefile
drivers/of/base.c
drivers/of/fdt.c
drivers/of/of_reserved_mem.c [deleted file]
drivers/of/platform.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/platform/x86/Kconfig
drivers/platform/x86/sony-laptop.c
drivers/s390/block/dasd_eckd.c
drivers/s390/char/sclp.c
drivers/s390/char/vmlogrdr.c
drivers/s390/cio/cio.c
drivers/s390/cio/qdio_main.c
drivers/scsi/BusLogic.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/staging/media/msi3101/Kconfig
drivers/staging/media/msi3101/sdr-msi3101.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_sbc.c
drivers/target/target_core_xcopy.c
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/samsung/exynos_tmu.h
drivers/thermal/samsung/exynos_tmu_data.c
drivers/thermal/samsung/exynos_tmu_data.h
drivers/thermal/thermal_hwmon.c
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/thermal/x86_pkg_temp_thermal.c
drivers/tty/serial/imx.c
drivers/tty/serial/vt8500_serial.c
drivers/usb/chipidea/host.c
drivers/usb/core/quirks.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/Kconfig
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_virthub.c
drivers/usb/serial/option.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/scsi.c
drivers/w1/w1.c
fs/btrfs/inode.c
fs/buffer.c
fs/cifs/cifsfs.c
fs/cifs/cifspdu.h
fs/cifs/cifssmb.c
fs/cifs/netmisc.c
fs/cifs/sess.c
fs/cifs/smb2pdu.c
fs/cifs/smbfsctl.h
fs/cifs/transport.c
fs/dcache.c
fs/ecryptfs/crypto.c
fs/ecryptfs/keystore.c
fs/ext3/namei.c
fs/ext4/namei.c
fs/file_table.c
fs/jfs/jfs_inode.c
fs/namei.c
fs/proc/inode.c
fs/proc/task_mmu.c
fs/seq_file.c
include/acpi/acpi_bus.h
include/linux/filter.h
include/linux/memcontrol.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/netdevice.h
include/linux/of_reserved_mem.h [deleted file]
include/linux/sched.h
include/linux/tc_act/tc_defact.h [deleted file]
include/linux/usb/usb_phy_gen_xceiv.h
include/linux/usb_usual.h
include/linux/yam.h
include/net/cipso_ipv4.h
include/net/dst.h
include/net/ip6_route.h
include/net/mac802154.h
include/net/sock.h
include/sound/rcar_snd.h
include/trace/events/target.h
include/uapi/drm/drm_mode.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/tc_act/tc_defact.h [new file with mode: 0644]
include/uapi/rdma/ib_user_verbs.h
ipc/sem.c
ipc/util.c
kernel/cgroup.c
kernel/events/core.c
kernel/mutex.c
kernel/power/hibernate.c
kernel/time/clockevents.c
lib/percpu-refcount.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/mprotect.c
mm/mremap.c
mm/oom_kill.c
mm/page-writeback.c
mm/slab_common.c
mm/swapfile.c
mm/vmscan.c
mm/zswap.c
net/8021q/vlan_netlink.c
net/batman-adv/main.c
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/bridge/br_fdb.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp_if.c
net/bridge/br_vlan.c
net/compat.c
net/core/dev.c
net/core/filter.c
net/core/secure_seq.c
net/core/sock.c
net/ieee802154/6lowpan.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_output.c
net/ipv4/ip_vti.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_policy.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipcomp6.c
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/xfrm6_policy.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/nf_conntrack_h323_main.c
net/sched/sch_fq.c
net/sched/sch_netem.c
net/sctp/output.c
net/socket.c
net/unix/af_unix.c
net/unix/diag.c
net/wireless/core.c
net/wireless/core.h
net/wireless/ibss.c
net/wireless/nl80211.c
net/wireless/radiotap.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_user.c
security/apparmor/apparmorfs.c
security/apparmor/policy.c
sound/pci/hda/hda_generic.c
sound/pci/rme9652/hdsp.c
sound/soc/codecs/pcm1681.c
sound/soc/codecs/pcm1792a.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-mc13783.c
sound/soc/fsl/imx-ssi.c
sound/soc/fsl/imx-ssi.h
sound/soc/omap/Kconfig
sound/soc/sh/rcar/rsnd.h
sound/usb/usx2y/us122l.c
tools/perf/util/event.c
tools/perf/util/evsel.c
tools/perf/util/probe-finder.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/testing/selftests/timers/posix_timers.c

index 2be603c..a6b6857 100644 (file)
@@ -37,8 +37,8 @@ Description:
                that the USB device has been connected to the machine.  This
                file is read-only.
 Users:
-               PowerTOP <power@bughost.org>
-               http://www.lesswatts.org/projects/powertop/
+               PowerTOP <powertop@lists.01.org>
+               https://01.org/powertop/
 
 What:          /sys/bus/usb/device/.../power/active_duration
 Date:          January 2008
@@ -57,8 +57,8 @@ Description:
                will give an integer percentage.  Note that this does not
                account for counter wrap.
 Users:
-               PowerTOP <power@bughost.org>
-               http://www.lesswatts.org/projects/powertop/
+               PowerTOP <powertop@lists.01.org>
+               https://01.org/powertop/
 
 What:          /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
 Date:          January 2008
index 9d43e76..efe449b 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/devices/.../power/
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power directory contains attributes
                allowing the user space to check and modify some power
@@ -8,7 +8,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/wakeup attribute allows the user
                space to check if the device is enabled to wake up the system
@@ -34,7 +34,7 @@ Description:
 
 What:          /sys/devices/.../power/control
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/control attribute allows the user
                space to control the run-time power management of the device.
@@ -53,7 +53,7 @@ Description:
 
 What:          /sys/devices/.../power/async
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../async attribute allows the user space to
                enable or disable the device's suspend and resume callbacks to
@@ -79,7 +79,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_count
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_count attribute contains the number
                of signaled wakeup events associated with the device.  This
@@ -88,7 +88,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_active_count
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_active_count attribute contains the
                number of times the processing of wakeup events associated with
@@ -98,7 +98,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_abort_count
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_abort_count attribute contains the
                number of times the processing of a wakeup event associated with
@@ -109,7 +109,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_expire_count
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_expire_count attribute contains the
                number of times a wakeup event associated with the device has
@@ -119,7 +119,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_active
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_active attribute contains either 1,
                or 0, depending on whether or not a wakeup event associated with
@@ -129,7 +129,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_total_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_total_time_ms attribute contains
                the total time of processing wakeup events associated with the
@@ -139,7 +139,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_max_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_max_time_ms attribute contains
                the maximum time of processing a single wakeup event associated
@@ -149,7 +149,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_last_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_last_time_ms attribute contains
                the value of the monotonic clock corresponding to the time of
@@ -160,7 +160,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_prevent_sleep_time_ms
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
                contains the total time the device has been preventing
@@ -189,7 +189,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_latency_us
 Date:          March 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_resume_latency_us attribute
                contains the PM QoS resume latency limit for the given device,
@@ -207,7 +207,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_no_power_off
 Date:          September 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_no_power_off attribute
                is used for manipulating the PM QoS "no power off" flag.  If
@@ -222,7 +222,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_remote_wakeup
 Date:          September 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_remote_wakeup attribute
                is used for manipulating the PM QoS "remote wakeup required"
index 2177726..205a738 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/power/
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power directory will contain files that will
                provide a unified interface to the power management
@@ -8,7 +8,7 @@ Description:
 
 What:          /sys/power/state
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/state file controls the system power state.
                Reading from this file returns what states are supported,
@@ -22,7 +22,7 @@ Description:
 
 What:          /sys/power/disk
 Date:          September 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/disk file controls the operating mode of the
                suspend-to-disk mechanism.  Reading from this file returns
@@ -67,7 +67,7 @@ Description:
 
 What:          /sys/power/image_size
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/image_size file controls the size of the image
                created by the suspend-to-disk mechanism.  It can be written a
@@ -84,7 +84,7 @@ Description:
 
 What:          /sys/power/pm_trace
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/pm_trace file controls the code which saves the
                last PM event point in the RTC across reboots, so that you can
@@ -133,7 +133,7 @@ Description:
 
 What:          /sys/power/pm_async
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/pm_async file controls the switch allowing the
                user space to enable or disable asynchronous suspend and resume
@@ -146,7 +146,7 @@ Description:
 
 What:          /sys/power/wakeup_count
 Date:          July 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wakeup_count file allows user space to put the
                system into a sleep state while taking into account the
@@ -161,7 +161,7 @@ Description:
 
 What:          /sys/power/reserved_size
 Date:          May 2011
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/reserved_size file allows user space to control
                the amount of memory reserved for allocations made by device
@@ -175,7 +175,7 @@ Description:
 
 What:          /sys/power/autosleep
 Date:          April 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/autosleep file can be written one of the strings
                returned by reads from /sys/power/state.  If that happens, a
@@ -192,7 +192,7 @@ Description:
 
 What:          /sys/power/wake_lock
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wake_lock file allows user space to create
                wakeup source objects and activate them on demand (if one of
@@ -219,7 +219,7 @@ Description:
 
 What:          /sys/power/wake_unlock
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wake_unlock file allows user space to deactivate
                wakeup sources created with the help of /sys/power/wake_lock.
index febbb1b..784841c 100644 (file)
@@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
 
 When to use this method is described in detail on the
 Linux/ACPI home page:
-http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+https://01.org/linux-acpi/documentation/overriding-dsdt
index 4848db8..8a4da64 100644 (file)
@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
        nlh->nlmsg_seq = seq++;
        nlh->nlmsg_pid = getpid();
        nlh->nlmsg_type = NLMSG_DONE;
-       nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+       nlh->nlmsg_len = size;
        nlh->nlmsg_flags = 0;
 
        m = NLMSG_DATA(nlh);
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
deleted file mode 100644 (file)
index eb24693..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-*** Memory binding ***
-
-The /memory node provides basic information about the address and size
-of the physical memory. This node is usually filled or updated by the
-bootloader, depending on the actual memory configuration of the given
-hardware.
-
-The memory layout is described by the following node:
-
-/ {
-       #address-cells = <(n)>;
-       #size-cells = <(m)>;
-       memory {
-               device_type = "memory";
-               reg =  <(baseaddr1) (size1)
-                       (baseaddr2) (size2)
-                       ...
-                       (baseaddrN) (sizeN)>;
-       };
-       ...
-};
-
-A memory node follows the typical device tree rules for "reg" property:
-n:             number of cells used to store base address value
-m:             number of cells used to store size value
-baseaddrX:     defines a base address of the defined memory bank
-sizeX:         the size of the defined memory bank
-
-
-More than one memory bank can be defined.
-
-
-*** Reserved memory regions ***
-
-In the /memory/reserved-memory node one can create child nodes describing
-particular reserved (excluded from normal use) memory regions. Such
-memory regions are usually intended for special usage by various device
-drivers. Good examples are contiguous memory allocations or memory
-sharing with another operating system on the same hardware board. Those
-special memory regions might depend on the board configuration and the
-devices used on the target system.
-
-Parameters for each memory region can be encoded into the device tree
-with the following convention:
-
-[(label):] (name) {
-       compatible = "linux,contiguous-memory-region", "reserved-memory-region";
-       reg = <(address) (size)>;
-       (linux,default-contiguous-region);
-};
-
-compatible:    one or more of:
-       - "linux,contiguous-memory-region" - enables binding of this
-         region to Contiguous Memory Allocator (special region for
-         contiguous memory allocations, shared with movable system
-         memory, Linux kernel-specific).
-       - "reserved-memory-region" - compatibility is defined, given
-         region is assigned for exclusive usage for by the respective
-         devices.
-
-reg:   standard property defining the base address and size of
-       the memory region
-
-linux,default-contiguous-region: property indicating that the region
-       is the default region for all contiguous memory
-       allocations, Linux specific (optional)
-
-It is optional to specify the base address, so if one wants to use
-autoconfiguration of the base address, '0' can be specified as a base
-address in the 'reg' property.
-
-The /memory/reserved-memory node must contain the same #address-cells
-and #size-cells value as the root node.
-
-
-*** Device node's properties ***
-
-Once regions in the /memory/reserved-memory node have been defined, they
-may be referenced by other device nodes. Bindings that wish to reference
-memory regions should explicitly document their use of the following
-property:
-
-memory-region = <&phandle_to_defined_region>;
-
-This property indicates that the device driver should use the memory
-region pointed to by the given phandle.
-
-
-*** Example ***
-
-This example defines a memory layout consisting of 4 memory banks. Three
-contiguous regions are defined for the Linux kernel: one default region for
-all device drivers (named contig_mem, placed at 0x72000000, 64MiB), one
-dedicated to the framebuffer device (labelled display_mem, placed at
-0x78000000, 8MiB) and one for multimedia processing (labelled multimedia_mem,
-placed at 0x77000000, 64MiB). The 'display_mem' region is then assigned to
-the fb@12300000 device for DMA memory allocations (Linux kernel drivers will
-use CMA if available, or dma-exclusive usage otherwise). 'multimedia_mem' is
-assigned to the scaler@12500000 and codec@12600000 devices for contiguous
-memory allocations when the CMA driver is enabled.
-
-The reason for creating a separate region for the framebuffer device is
-to match the framebuffer base address to the one configured by the
-bootloader, so that no glitches appear in the displayed boot logo once
-the Linux kernel drivers start. The scaler and codec drivers should
-share the memory allocations.
-
-/ {
-       #address-cells = <1>;
-       #size-cells = <1>;
-
-       /* ... */
-
-       memory {
-               reg =  <0x40000000 0x10000000
-                       0x50000000 0x10000000
-                       0x60000000 0x10000000
-                       0x70000000 0x10000000>;
-
-               reserved-memory {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-
-                       /*
-                        * global autoconfigured region for contiguous allocations
-                        * (used only with Contiguous Memory Allocator)
-                        */
-                       contig_region@0 {
-                               compatible = "linux,contiguous-memory-region";
-                               reg = <0x0 0x4000000>;
-                               linux,default-contiguous-region;
-                       };
-
-                       /*
-                        * special region for framebuffer
-                        */
-                       display_region: region@78000000 {
-                               compatible = "linux,contiguous-memory-region", "reserved-memory-region";
-                               reg = <0x78000000 0x800000>;
-                       };
-
-                       /*
-                        * special region for multimedia processing devices
-                        */
-                       multimedia_region: region@77000000 {
-                               compatible = "linux,contiguous-memory-region";
-                               reg = <0x77000000 0x4000000>;
-                       };
-               };
-       };
-
-       /* ... */
-
-       fb0: fb@12300000 {
-               status = "okay";
-               memory-region = <&display_region>;
-       };
-
-       scaler: scaler@12500000 {
-               status = "okay";
-               memory-region = <&multimedia_region>;
-       };
-
-       codec: codec@12600000 {
-               status = "okay";
-               memory-region = <&multimedia_region>;
-       };
-};
index 8a0cbf3..3438384 100644 (file)
@@ -237,11 +237,11 @@ F:        drivers/platform/x86/acer-wmi.c
 
 ACPI
 M:     Len Brown <lenb@kernel.org>
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
-Q:     http://patchwork.kernel.org/project/linux-acpi/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
+W:     https://01.org/linux-acpi
+Q:     https://patchwork.kernel.org/project/linux-acpi/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 S:     Supported
 F:     drivers/acpi/
 F:     drivers/pnp/pnpacpi/
@@ -256,21 +256,21 @@ F:        drivers/pci/*/*/*acpi*
 ACPI FAN DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/fan.c
 
 ACPI THERMAL DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/*thermal*
 
 ACPI VIDEO DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/video.c
 
@@ -1009,6 +1009,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-mvebu/
@@ -1016,6 +1017,7 @@ F:        arch/arm/mach-mvebu/
 ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
+M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-dove/
@@ -1148,6 +1150,13 @@ F:       drivers/net/ethernet/i825xx/ether1*
 F:     drivers/net/ethernet/seeq/ether3*
 F:     drivers/scsi/arm/
 
+ARM/Rockchip SoC support
+M:     Heiko Stuebner <heiko@sntech.de>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-rockchip/
+F:     drivers/*/*rockchip*
+
 ARM/SHARK MACHINE SUPPORT
 M:     Alexander Schulz <alex@shark-linux.de>
 W:     http://www.shark-linux.de/shark.html
@@ -1791,6 +1800,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
+M:     Veaceslav Falico <vfalico@redhat.com>
 M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
@@ -2300,7 +2310,7 @@ S:        Maintained
 F:     drivers/net/ethernet/ti/cpmac.c
 
 CPU FREQUENCY DRIVERS
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
 L:     cpufreq@vger.kernel.org
 L:     linux-pm@vger.kernel.org
@@ -2331,7 +2341,7 @@ S:      Maintained
 F:      drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVERS
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
@@ -2718,6 +2728,8 @@ T:        git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vinod.koul@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
+L:     dmaengine@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Supported
 F:     drivers/dma/
 F:     include/linux/dma*
@@ -2821,7 +2833,7 @@ M:        Terje Bergström <tbergstrom@nvidia.com>
 L:     dri-devel@lists.freedesktop.org
 L:     linux-tegra@vger.kernel.org
 T:     git git://anongit.freedesktop.org/tegra/linux.git
-S:     Maintained
+S:     Supported
 F:     drivers/gpu/host1x/
 F:     include/uapi/drm/tegra_drm.h
 F:     Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -3553,7 +3565,7 @@ F:        fs/freevxfs/
 
 FREEZER
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/freezing-of-tasks.txt
@@ -3624,6 +3636,12 @@ L:       linux-scsi@vger.kernel.org
 S:     Odd Fixes (e.g., new signatures)
 F:     drivers/scsi/fdomain.*
 
+GCOV BASED KERNEL PROFILING
+M:     Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+S:     Maintained
+F:     kernel/gcov/
+F:     Documentation/gcov.txt
+
 GDT SCSI DISK ARRAY CONTROLLER DRIVER
 M:     Achim Leubner <achim_leubner@adaptec.com>
 L:     linux-scsi@vger.kernel.org
@@ -3889,7 +3907,7 @@ F:        drivers/video/hgafb.c
 
 HIBERNATION (aka Software Suspend, aka swsusp)
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     arch/x86/power/
@@ -4339,7 +4357,7 @@ F:        drivers/video/i810/
 INTEL MENLOW THERMAL DRIVER
 M:     Sujith Thomas <sujith.thomas@intel.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/platform/x86/intel_menlow.c
 
@@ -4351,7 +4369,10 @@ F:       arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
 M:     Dan Williams <dan.j.williams@intel.com>
-S:     Maintained
+M:     Dave Jiang <dave.jiang@intel.com>
+L:     dmaengine@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
+S:     Supported
 F:     drivers/dma/ioat*
 
 INTEL IOMMU (VT-d)
@@ -7816,6 +7837,13 @@ F:       Documentation/sound/alsa/soc/
 F:     sound/soc/
 F:     include/sound/soc*
 
+SOUND - DMAENGINE HELPERS
+M:     Lars-Peter Clausen <lars@metafoo.de>
+S:     Supported
+F:     include/sound/dmaengine_pcm.h
+F:     sound/core/pcm_dmaengine.c
+F:     sound/soc/soc-generic-dmaengine-pcm.c
+
 SPARC + UltraSPARC (sparc/sparc64)
 M:     "David S. Miller" <davem@davemloft.net>
 L:     sparclinux@vger.kernel.org
@@ -8095,7 +8123,7 @@ F:        drivers/sh/
 SUSPEND TO RAM
 M:     Len Brown <len.brown@intel.com>
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/
@@ -8288,14 +8316,72 @@ L:      linux-media@vger.kernel.org
 S:     Maintained
 F:     drivers/media/rc/ttusbir.c
 
-TEGRA SUPPORT
+TEGRA ARCHITECTURE SUPPORT
 M:     Stephen Warren <swarren@wwwdotorg.org>
+M:     Thierry Reding <thierry.reding@gmail.com>
 L:     linux-tegra@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/linux-tegra/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
 S:     Supported
 N:     [^a-z]tegra
 
+TEGRA ASOC DRIVER
+M:     Stephen Warren <swarren@wwwdotorg.org>
+S:     Supported
+F:     sound/soc/tegra/
+
+TEGRA CLOCK DRIVER
+M:     Peter De Schrijver <pdeschrijver@nvidia.com>
+M:     Prashant Gaikwad <pgaikwad@nvidia.com>
+S:     Supported
+F:     drivers/clk/tegra/
+
+TEGRA DMA DRIVER
+M:     Laxman Dewangan <ldewangan@nvidia.com>
+S:     Supported
+F:     drivers/dma/tegra20-apb-dma.c
+
+TEGRA GPIO DRIVER
+M:     Stephen Warren <swarren@wwwdotorg.org>
+S:     Supported
+F:     drivers/gpio/gpio-tegra.c
+
+TEGRA I2C DRIVER
+M:     Laxman Dewangan <ldewangan@nvidia.com>
+S:     Supported
+F:     drivers/i2c/busses/i2c-tegra.c
+
+TEGRA IOMMU DRIVERS
+M:     Hiroshi Doyu <hdoyu@nvidia.com>
+S:     Supported
+F:     drivers/iommu/tegra*
+
+TEGRA KBC DRIVER
+M:     Rakesh Iyer <riyer@nvidia.com>
+M:     Laxman Dewangan <ldewangan@nvidia.com>
+S:     Supported
+F:     drivers/input/keyboard/tegra-kbc.c
+
+TEGRA PINCTRL DRIVER
+M:     Stephen Warren <swarren@wwwdotorg.org>
+S:     Supported
+F:     drivers/pinctrl/pinctrl-tegra*
+
+TEGRA PWM DRIVER
+M:     Thierry Reding <thierry.reding@gmail.com>
+S:     Supported
+F:     drivers/pwm/pwm-tegra.c
+
+TEGRA SERIAL DRIVER
+M:     Laxman Dewangan <ldewangan@nvidia.com>
+S:     Supported
+F:     drivers/tty/serial/serial-tegra.c
+
+TEGRA SPI DRIVER
+M:     Laxman Dewangan <ldewangan@nvidia.com>
+S:     Supported
+F:     drivers/spi/spi-tegra*
+
 TEHUTI ETHERNET DRIVER
 M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
index deec08b..868c0eb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
index ff1aea0..72693a6 100644 (file)
@@ -9,11 +9,6 @@
        model = "ARM Integrator/CP";
        compatible = "arm,integrator-cp";
 
-       aliases {
-               arm,timer-primary = &timer2;
-               arm,timer-secondary = &timer1;
-       };
-
        chosen {
                bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
        };
        };
 
        timer0: timer@13000000 {
+               /* TIMER0 runs @ 25MHz */
                compatible = "arm,integrator-cp-timer";
+               status = "disabled";
        };
 
        timer1: timer@13000100 {
+               /* TIMER1 runs @ 1MHz */
                compatible = "arm,integrator-cp-timer";
        };
 
        timer2: timer@13000200 {
+               /* TIMER2 runs @ 1MHz */
                compatible = "arm,integrator-cp-timer";
        };
 
index 370236d..9902509 100644 (file)
@@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
 {
        phys_reset_t phys_reset;
 
-       BUG_ON(!platform_ops);
+       if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+               return;
        BUG_ON(!irqs_disabled());
 
        /*
@@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
 {
        phys_reset_t phys_reset;
 
-       BUG_ON(!platform_ops);
+       if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+               return;
        BUG_ON(!irqs_disabled());
 
        /* Very similar to mcpm_cpu_power_down() */
index d56c932..025f6ce 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <asm/mach/sharpsl_param.h>
+#include <asm/memory.h>
 
 /*
  * Certain hardware parameters determined at the time of device manufacture,
  */
 #ifdef CONFIG_ARCH_SA1100
 #define PARAM_BASE     0xe8ffc000
+#define param_start(x) (void *)(x)
 #else
 #define PARAM_BASE     0xa0000a00
+#define param_start(x) __va(x)
 #endif
 #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 )  | ( b << 8 ) | a )
 
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-       memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+       memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
 
        if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
                sharpsl_param.comadj=-1;
index d3db398..59ceae8 100644 (file)
@@ -31,5 +31,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += types.h
 generic-y += unaligned.h
index 0f7b762..fc82a88 100644 (file)
@@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
  */
 void mcpm_cpu_power_down(void);
 
@@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
  */
 void mcpm_cpu_suspend(u64 expected_residency);
 
index f1d96d4..73ddd72 100644 (file)
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
                unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                pr_warning("%s called with max args %d, handling only %d\n",
                           __func__, i + n, SYSCALL_MAX_ARGS);
index 2c7cc1e..476de57 100644 (file)
@@ -487,7 +487,26 @@ __fixup_smp:
        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
        and     r0, r0, #0xc0000000     @ multiprocessing extensions and
        teq     r0, #0x80000000         @ not part of a uniprocessor system?
-       moveq   pc, lr                  @ yes, assume SMP
+       bne    __fixup_smp_on_up        @ no, assume UP
+
+       @ Core indicates it is SMP. Check for Aegis SOC where a single
+       @ Cortex-A9 CPU is present but SMP operations fault.
+       mov     r4, #0x41000000
+       orr     r4, r4, #0x0000c000
+       orr     r4, r4, #0x00000090
+       teq     r3, r4                  @ Check for ARM Cortex-A9
+       movne   pc, lr                  @ Not ARM Cortex-A9,
+
+       @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
+       @ below address check will need to be #ifdef'd or equivalent
+       @ for the Aegis platform.
+       mrc     p15, 4, r0, c15, c0     @ get SCU base address
+       teq     r0, #0x0                @ '0' on actual UP A9 hardware
+       beq     __fixup_smp_on_up       @ So its an A9 UP
+       ldr     r0, [r0, #4]            @ read SCU Config
+       and     r0, r0, #0x3            @ number of CPUs
+       teq     r0, #0x0                @ is 1?
+       movne   pc, lr
 
 __fixup_smp_on_up:
        adr     r0, 1f
index f5e1a84..1272ed2 100644 (file)
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
                                break;
 
                len = (j - i) << PAGE_SHIFT;
-               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               ret = iommu_map(mapping->domain, iova, phys, len,
+                               IOMMU_READ|IOMMU_WRITE);
                if (ret < 0)
                        goto fail;
                iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                         GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+       int prot;
+
+       switch (dir) {
+       case DMA_BIDIRECTIONAL:
+               prot = IOMMU_READ | IOMMU_WRITE;
+               break;
+       case DMA_TO_DEVICE:
+               prot = IOMMU_READ;
+               break;
+       case DMA_FROM_DEVICE:
+               prot = IOMMU_WRITE;
+               break;
+       default:
+               prot = 0;
+       }
+
+       return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
        int ret = 0;
        unsigned int count;
        struct scatterlist *s;
+       int prot;
 
        size = PAGE_ALIGN(size);
        *handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                        !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               prot = __dma_direction_to_prot(dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, prot);
                if (ret < 0)
                        goto fail;
                count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       switch (dir) {
-       case DMA_BIDIRECTIONAL:
-               prot = IOMMU_READ | IOMMU_WRITE;
-               break;
-       case DMA_TO_DEVICE:
-               prot = IOMMU_READ;
-               break;
-       case DMA_FROM_DEVICE:
-               prot = IOMMU_WRITE;
-               break;
-       default:
-               prot = 0;
-       }
+       prot = __dma_direction_to_prot(dir);
 
        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
index febaee7..18ec4c5 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
@@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
        if (mdesc->reserve)
                mdesc->reserve();
 
-       early_init_dt_scan_reserved_mem();
-
        /*
         * reserve memory for DMA contiguous allocations,
         * must come from DMA area inside low memory
index f50d223..99b44e0 100644 (file)
@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index 0f90569..9387cc2 100644 (file)
@@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
 CONFIG_LLC2=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
index b647b18..9002532 100644 (file)
@@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
 CONFIG_LLC2=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_BLK_DEV_UMEM=m
index e289f5b..f1a0c25 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
@@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
index 311ca36..ec1b014 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
@@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
 CONFIG_IP_NF_QUEUE=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_BLK_DEV_UMEM=m
index f110063..e1c8d20 100644 (file)
@@ -62,6 +62,8 @@ CONFIG_TIPC=m
 CONFIG_LLC2=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
index dfe88f6..ba61495 100644 (file)
@@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_LLC2=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
index b521c0a..c035673 100644 (file)
@@ -602,7 +602,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
 }
-EXPORT_SYMBOL_GPL(flush_cache_page);
 
 #ifdef CONFIG_PARISC_TMPALIAS
 
index 37aabd7..d2d5825 100644 (file)
@@ -195,6 +195,8 @@ common_stext:
        ldw             MEM_PDC_HI(%r0),%r6
        depd            %r6, 31, 32, %r3        /* move to upper word */
 
+       mfctl           %cr30,%r6               /* PCX-W2 firmware bug */
+
        ldo             PDC_PSW(%r0),%arg0              /* 21 */
        ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
        ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
@@ -203,6 +205,8 @@ common_stext:
        copy            %r0,%arg3
 
 stext_pdc_ret:
+       mtctl           %r6,%cr30               /* restore task thread info */
+
        /* restore rfi target address*/
        ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
        tophys_r1       %r10
index bf56e33..2345bdb 100644 (file)
@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index 9b60a36..2204400 100644 (file)
@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-       if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+       if (!MACHINE_HAS_ESOP &&
+           (pte_val(entry) & _PAGE_PRESENT) &&
+           (pte_val(entry) & _PAGE_WRITE)) {
                /*
                 * Without enhanced suppression-on-protection force
                 * the dirty bit on for all writable ptes.
index 8ad8af9..819b94d 100644 (file)
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_tod_clock(void)
-{
-       unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-       asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
-       asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
-       return clk;
-}
-
 static inline void get_tod_clock_ext(char *clk)
 {
        asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
 {
        unsigned char clk[16];
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
 
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+       unsigned long long clk;
+
+       asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+       return clk;
+#else
+       return get_tod_clock();
+#endif
+}
+
 static inline cycles_t get_cycles(void)
 {
        return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-       return get_tod_clock_xt() - sched_clock_base_cc;
+       return get_tod_clock() - sched_clock_base_cc;
 }
 
 /**
index 1389b63..adaa9e9 100644 (file)
@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
                        break;
                }
        }
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,7 +148,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
                        break;
                }
        }
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
index f1279dc..17d62fe 100644 (file)
@@ -867,7 +867,7 @@ static inline void
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
                        int exception)
 {
-       active->id.stck = get_tod_clock();
+       active->id.stck = get_tod_clock_fast();
        active->id.fields.cpuid = smp_processor_id();
        active->caller = __builtin_return_address(0);
        active->id.fields.exception = exception;
index 7f35cb3..7f1f7ac 100644 (file)
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        }
 
        if ((!rc) && (vcpu->arch.sie_block->ckc <
-               get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+               get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                        (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                goto no_timer;
        }
 
-       now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+       now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        }
 
        if ((vcpu->arch.sie_block->ckc <
-               get_tod_clock() + vcpu->arch.sie_block->epoch))
+               get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);
 
        if (atomic_read(&fi->active)) {
index 57c87d7..a9f3d00 100644 (file)
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
        do {
                set_clock_comparator(end);
                vtime_stop_cpu();
-       } while (get_tod_clock() < end);
+       } while (get_tod_clock_fast() < end);
        lockdep_on();
        __ctl_load(cr0, 0, 0);
        __ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
        u64 clock_saved, end;
 
-       end = get_tod_clock() + (usecs << 12);
+       end = get_tod_clock_fast() + (usecs << 12);
        do {
                clock_saved = 0;
                if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
                vtime_stop_cpu();
                if (clock_saved)
                        local_tick_enable(clock_saved);
-       } while (get_tod_clock() < end);
+       } while (get_tod_clock_fast() < end);
 }
 
 /*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
 {
        u64 end;
 
-       end = get_tod_clock() + (usecs << 12);
-       while (get_tod_clock() < end)
+       end = get_tod_clock_fast() + (usecs << 12);
+       while (get_tod_clock_fast() < end)
                cpu_relax();
 }
 
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
 
        nsecs <<= 9;
        do_div(nsecs, 125);
-       end = get_tod_clock() + nsecs;
+       end = get_tod_clock_fast() + nsecs;
        if (nsecs & ~0xfffUL)
                __udelay(nsecs >> 12);
-       while (get_tod_clock() < end)
+       while (get_tod_clock_fast() < end)
                barrier();
 }
 EXPORT_SYMBOL(__ndelay);
index 7092392..a5df511 100644 (file)
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
        struct bpf_binary_header *header = (void *)addr;
 
        if (fp->bpf_func == sk_run_filter)
-               return;
+               goto free_filter;
        set_memory_rw(addr, header->pages);
        module_free(NULL, header);
+free_filter:
+       kfree(fp);
 }
index 9c7be59..218b6b2 100644 (file)
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index 145d703..f67e839 100644 (file)
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
        tristate "CPU microcode loading support"
+       depends on CPU_SUP_AMD || CPU_SUP_INTEL
        select FW_LOADER
        ---help---
 
index 1191ac1..a419814 100644 (file)
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
                break;
        case UV3_HUB_PART_NUMBER:
        case UV3_HUB_PART_NUMBER_X:
-               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
                break;
        }
 
index 697b93a..a0e2a8a 100644 (file)
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;
 
-       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+       pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+
+static __init int kvm_spinlock_init_jump(void)
+{
+       if (!kvm_para_available())
+               return 0;
+       if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+               return 0;
 
        static_key_slow_inc(&paravirt_ticketlocks_enabled);
+       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
 
-       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-       pv_lock_ops.unlock_kick = kvm_unlock_kick;
+       return 0;
 }
+early_initcall(kvm_spinlock_init_jump);
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
index 79c216a..516593e 100644 (file)
@@ -772,13 +772,21 @@ out:
        return;
 }
 
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+       struct sk_filter *fp = container_of(work, struct sk_filter, work);
+       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+       struct bpf_binary_header *header = (void *)addr;
+
+       set_memory_rw(addr, header->pages);
+       module_free(NULL, header);
+       kfree(fp);
+}
+
 void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter) {
-               unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-               struct bpf_binary_header *header = (void *)addr;
-
-               set_memory_rw(addr, header->pages);
-               module_free(NULL, header);
+               INIT_WORK(&fp->work, bpf_jit_free_deferred);
+               schedule_work(&fp->work);
        }
 }
index d1e4777..31d0475 100644 (file)
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
                   old memory can be recycled */
                make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+               /*
+                * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+                * expects __USER_DS
+                */
+               loadsegment(ds, __USER_DS);
+               loadsegment(es, __USER_DS);
+#endif
+
                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
index 1eb09ee..a8287b4 100644 (file)
@@ -222,11 +222,16 @@ check_hybrid:
         * the disk size.
         *
         * Hybrid MBRs do not necessarily comply with this.
+        *
+        * Consider a bad value here to be a warning to support dd'ing
+        * an image from a smaller disk to a larger disk.
         */
        if (ret == GPT_MBR_PROTECTIVE) {
                sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
                if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
-                       ret = 0;
+                       pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
+                                sz, min_t(uint32_t,
+                                          total_sectors - 1, 0xFFFFFFFF));
        }
 done:
        return ret;
index 22327e6..6efe2ac 100644 (file)
@@ -24,7 +24,7 @@ menuconfig ACPI
          are configured, ACPI is used.
 
          The project home page for the Linux ACPI subsystem is here:
-         <http://www.lesswatts.org/projects/acpi/>
+         <https://01.org/linux-acpi>
 
          Linux support for ACPI is based on Intel Corporation's ACPI
          Component Architecture (ACPI CA).  For more information on the
@@ -123,9 +123,9 @@ config ACPI_BUTTON
        default y
        help
          This driver handles events on the power, sleep, and lid buttons.
-         A daemon reads /proc/acpi/event and perform user-defined actions
-         such as shutting down the system.  This is necessary for
-         software-controlled poweroff.
+         A daemon reads events from input devices or via netlink and
+         performs user-defined actions such as shutting down the system.
+         This is necessary for software-controlled poweroff.
 
          To compile this driver as a module, choose M here:
          the module will be called button.
index 59d3202..a94383d 100644 (file)
@@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
        }
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
-
-/**
- * acpi_dev_pm_add_dependent - Add physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
-{
-       struct acpi_device_physical_node *dep;
-       struct acpi_device *adev;
-
-       if (!depdev || acpi_bus_get_device(handle, &adev))
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(dep, &adev->power_dependent, node)
-               if (dep->dev == depdev)
-                       goto out;
-
-       dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-       if (dep) {
-               dep->dev = depdev;
-               list_add_tail(&dep->node, &adev->power_dependent);
-       }
-
- out:
-       mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
-
-/**
- * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
-{
-       struct acpi_device_physical_node *dep;
-       struct acpi_device *adev;
-
-       if (!depdev || acpi_bus_get_device(handle, &adev))
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(dep, &adev->power_dependent, node)
-               if (dep->dev == depdev) {
-                       list_del(&dep->node);
-                       kfree(dep);
-                       break;
-               }
-
-       mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
 #endif /* CONFIG_PM */
index 0dbe5cd..c2ad391 100644 (file)
@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
 #define ACPI_POWER_RESOURCE_STATE_ON   0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
 
-struct acpi_power_dependent_device {
-       struct list_head node;
-       struct acpi_device *adev;
-       struct work_struct work;
-};
-
 struct acpi_power_resource {
        struct acpi_device device;
        struct list_head list_node;
-       struct list_head dependent;
        char *name;
        u32 system_level;
        u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
        return 0;
 }
 
-static void acpi_power_resume_dependent(struct work_struct *work)
-{
-       struct acpi_power_dependent_device *dep;
-       struct acpi_device_physical_node *pn;
-       struct acpi_device *adev;
-       int state;
-
-       dep = container_of(work, struct acpi_power_dependent_device, work);
-       adev = dep->adev;
-       if (acpi_power_get_inferred_state(adev, &state))
-               return;
-
-       if (state > ACPI_STATE_D0)
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(pn, &adev->physical_node_list, node)
-               pm_request_resume(pn->dev);
-
-       list_for_each_entry(pn, &adev->power_dependent, node)
-               pm_request_resume(pn->dev);
-
-       mutex_unlock(&adev->physical_node_lock);
-}
-
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
        acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
                                  resource->name));
        } else {
                result = __acpi_power_on(resource);
-               if (result) {
+               if (result)
                        resource->ref_count--;
-               } else {
-                       struct acpi_power_dependent_device *dep;
-
-                       list_for_each_entry(dep, &resource->dependent, node)
-                               schedule_work(&dep->work);
-               }
        }
        return result;
 }
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
        return result;
 }
 
-static void acpi_power_add_dependent(struct acpi_power_resource *resource,
-                                    struct acpi_device *adev)
-{
-       struct acpi_power_dependent_device *dep;
-
-       mutex_lock(&resource->resource_lock);
-
-       list_for_each_entry(dep, &resource->dependent, node)
-               if (dep->adev == adev)
-                       goto out;
-
-       dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-       if (!dep)
-               goto out;
-
-       dep->adev = adev;
-       INIT_WORK(&dep->work, acpi_power_resume_dependent);
-       list_add_tail(&dep->node, &resource->dependent);
-
- out:
-       mutex_unlock(&resource->resource_lock);
-}
-
-static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
-                                       struct acpi_device *adev)
-{
-       struct acpi_power_dependent_device *dep;
-       struct work_struct *work = NULL;
-
-       mutex_lock(&resource->resource_lock);
-
-       list_for_each_entry(dep, &resource->dependent, node)
-               if (dep->adev == adev) {
-                       list_del(&dep->node);
-                       work = &dep->work;
-                       break;
-               }
-
-       mutex_unlock(&resource->resource_lock);
-
-       if (work) {
-               cancel_work_sync(work);
-               kfree(dep);
-       }
-}
-
 static struct attribute *attrs[] = {
        NULL,
 };
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
 
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
 {
-       struct acpi_device_power_state *ps;
-       struct acpi_power_resource_entry *entry;
        int state;
 
        if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
        if (!adev->power.flags.power_resources)
                return;
 
-       ps = &adev->power.states[ACPI_STATE_D0];
-       list_for_each_entry(entry, &ps->resources, node) {
-               struct acpi_power_resource *resource = entry->resource;
-
-               if (add)
-                       acpi_power_add_dependent(resource, adev);
-               else
-                       acpi_power_remove_dependent(resource, adev);
-       }
-
        for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
                acpi_power_expose_hide(adev,
                                       &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
        acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
                                ACPI_STA_DEFAULT);
        mutex_init(&resource->resource_lock);
-       INIT_LIST_HEAD(&resource->dependent);
        INIT_LIST_HEAD(&resource->list_node);
        resource->name = device->pnp.bus_id;
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
                mutex_lock(&resource->resource_lock);
 
                result = acpi_power_get_state(resource->device.handle, &state);
-               if (result)
+               if (result) {
+                       mutex_unlock(&resource->resource_lock);
                        continue;
+               }
 
                if (state == ACPI_POWER_RESOURCE_STATE_OFF
                    && resource->ref_count) {
index 407ad13..fee8a29 100644 (file)
@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
        INIT_LIST_HEAD(&device->wakeup_list);
        INIT_LIST_HEAD(&device->physical_node_list);
        mutex_init(&device->physical_node_lock);
-       INIT_LIST_HEAD(&device->power_dependent);
 
        new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
        if (!new_bus_id) {
index 9d715ae..8e28f92 100644 (file)
@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
        else
-               printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+               dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
 
        if (pi.flags & ATA_FLAG_EM)
                ahci_reset_em(host);
index 2daaee0..7d3b853 100644 (file)
@@ -184,7 +184,7 @@ static int ahci_probe(struct platform_device *pdev)
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
        else
-               printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+               dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
 
        if (pi.flags & ATA_FLAG_EM)
                ahci_reset_em(host);
index acfd0f7..aaac4fb 100644 (file)
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
                                rc = ap->ops->transmit_led_message(ap,
                                                               emp->led_state,
                                                               4);
+                               /*
+                                * If busy, give a breather but do not
+                                * release EH ownership by using msleep()
+                                * instead of ata_msleep().  EM Transmit
+                                * bit is busy for the whole host and
+                                * releasing ownership will cause other
+                                * ports to fail the same way.
+                                */
                                if (rc == -EBUSY)
-                                       ata_msleep(ap, 1);
+                                       msleep(1);
                                else
                                        break;
                        }
index 4ba8b04..ab714d2 100644 (file)
@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
 {
        ata_acpi_clear_gtf(dev);
 }
-
-void ata_scsi_acpi_bind(struct ata_device *dev)
-{
-       acpi_handle handle = ata_dev_acpi_handle(dev);
-       if (handle)
-               acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
-}
-
-void ata_scsi_acpi_unbind(struct ata_device *dev)
-{
-       acpi_handle handle = ata_dev_acpi_handle(dev);
-       if (handle)
-               acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
-}
index c69fcce..370462f 100644 (file)
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
  *     should be retried.  To be used from EH.
  *
  *     SCSI midlayer limits the number of retries to scmd->allowed.
- *     scmd->retries is decremented for commands which get retried
+ *     scmd->allowed is incremented for commands which get retried
  *     due to unrelated failures (qc->err_mask is zero).
  */
 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 {
        struct scsi_cmnd *scmd = qc->scsicmd;
-       if (!qc->err_mask && scmd->retries)
-               scmd->retries--;
+       if (!qc->err_mask)
+               scmd->allowed++;
        __ata_eh_qc_complete(qc);
 }
 
index 97a0cef..db6dfcf 100644 (file)
@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
                        if (!IS_ERR(sdev)) {
                                dev->sdev = sdev;
                                scsi_device_put(sdev);
-                               ata_scsi_acpi_bind(dev);
                        } else {
                                dev->sdev = NULL;
                        }
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
        struct scsi_device *sdev;
        unsigned long flags;
 
-       ata_scsi_acpi_unbind(dev);
-
        /* Alas, we need to grab scan_mutex to ensure SCSI device
         * state doesn't change underneath us and thus
         * scsi_device_get() always succeeds.  The mutex locking can
index eeeb778..45b5ab3 100644 (file)
@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
 extern void ata_acpi_bind_port(struct ata_port *ap);
 extern void ata_acpi_bind_dev(struct ata_device *dev);
 extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
-extern void ata_scsi_acpi_bind(struct ata_device *dev);
-extern void ata_scsi_acpi_unbind(struct ata_device *dev);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
                                      pm_message_t state) { }
 static inline void ata_acpi_bind_port(struct ata_port *ap) {}
 static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
 #endif
 
 /* libata-scsi.c */
index 4bceb88..b33d1f9 100644 (file)
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
 
        ap->ioaddr.cmd_addr = cmd_addr;
 
-       if (pnp_port_valid(idev, 1) == 0) {
+       if (pnp_port_valid(idev, 1)) {
                ctl_addr = devm_ioport_map(&idev->dev,
                                           pnp_port_start(idev, 1), 1);
                ap->ioaddr.altstatus_addr = ctl_addr;
index 9e59f65..bece691 100644 (file)
@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
                online_type = ONLINE_KEEP;
        else if (!strncmp(buf, "offline", min_t(int, count, 7)))
                online_type = -1;
-       else
-               return -EINVAL;
+       else {
+               ret = -EINVAL;
+               goto err;
+       }
 
        switch (online_type) {
        case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
                ret = -EINVAL; /* should never happen */
        }
 
+err:
        unlock_device_hotplug();
 
        if (ret)
index 06189e5..94c280d 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/io/tpmif.h>
 #include <xen/grant_table.h>
index 08ae128..c73fc2b 100644 (file)
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        /*  If cn_netlink_send() failed, the data is not sent */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->what = which_id;
        ev->event_data.id.process_pid = task->pid;
        ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        msg->seq = rcvd_seq;
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
index 6ecfa75..a36749f 100644 (file)
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
 
        data = nlmsg_data(nlh);
 
-       memcpy(data, msg, sizeof(*data) + msg->len);
+       memcpy(data, msg, size);
 
        NETLINK_CB(skb).dst_group = group;
 
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
 static void cn_rx_skb(struct sk_buff *__skb)
 {
        struct nlmsghdr *nlh;
-       int err;
        struct sk_buff *skb;
+       int len, err;
 
        skb = skb_get(__skb);
 
        if (skb->len >= NLMSG_HDRLEN) {
                nlh = nlmsg_hdr(skb);
+               len = nlmsg_len(nlh);
 
-               if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+               if (len < (int)sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
-                   nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+                   len > CONNECTOR_MAX_MSG_SIZE) {
                        kfree_skb(skb);
                        return;
                }
index d2c3253..506fd23 100644 (file)
@@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
 {
        int ret;
 
+       if (acpi_disabled)
+               return -ENODEV;
+
        /* don't keep reloading if cpufreq_driver exists */
        if (cpufreq_get_current_driver())
-               return 0;
-
-       if (acpi_disabled)
-               return 0;
+               return -EEXIST;
 
        pr_debug("acpi_cpufreq_init\n");
 
index 32b3479..eb3fdc7 100644 (file)
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-       int core_pct_busy;
+       int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
        int freq;
@@ -68,7 +68,7 @@ struct _pid {
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
-       int last_err;
+       int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-       signed int err, result;
+       signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;
 
-       err = pid->setpoint - busy;
-       fp_error = int_tofp(err);
+       fp_error = int_tofp(pid->setpoint) - busy;
 
-       if (abs(err) <= pid->deadband)
+       if (abs(fp_error) <= int_tofp(pid->deadband))
                return 0;
 
        pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;
 
-       dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-       pid->last_err = err;
+       dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+       pid->last_err = fp_error;
 
        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
 
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
        int max_perf = cpu->pstate.turbo_pstate;
+       int max_perf_adj;
        int min_perf;
        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;
 
-       max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-       *max = clamp_t(int, max_perf,
+       max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+       *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
        int max_perf, min_perf;
+       u64 val;
 
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 
@@ -394,11 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
        trace_cpu_frequency(pstate * 100000, cpu->cpu);
 
        cpu->pstate.current_pstate = pstate;
+       val = pstate << 8;
        if (limits.no_turbo)
-               wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
-       else
-               wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+               val |= (u64)1 << 32;
 
+       wrmsrl(MSR_IA32_PERF_CTL, val);
 }
 
 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -435,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
        u64 core_pct;
-       core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-       sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+       core_pct = div64_u64(int_tofp(sample->aperf * 100),
+                            sample->mperf);
+       sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
        sample->core_pct_busy = core_pct;
 }
@@ -468,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
        mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-       int32_t busy_scaled;
        int32_t core_busy, max_pstate, current_pstate;
 
-       core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+       core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
-       busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-       return fp_toint(busy_scaled);
+       return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-       int busy_scaled;
+       int32_t busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;
@@ -637,8 +636,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-       int rc, min_pstate, max_pstate;
        struct cpudata *cpu;
+       int rc;
 
        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
@@ -652,9 +651,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
-       intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
-       policy->min = min_pstate * 100000;
-       policy->max = max_pstate * 100000;
+       policy->min = cpu->pstate.min_pstate * 100000;
+       policy->max = cpu->pstate.turbo_pstate * 100000;
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
index 8a72b0c..15631f9 100644 (file)
@@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
                if (freq->frequency == CPUFREQ_ENTRY_INVALID)
                        continue;
 
-               dvfs = &s3c64xx_dvfs_table[freq->index];
+               dvfs = &s3c64xx_dvfs_table[freq->driver_data];
                found = 0;
 
                for (i = 0; i < count; i++) {
index 3519111..10b577f 100644 (file)
@@ -305,6 +305,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                                edma_alloc_slot(EDMA_CTLR(echan->ch_num),
                                                EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
+                               kfree(edesc);
                                dev_err(dev, "Failed to allocate slot\n");
                                return NULL;
@@ -346,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                        ccnt = sg_dma_len(sg) / (acnt * bcnt);
                        if (ccnt > (SZ_64K - 1)) {
                                dev_err(dev, "Exceeded max SG segment size\n");
+                               kfree(edesc);
                                return NULL;
                        }
                        cidx = acnt * bcnt;
index 2d9ca60..41b5913 100644 (file)
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
        struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        u32 base, pin, mask;
-       unsigned long reg, pending;
+       unsigned long reg, ena, pending;
        unsigned virq;
 
        /* check from GPIO controller which pin triggered the interrupt */
        for (base = 0; base < lg->chip.ngpio; base += 32) {
                reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+               ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
 
-               while ((pending = inl(reg))) {
+               while ((pending = (inl(reg) & inl(ena)))) {
                        pin = __ffs(pending);
                        mask = BIT(pin);
                        /* Clear before handling so we don't lose an edge */
index 86ef346..0dee0e0 100644 (file)
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
  */
 static int desc_to_gpio(const struct gpio_desc *desc)
 {
-       return desc->chip->base + gpio_chip_hwgpio(desc);
+       return desc - &gpio_desc[0];
 }
 
 
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        int                     status = -EPROBE_DEFER;
        unsigned long           flags;
 
-       if (!desc || !desc->chip) {
+       if (!desc) {
                pr_warn("%s: invalid GPIO\n", __func__);
                return -EINVAL;
        }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        spin_lock_irqsave(&gpio_lock, flags);
 
        chip = desc->chip;
+       if (chip == NULL)
+               goto done;
 
        if (!try_module_get(chip->owner))
                goto done;
index e572dd2..05ad9ba 100644 (file)
@@ -402,9 +402,16 @@ long drm_ioctl(struct file *filp,
                cmd = ioctl->cmd_drv;
        }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+               u32 drv_size;
+
                ioctl = &drm_ioctls[nr];
-               cmd = ioctl->cmd;
+
+               drv_size = _IOC_SIZE(ioctl->cmd);
                usize = asize = _IOC_SIZE(cmd);
+               if (drv_size > asize)
+                       asize = drv_size;
+
+               cmd = ioctl->cmd;
        } else
                goto err_i1;
 
index 69d8ed5..2ad2788 100644 (file)
@@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
                intel_modeset_suspend_hw(dev);
        }
 
+       i915_gem_suspend_gtt_mappings(dev);
+
        i915_save_state(dev);
 
        intel_opregion_fini(dev);
@@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
-       }
+       } else if (drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_check_and_clear_faults(dev);
 
        __i915_drm_thaw(dev);
 
index 35874b3..ab0f2c0 100644 (file)
@@ -497,10 +497,12 @@ struct i915_address_space {
 
        /* FIXME: Need a more generic return type */
        gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-                                    enum i915_cache_level level);
+                                    enum i915_cache_level level,
+                                    bool valid); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
                            unsigned int first_entry,
-                           unsigned int num_entries);
+                           unsigned int num_entries,
+                           bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               unsigned int first_entry,
@@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);
 
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
index 212f6d8..1f7b4ca 100644 (file)
 #define HSW_WT_ELLC_LLC_AGE0           HSW_CACHEABILITY_CONTROL(0x6)
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES  (1 << 2)
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        /* Mark the page as writeable.  Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
 /* PPGTT support for Sandybdrige/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   unsigned first_entry,
-                                  unsigned num_entries)
+                                  unsigned num_entries,
+                                  bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                dma_addr_t page_addr;
 
                page_addr = sg_page_iter_dma_address(&sg_iter);
-               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        }
 
        ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
+                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
 
        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 {
        ppgtt->base.clear_range(&ppgtt->base,
                                i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                               obj->base.size >> PAGE_SHIFT);
+                               obj->base.size >> PAGE_SHIFT,
+                               true);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
                dev_priv->mm.interruptible = interruptible;
 }
 
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       for_each_ring(ring, dev_priv, i) {
+               u32 fault_reg;
+               fault_reg = I915_READ(RING_FAULT_REG(ring));
+               if (fault_reg & RING_FAULT_VALID) {
+                       DRM_DEBUG_DRIVER("Unexpected fault\n"
+                                        "\tAddr: 0x%08lx\n"
+                                        "\tAddress space: %s\n"
+                                        "\tSource ID: %d\n"
+                                        "\tType: %d\n",
+                                        fault_reg & PAGE_MASK,
+                                        fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+                                        RING_FAULT_SRCID(fault_reg),
+                                        RING_FAULT_FAULT_TYPE(fault_reg));
+                       I915_WRITE(RING_FAULT_REG(ring),
+                                  fault_reg & ~RING_FAULT_VALID);
+               }
+       }
+       POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Don't bother messing with faults pre GEN6 as we have little
+        * documentation supporting that it's a good idea.
+        */
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       i915_check_and_clear_faults(dev);
+
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      dev_priv->gtt.base.start / PAGE_SIZE,
+                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      false);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
+       i915_check_and_clear_faults(dev);
+
        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE);
+                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      true);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+               iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
                i++;
        }
 
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) !=
-                       vm->pte_encode(addr, level));
+                       vm->pte_encode(addr, level, true));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
-                                 unsigned int num_entries)
+                                 unsigned int num_entries,
+                                 bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
-                                 unsigned int num_entries)
+                                 unsigned int num_entries,
+                                 bool unused)
 {
        intel_gtt_clear_range(first_entry, num_entries);
 }
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       entry,
-                                      obj->base.size >> PAGE_SHIFT);
+                                      obj->base.size >> PAGE_SHIFT,
+                                      true);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
                const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 }
 
 static bool
index 38f96f6..ef9b354 100644 (file)
 #define   ARB_MODE_SWIZZLE_IVB (1<<5)
 #define RENDER_HWS_PGA_GEN7    (0x04080)
 #define RING_FAULT_REG(ring)   (0x4094 + 0x100*(ring)->id)
+#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_SRCID(x)  ((x >> 3) & 0xff)
+#define   RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
+#define   RING_FAULT_VALID     (1<<0)
 #define DONE_REG               0x40b0
 #define BSD_HWS_PGA_GEN7       (0x04180)
 #define BLT_HWS_PGA_GEN7       (0x04280)
 #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D    0xc2020
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
 #define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
 
 /* CPU: FDI_TX */
index f4c5e95..26c2ea3 100644 (file)
@@ -4759,7 +4759,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* The below fixes the weird display corruption, a few pixels shifted
index 32923d2..5e891b2 100644 (file)
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                   (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                    (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else if (radeon_connector->use_digital)
+               if (radeon_audio != 0) {
+                       if (radeon_connector->use_digital &&
+                           (radeon_connector->audio == RADEON_AUDIO_ENABLE))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (radeon_connector->use_digital)
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_CRT;
+               } else if (radeon_connector->use_digital) {
                        return ATOM_ENCODER_MODE_DVI;
-               else
+               } else {
                        return ATOM_ENCODER_MODE_CRT;
+               }
                break;
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                   (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                    (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               if (radeon_audio != 0) {
+                       if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else
+                               return ATOM_ENCODER_MODE_DVI;
+               } else {
                        return ATOM_ENCODER_MODE_DVI;
+               }
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DisplayPort:
                dig_connector = radeon_connector->con_priv;
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
                        return ATOM_ENCODER_MODE_DP;
-               else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                        (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                         (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               } else if (radeon_audio != 0) {
+                       if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else
+                               return ATOM_ENCODER_MODE_DVI;
+               } else {
                        return ATOM_ENCODER_MODE_DVI;
+               }
                break;
        case DRM_MODE_CONNECTOR_eDP:
                return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                         * does the same thing and more.
                         */
                        if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-                           (rdev->family != CHIP_RS880))
+                           (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
                                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
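
The rework above means an HDMI encoder mode is only ever chosen while audio is in play: the radeon.audio module parameter gates the whole decision, RADEON_AUDIO_ENABLE forces HDMI, and RADEON_AUDIO_AUTO defers to whether the EDID reports an HDMI sink. A minimal user-space sketch of that decision table, with invented names (pick_mode, sink_is_hdmi) standing in for the driver code:

#include <stdio.h>
#include <stdbool.h>

enum { AUDIO_DISABLE = 0, AUDIO_ENABLE = 1, AUDIO_AUTO = 2 };
enum { MODE_DVI = 0, MODE_HDMI = 1 };

/* Mirrors the shape of the new checks: module parameter first, then the
 * per-connector audio property, then the sink type from the EDID. */
static int pick_mode(int audio_param, int connector_audio, bool sink_is_hdmi)
{
        if (audio_param == 0)                   /* radeon.audio=0: audio off */
                return MODE_DVI;
        if (connector_audio == AUDIO_ENABLE)    /* forced on per connector */
                return MODE_HDMI;
        if (connector_audio == AUDIO_AUTO && sink_is_hdmi)
                return MODE_HDMI;               /* auto: follow the EDID */
        return MODE_DVI;
}

int main(void)
{
        printf("%d\n", pick_mode(-1, AUDIO_AUTO, true));   /* 1 (HDMI) */
        printf("%d\n", pick_mode(0,  AUDIO_ENABLE, true)); /* 0 (DVI)  */
        return 0;
}

With radeon.audio left at its new default of -1, the connector property starts out in AUTO, so HDMI audio follows the monitor rather than being forced on unconditionally.
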
index b874ccd..9cd2bc9 100644 (file)
@@ -1694,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3182,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
                return r;
        }
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3198,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
+               radeon_ib_free(rdev, &ib);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
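
The two added calls close leaks on the early-return paths of the IB test: whatever was acquired before the failing step has to be released on that path too. A generic sketch of the same idea in plain C, with invented names rather than the radeon helpers:

#include <stdlib.h>

/* Returns 0 on success, -1 on failure; never leaks the earlier allocation. */
static int run_test(void)
{
        int err = 0;
        char *scratch = malloc(32);     /* stands in for the scratch register */
        char *ib;

        if (!scratch)
                return -1;

        ib = malloc(256);               /* stands in for the indirect buffer */
        if (!ib) {
                err = -1;
                goto free_scratch;      /* scratch must not leak here */
        }

        /* ... submit work, wait for the fence; on error fall through ... */

        free(ib);
free_scratch:
        free(scratch);
        return err;
}

int main(void)
{
        return run_test() ? 1 : 0;
}
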
index 85a69d2..9fcd338 100644 (file)
@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        if (!dig->afmt->pin)
                return;
 
index f815c20..fe1de85 100644 (file)
@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder)
                        radeon_connector = to_radeon_connector(connector);
index 93c1f9e..cac2866 100644 (file)
@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "ni_mc: Bogus length %zu in firmware \"%s\"\n",
index 2a1b187..f9be220 100644 (file)
@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "smc: Bogus length %zu in firmware \"%s\"\n",
index 5b72931..06022e3 100644 (file)
@@ -309,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder)
                        radeon_connector = to_radeon_connector(connector);
index 79159b5..6456573 100644 (file)
@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                        drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_vborder_property,
                                                      0);
-                       drm_object_attach_property(&radeon_connector->base.base,
-                                                  rdev->mode_info.audio_property,
-                                                  RADEON_AUDIO_DISABLE);
+                       if (radeon_audio != 0)
+                               drm_object_attach_property(&radeon_connector->base.base,
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        subpixel_order = SubPixelHorizontalRGB;
                        connector->interlace_allowed = true;
                        if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        subpixel_order = SubPixelHorizontalRGB;
                        connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        connector->interlace_allowed = true;
                        /* in theory with a DP to VGA converter... */
index 66c2228..80285e3 100644 (file)
@@ -85,9 +85,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                   VRAM, also put everything into VRAM on AGP cards to avoid
                   image corruptions */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-                   p->rdev->family < CHIP_PALM &&
                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
-
+                       /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].lobj.domain =
                                RADEON_GEM_DOMAIN_VRAM;
 
index cdd12dc..9c14a1b 100644 (file)
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
@@ -196,7 +196,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
index 4f2e73f..308eff5 100644 (file)
@@ -476,7 +476,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
+       /* TODO: is this still necessary on NI+ ? */
+       if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
index d4652af..d96f7cb 100644 (file)
@@ -1681,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
                       fw_name);
                release_firmware(rdev->smc_fw);
                rdev->smc_fw = NULL;
+               err = 0;
        } else if (rdev->smc_fw->size != smc_req_size) {
                printk(KERN_ERR
                       "si_smc: Bogus length %zu in firmware \"%s\"\n",
index 3100fa9..7266805 100644 (file)
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
        /* enable VCPU clock */
        WREG32(UVD_VCPU_CNTL,  1 << 9);
 
-       /* enable UMC and NC0 */
-       WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
+       /* enable UMC */
+       WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
 
        /* boot up the VCPU */
        WREG32(UVD_SOFT_RESET, 0);
index 1a90f0a..0508f93 100644 (file)
@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
        struct vmw_fpriv *vmw_fp;
 
        vmw_fp = vmw_fpriv(file_priv);
-       ttm_object_file_release(&vmw_fp->tfile);
-       if (vmw_fp->locked_master)
+
+       if (vmw_fp->locked_master) {
+               struct vmw_master *vmaster =
+                       vmw_master(vmw_fp->locked_master);
+
+               ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+               ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
+       }
+
+       ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
 }
 
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }
 
-       ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+       ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
index 0e67cf4..37fb4be 100644 (file)
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
        if (new_backup)
                res->backup_offset = new_backup_offset;
 
-       if (!res->func->may_evict)
+       if (!res->func->may_evict || res->id == -1)
                return;
 
        write_lock(&dev_priv->resource_lock);
index 5a8c011..e80da62 100644 (file)
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
 
 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 {
-       __u32 raw_value;
+       __s32 raw_value;
        switch (item->tag) {
        case HID_GLOBAL_ITEM_TAG_PUSH:
 
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
                return 0;
 
        case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
-               /* Units exponent negative numbers are given through a
-                * two's complement.
-                * See "6.2.2.7 Global Items" for more information. */
-               raw_value = item_udata(item);
+               /* Many devices provide unit exponent as a two's complement
+                * nibble due to the common misunderstanding of HID
+                * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+                * both this and the standard encoding. */
+               raw_value = item_sdata(item);
                if (!(raw_value & 0xfffffff0))
                        parser->global.unit_exponent = hid_snto32(raw_value, 4);
                else
@@ -1870,6 +1871,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
 };
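
The switch from item_udata() to item_sdata() above goes hand in hand with treating the unit exponent as a 4-bit two's-complement nibble that must be sign-extended rather than read as an unsigned value. A self-contained sketch of that sign extension; snto32() here is only an illustrative stand-in for the kernel's hid_snto32():

#include <stdio.h>
#include <stdint.h>

/* Sign-extend the low n bits of value (n = 4 for the HID unit exponent). */
static int32_t snto32(uint32_t value, unsigned int n)
{
        uint32_t sign_bit = 1u << (n - 1);

        value &= (1u << n) - 1;                 /* keep only the nibble */
        return (int32_t)((value ^ sign_bit) - sign_bit);
}

int main(void)
{
        /* 0x0e read as a two's-complement nibble is -2, not 14. */
        printf("%d %d\n", snto32(0x0e, 4), snto32(0x03, 4));    /* -2 3 */
        return 0;
}
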
index 9cbc7ab..f0296a5 100644 (file)
 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN   0x0003
 
 #define USB_VENDOR_ID_NINTENDO         0x057e
+#define USB_VENDOR_ID_NINTENDO2                0x054c
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2        0x0330
 
 #define USB_DEVICE_ID_SYNAPTICS_COMP_TP        0x0009
 #define USB_DEVICE_ID_SYNAPTICS_WTP    0x0010
 #define USB_DEVICE_ID_SYNAPTICS_DPAD   0x0013
+#define USB_DEVICE_ID_SYNAPTICS_LTS1   0x0af8
+#define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 
 #define USB_VENDOR_ID_THINGM           0x27b8
 #define USB_DEVICE_ID_BLINK1           0x01ed
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 
+#define USB_VENDOR_ID_SIS      0x0457
+#define USB_DEVICE_ID_SIS_TS   0x1013
+
 #endif
index 8741d95..d97f232 100644 (file)
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
        return -EINVAL;
 }
 
+
 /**
  * hidinput_calc_abs_res - calculate an absolute axis resolution
  * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
        case ABS_MT_TOOL_Y:
        case ABS_MT_TOUCH_MAJOR:
        case ABS_MT_TOUCH_MINOR:
-               if (field->unit & 0xffffff00)           /* Not a length */
-                       return 0;
-               unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
-               switch (field->unit & 0xf) {
-               case 0x1:                               /* If centimeters */
+               if (field->unit == 0x11) {              /* If centimeters */
                        /* Convert to millimeters */
                        unit_exponent += 1;
-                       break;
-               case 0x3:                               /* If inches */
+               } else if (field->unit == 0x13) {       /* If inches */
                        /* Convert to millimeters */
                        prev = physical_extents;
                        physical_extents *= 254;
                        if (physical_extents < prev)
                                return 0;
                        unit_exponent -= 1;
-                       break;
-               default:
+               } else {
                        return 0;
                }
                break;
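
The simplified branch above accepts only plain length units, 0x11 (SI linear, centimeters) and 0x13 (English linear, inches), and normalizes both to millimeters before the resolution is reported as logical counts per millimeter. A back-of-the-envelope illustration with made-up descriptor values, not taken from any real device:

#include <stdio.h>

int main(void)
{
        /* Illustrative values: a 4096-count axis described as 4.0 inches. */
        double logical_extent  = 4096.0;        /* logical_max - logical_min   */
        double physical_extent = 40.0;          /* physical_max - physical_min */
        int    unit_exponent   = -1;            /* descriptor scales by 10^-1  */
        double physical = physical_extent;

        for (int e = unit_exponent; e < 0; e++)
                physical /= 10.0;               /* apply negative exponent */
        for (int e = unit_exponent; e > 0; e--)
                physical *= 10.0;               /* apply positive exponent */

        physical *= 25.4;                       /* unit 0x13: inches -> mm */
        printf("%.1f units/mm\n", logical_extent / physical);  /* 40.3 */
        return 0;
}
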
index abb20db..1446f52 100644 (file)
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
                goto done;
        }
 
-       if (vendor == USB_VENDOR_ID_NINTENDO) {
+       if (vendor == USB_VENDOR_ID_NINTENDO ||
+           vendor == USB_VENDOR_ID_NINTENDO2) {
                if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
                        devtype = WIIMOTE_DEV_GEN10;
                        goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
 static const struct hid_device_id wiimote_hid_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
                                USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
+                               USB_DEVICE_ID_NINTENDO_WIIMOTE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
                                USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
index 0734552..3fca3be 100644 (file)
@@ -110,6 +110,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
 
        { 0, 0 }
 };
index a7b30be..52605c0 100644 (file)
@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
        }
 
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-       if (indio_dev == NULL)
-               return -ENOMEM;
+       if (indio_dev == NULL) {
+               ret = -ENOMEM;
+               goto error_disable_clk;
+       }
 
        st = iio_priv(indio_dev);
 
index 2710f72..2db7dcd 100644 (file)
@@ -477,6 +477,9 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
+
+       if (indio_dev->available_scan_masks == NULL)
+               kfree(indio_dev->active_scan_mask);
 }
 
 int iio_update_buffers(struct iio_dev *indio_dev,
index 5ceda71..b84791f 100644 (file)
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
          libibverbs, libibcm and a hardware driver library from
          <http://www.openfabrics.org/git/>.
 
+config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+       bool "Experimental and unstable ABI for userspace access to flow steering verbs"
+       depends on INFINIBAND_USER_ACCESS
+       depends on STAGING
+       ---help---
+         The final ABI for userspace access to flow steering verbs
+         has not been defined.  To use the current ABI, *WHICH WILL
+         CHANGE IN THE FUTURE*, say Y here.
+
+         If unsure, say N.
+
 config INFINIBAND_USER_MEM
        bool
        depends on INFINIBAND_USER_ACCESS != n
index d040b87..d8f9c6c 100644 (file)
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 IB_UVERBS_DECLARE_CMD(create_flow);
 IB_UVERBS_DECLARE_CMD(destroy_flow);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 #endif /* UVERBS_H */
index f2b81b9..2f0f01b 100644 (file)
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class  = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                      \
        do {                                                            \
@@ -2599,6 +2601,7 @@ out_put:
        return ret ? ret : in_len;
 }
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
                                union ib_flow_spec *ib_spec)
 {
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
                                struct ib_uverbs_create_xsrq *cmd,
index 75ad86c..2df31f6 100644 (file)
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
        [IB_USER_VERBS_CMD_CLOSE_XRCD]          = ib_uverbs_close_xrcd,
        [IB_USER_VERBS_CMD_CREATE_XSRQ]         = ib_uverbs_create_xsrq,
        [IB_USER_VERBS_CMD_OPEN_QP]             = ib_uverbs_open_qp,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        [IB_USER_VERBS_CMD_CREATE_FLOW]         = ib_uverbs_create_flow,
        [IB_USER_VERBS_CMD_DESTROY_FLOW]        = ib_uverbs_destroy_flow
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
                return -ENOSYS;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
                struct ib_uverbs_cmd_hdr_ex hdr_ex;
 
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                     (hdr_ex.out_words +
                                                      hdr_ex.provider_out_words) * 4);
        } else {
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
                if (hdr.in_words * 4 != count)
                        return -EINVAL;
 
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                     buf + sizeof(hdr),
                                                     hdr.in_words * 4,
                                                     hdr.out_words * 4);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 }
 
 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
index d5d1929..cedda25 100644 (file)
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
                return "C2_QP_STATE_ERROR";
        default:
                return "<invalid QP state>";
-       };
+       }
 }
 
 void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
index d6c5a73..f061264 100644 (file)
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
                ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
                ibdev->ib_dev.uverbs_cmd_mask   |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
        }
 
        mlx4_ib_alloc_eqs(dev, ibdev);
index 3f831de..b1a6cb3 100644 (file)
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+       char name[MLX5_MAX_EQ_NAME];
        struct mlx5_eq *eq, *n;
        int ncomp_vec;
        int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
                        goto clean;
                }
 
-               snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(&dev->mdev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        eq->name,
-                                        &dev->mdev.priv.uuari.uars[0]);
+                                        name, &dev->mdev.priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
        props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-       props->atomic_cap          = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-               IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-       props->masked_atomic_cap   = IB_ATOMIC_HCA;
+       props->atomic_cap          = IB_ATOMIC_NONE;
+       props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
        props->max_mcast_grp       = 1 << dev->mdev.caps.log_max_mcg;
        props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
        ibev.device           = &ibdev->ib_dev;
        ibev.element.port_num = port;
 
+       if (port < 1 || port > ibdev->num_ports) {
+               mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+               return;
+       }
+
        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);
 }
index bd41df9..3453580 100644 (file)
@@ -42,6 +42,10 @@ enum {
        DEF_CACHE_SIZE  = 10,
 };
 
+enum {
+       MLX5_UMR_ALIGN  = 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
        unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
-       int size = sizeof(u64) * npages;
        int err = 0;
        int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                }
                mr->order = ent->order;
                mr->umred = 1;
-               mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-               if (!mr->pas) {
-                       kfree(mr);
-                       err = -ENOMEM;
-                       goto out;
-               }
-               mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-                                        DMA_TO_DEVICE);
-               if (dma_mapping_error(ddev, mr->dma)) {
-                       kfree(mr->pas);
-                       kfree(mr);
-                       err = -ENOMEM;
-                       goto out;
-               }
-
                in->seg.status = 1 << 6;
                in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
                in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                                            sizeof(*in));
                if (err) {
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
                        kfree(mr);
                        goto out;
                }
@@ -129,11 +114,9 @@ out:
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
-       int size;
        int err;
        int i;
 
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-               if (err) {
+               if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
-               } else {
-                       size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
+               else
                        kfree(mr);
-               }
        }
 }
 
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
-       int size;
        int err;
 
+       cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock(&ent->lock);
                if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-               if (err) {
+               if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
-               } else {
-                       size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
+               else
                        kfree(mr);
-               }
        }
 }
 
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
        int i;
 
        dev->cache.stopped = 1;
-       destroy_workqueue(dev->cache.wq);
+       flush_workqueue(dev->cache.wq);
 
        mlx5_mr_cache_debugfs_cleanup(dev);
 
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);
 
+       destroy_workqueue(dev->cache.wq);
+
        return 0;
 }
 
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  int page_shift, int order, int access_flags)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct ib_send_wr wr, *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
+       int size = sizeof(u64) * npages;
        int err;
        int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        if (!mr)
                return ERR_PTR(-EAGAIN);
 
-       mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+       mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+       if (!mr->pas) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       mlx5_ib_populate_pas(dev, umem, page_shift,
+                            mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+       mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(ddev, mr->dma)) {
+               kfree(mr->pas);
+               err = -ENOMEM;
+               goto error;
+       }
 
        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        wait_for_completion(&mr->done);
        up(&umrc->sem);
 
+       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+       kfree(mr->pas);
+
        if (mr->status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "reg umr failed\n");
                err = -EFAULT;
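
The reg_umr() changes above use the usual over-allocate-then-align trick: request size + MLX5_UMR_ALIGN - 1 bytes and round the returned pointer up to the alignment boundary before DMA-mapping it, then unmap and free once the UMR work request completes. A small stand-alone sketch of the rounding; align_up() is an illustrative helper, not the driver's mr_align():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define UMR_ALIGN 2048          /* mirrors MLX5_UMR_ALIGN in the patch */

/* Round ptr up to the next multiple of align (align must be a power of two). */
static void *align_up(void *ptr, uintptr_t align)
{
        uintptr_t mask = align - 1;

        return (void *)(((uintptr_t)ptr + mask) & ~mask);
}

int main(void)
{
        size_t size = 64 * sizeof(uint64_t);
        void *raw = malloc(size + UMR_ALIGN - 1);       /* slack for alignment */
        void *aligned;

        if (!raw)
                return 1;
        aligned = align_up(raw, UMR_ALIGN);
        printf("raw=%p aligned=%p offset=%zu\n",
               raw, aligned, (size_t)((char *)aligned - (char *)raw));
        free(raw);              /* always free the original, unaligned pointer */
        return 0;
}
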
index 045f8cd..5659ea8 100644 (file)
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
        switch (qp_type) {
        case IB_QPT_XRC_INI:
-               size = sizeof(struct mlx5_wqe_xrc_seg);
+               size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;
 
+       case IB_QPT_XRC_TGT:
+               return 0;
+
        case IB_QPT_UC:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;
 
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;
 
        case MLX5_IB_QPT_REG_UMR:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                return wqe_size;
 
        if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-               mlx5_ib_dbg(dev, "\n");
+               mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+                           wqe_size, dev->mdev.caps.max_sq_desc_sz);
                return -EINVAL;
        }
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+       if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+               mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+                           qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+               return -ENOMEM;
+       }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = attr->cap.max_send_sge;
-       qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+       qp->sq.max_post = wq_size / wqe_size;
+       attr->cap.max_send_wr = qp->sq.max_post;
 
        return wq_size;
 }
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
                                          MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX    |
                                           MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+                                         MLX5_QP_OPTPAR_RRE            |
+                                         MLX5_QP_OPTPAR_RAE            |
+                                         MLX5_QP_OPTPAR_RWE            |
+                                         MLX5_QP_OPTPAR_PKEY_INDEX,
                },
        },
        [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_UD]  = MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_UC]  = MLX5_QP_OPTPAR_RWE,
+                       [MLX5_QP_ST_RC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT   |
+                                          MLX5_QP_OPTPAR_RWE           |
+                                          MLX5_QP_OPTPAR_RAE           |
+                                          MLX5_QP_OPTPAR_RRE,
                },
        },
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
        rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-       if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-               aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
-       } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-               aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-       } else {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-               aseg->compare  = 0;
-       }
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-                                 struct ib_send_wr *wr)
-{
-       aseg->swap_add          = cpu_to_be64(wr->wr.atomic.swap);
-       aseg->swap_add_mask     = cpu_to_be64(wr->wr.atomic.swap_mask);
-       aseg->compare           = cpu_to_be64(wr->wr.atomic.compare_add);
-       aseg->compare_mask      = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
                             struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
-                               set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-                                             wr->wr.atomic.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-                               set_atomic_seg(seg, wr);
-                               seg  += sizeof(struct mlx5_wqe_atomic_seg);
-
-                               size += (sizeof(struct mlx5_wqe_raddr_seg) +
-                                        sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-                               break;
-
                        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-                               set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-                                             wr->wr.atomic.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-                               set_masked_atomic_seg(seg, wr);
-                               seg  += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-                               size += (sizeof(struct mlx5_wqe_raddr_seg) +
-                                        sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-                               break;
+                               mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+                               err = -ENOSYS;
+                               *bad_wr = wr;
+                               goto out;
 
                        case IB_WR_LOCAL_INV:
                                next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
index 84d297a..0aa478b 100644 (file)
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        mlx5_vfree(in);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
-               goto err_srq;
+               goto err_usr_kern_srq;
        }
 
        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 err_core:
        mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
        if (pd->uobject)
                destroy_srq_user(pd, srq);
        else
index 7c9d35f..6902017 100644 (file)
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
-               };
+               }
 
                set_eqe_hw(eqe);
                ++eq->cons_index;
index 4ed8235..50219ab 100644 (file)
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
                return IB_QPS_SQE;
        case OCRDMA_QPS_ERR:
                return IB_QPS_ERR;
-       };
+       }
        return IB_QPS_ERR;
 }
 
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
                return OCRDMA_QPS_SQE;
        case IB_QPS_ERR:
                return OCRDMA_QPS_ERR;
-       };
+       }
        return OCRDMA_QPS_ERR;
 }
 
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
                break;
        default:
                return -EINVAL;
-       };
+       }
 
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
        if (!cmd)
index 56e0049..0ce7674 100644 (file)
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
        case BE_DEV_DOWN:
                ocrdma_close(dev);
                break;
-       };
+       }
 }
 
 static struct ocrdma_driver ocrdma_drv = {
index 6e982bb..69f1d12 100644 (file)
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
-       };
+       }
 }
 
 
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
        default:
                ibwc_status = IB_WC_GENERAL_ERR;
                break;
-       };
+       }
        return ibwc_status;
 }
 
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
                pr_err("%s() invalid opcode received = 0x%x\n",
                       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
                break;
-       };
+       }
 }
 
 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
index 3591855..6df2350 100644 (file)
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-       if (device->use_frwr)
+       if (device && device->use_frwr)
                isert_conn_free_frwr_pool(isert_conn);
 
        if (isert_conn->conn_qp) {
index b6a74bc..2a7f0dd 100644 (file)
@@ -1000,7 +1000,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
 
                if (bio->bi_rw & REQ_FLUSH) {
                        /* Also need to send a flush to the backing device */
-                       struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+                       struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);
 
                        flush->bi_rw    = WRITE_FLUSH;
index 4caa8e6..2d2b1b7 100644 (file)
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 
+static void skip_metadata(struct pstore *ps)
+{
+       uint32_t stride = ps->exceptions_per_area + 1;
+       chunk_t next_free = ps->next_free;
+       if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
+               ps->next_free++;
+}
+
 /*
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
 
        ps->current_area--;
 
+       skip_metadata(ps);
+
        return 0;
 }
 
@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
 {
        struct pstore *ps = get_info(store);
-       uint32_t stride;
-       chunk_t next_free;
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
 
        /* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
         * Move onto the next free pending, making sure to take
         * into account the location of the metadata chunks.
         */
-       stride = (ps->exceptions_per_area + 1);
-       next_free = ++ps->next_free;
-       if (sector_div(next_free, stride) == 1)
-               ps->next_free++;
+       ps->next_free++;
+       skip_metadata(ps);
 
        atomic_inc(&ps->pending_count);
        return 0;
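
The new skip_metadata() helper captures the on-disk layout rule: after the NUM_SNAPSHOT_HDR_CHUNKS header, every group of exceptions_per_area data chunks is followed by one metadata chunk, so the next-free pointer has to hop over those slots. A quick stand-alone check of that layout with tiny, made-up numbers:

#include <stdio.h>

#define HDR_CHUNKS 1            /* plays the role of NUM_SNAPSHOT_HDR_CHUNKS */

int main(void)
{
        unsigned long exceptions_per_area = 4;  /* tiny, for illustration */
        unsigned long stride = exceptions_per_area + 1;
        unsigned long next_free = HDR_CHUNKS;   /* chunk 1 holds metadata */

        for (int i = 0; i < 10; i++) {
                next_free++;
                /* Chunks with next_free % stride == HDR_CHUNKS hold metadata
                 * (1, 6, 11, ...), so data must skip past them. */
                if (next_free % stride == HDR_CHUNKS)
                        next_free++;
                printf("data chunk %d -> chunk %lu\n", i, next_free);
        }
        return 0;
}
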
index adf4d7e..561a65f 100644 (file)
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
        u64 *p;
        int lo, hi;
        int rv = 1;
+       unsigned long flags;
 
        if (bb->shift < 0)
                /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
                sectors = next - s;
        }
 
-       write_seqlock_irq(&bb->lock);
+       write_seqlock_irqsave(&bb->lock, flags);
 
        p = bb->page;
        lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
        bb->changed = 1;
        if (!acknowledged)
                bb->unacked_exist = 1;
-       write_sequnlock_irq(&bb->lock);
+       write_sequnlock_irqrestore(&bb->lock, flags);
 
        return rv;
 }
index d60412c..aacf6bf 100644 (file)
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
                        }
                }
                if (rdev
+                   && rdev->recovery_offset == MaxSector
                    && !test_bit(Faulty, &rdev->flags)
                    && !test_and_set_bit(In_sync, &rdev->flags)) {
                        count++;
index df7b0a0..73dc8a3 100644 (file)
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
                        }
                        sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
                } else if (tmp->rdev
+                          && tmp->rdev->recovery_offset == MaxSector
                           && !test_bit(Faulty, &tmp->rdev->flags)
                           && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        count++;
index 7ff4f25..f8b9068 100644 (file)
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
+                       /*
+                        * If this is a discard request, set bi_vcnt to 0. We don't
+                        * want to confuse SCSI, because SCSI will replace the payload.
+                        */
+                       if (rw & REQ_DISCARD)
+                               bi->bi_vcnt = 0;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
+                       /*
+                        * If this is a discard request, set bi_vcnt to 0. We don't
+                        * want to confuse SCSI, because SCSI will replace the payload.
+                        */
+                       if (rw & REQ_DISCARD)
+                               rbi->bi_vcnt = 0;
                        if (conf->mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
                                                      rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                }
                /* now that discard is done we can proceed with any sync */
                clear_bit(STRIPE_DISCARD, &sh->state);
+               /*
+                * SCSI discard will change some bio fields and the stripe has
+                * no updated data, so remove it from the hash list; the stripe
+                * will be reinitialized.
+                */
+               spin_lock_irq(&conf->device_lock);
+               remove_hash(sh);
+               spin_unlock_irq(&conf->device_lock);
                if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
                        set_bit(STRIPE_HANDLE, &sh->state);
 
index 2521f7e..e79749c 100644 (file)
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
                { 0xd5, 0x03, 0x03 },
        };
 
-       /* firmware status */
-       ret = tda10071_rd_reg(priv, 0x51, &tmp);
-       if (ret)
-               goto error;
-
-       if (!tmp) {
+       if (priv->warm) {
                /* warm state - wake up device from sleep */
-               priv->warm = 1;
 
                for (i = 0; i < ARRAY_SIZE(tab); i++) {
                        ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
                        goto error;
        } else {
                /* cold state - try to download firmware */
-               priv->warm = 0;
 
                /* request the firmware, this will block and timeout */
                ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
index bb0c99d..b06a7e5 100644 (file)
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
 
 static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 170000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+               V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
index 7a57609..7c8d971 100644 (file)
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
 
 static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = ADV7511_MAX_WIDTH,
-               .max_height = ADV7511_MAX_HEIGHT,
-               .min_pixelclock = ADV7511_MIN_PIXELCLOCK,
-               .max_pixelclock = ADV7511_MAX_PIXELCLOCK,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+               ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
        state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
        if (state->i2c_edid == NULL) {
                v4l2_err(sd, "failed to register edid i2c client\n");
+               err = -ENOMEM;
                goto err_entity;
        }
 
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
        state->work_queue = create_singlethread_workqueue(sd->name);
        if (state->work_queue == NULL) {
                v4l2_err(sd, "could not create workqueue\n");
+               err = -ENOMEM;
                goto err_unreg_cec;
        }
 
index d174890..22f729d 100644 (file)
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
 
 static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 170000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 225000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static inline const struct v4l2_dv_timings_cap *
index a58a8f6..d9f65d7 100644 (file)
@@ -46,14 +46,10 @@ struct ths8200_state {
 
 static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1080,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 148500000,
-               .standards = V4L2_DV_BT_STD_CEA861,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
-       },
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+               V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
 };
 
 static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
index e12bbd8..fb60da8 100644 (file)
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
 
        /* stop video capture */
        if (res_check(fh, RESOURCE_VIDEO)) {
+               pm_qos_remove_request(&dev->qos_request);
                videobuf_streamoff(&fh->cap);
                res_free(dev,fh,RESOURCE_VIDEO);
        }
index 15d2396..9b88a46 100644 (file)
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
        jpeg->vfd_decoder->release      = video_device_release;
        jpeg->vfd_decoder->lock         = &jpeg->lock;
        jpeg->vfd_decoder->v4l2_dev     = &jpeg->v4l2_dev;
+       jpeg->vfd_decoder->vfl_dir      = VFL_DIR_M2M;
 
        ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
        if (ret) {
index 7a9c5e9..4f30341 100644 (file)
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
        v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
                              &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
 
-       for (i = 0; ARRAY_SIZE(vou_fmt); i++)
+       for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
                if (vou_fmt[i].pfmt == pix->pixelformat)
                        return 0;
 
index 8f9f621..f975b70 100644 (file)
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
        struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
        struct idmac_video_param *video = &ichan->params.video;
        const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
-       unsigned long flags;
        dma_cookie_t cookie;
        size_t new_size;
 
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
                memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
 #endif
 
-       spin_lock_irqsave(&mx3_cam->lock, flags);
+       spin_lock_irq(&mx3_cam->lock);
        list_add_tail(&buf->queue, &mx3_cam->capture);
 
        if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
        if (mx3_cam->active == buf)
                mx3_cam->active = NULL;
 
-       spin_unlock_irqrestore(&mx3_cam->lock, flags);
+       spin_unlock_irq(&mx3_cam->lock);
 error:
        vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
 }
index ad9309d..6c96e48 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 #include "e4000_priv.h"
+#include <linux/math64.h>
 
 /* write multiple registers */
 static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
         * or more.
         */
        f_vco = c->frequency * e4000_pll_lut[i].mul;
-       sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock;
+       sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
        buf[0] = f_vco / priv->cfg->clock;
        buf[1] = (sigma_delta >> 0) & 0xff;
        buf[2] = (sigma_delta >> 8) & 0xff;
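
The e4000 change above widens the sigma-delta intermediate to 64 bits and divides with div_u64(), hence the new <linux/math64.h> include. Assuming a reference clock in the tens of MHz, 0x10000 * (f_vco % clock) does not fit in 32 bits, and an open-coded 64-bit division would need libgcc helpers the kernel does not link against on 32-bit builds. A userspace sketch of the overflow, with 28.8 MHz as an assumed, representative clock:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of the sigma-delta computation above. 28.8 MHz is an
 * assumed clock value, not one taken from the driver; plain '/' stands in
 * for the kernel's div_u64() helper.
 */
int main(void)
{
	uint32_t clock = 28800000;			/* assumed clock, Hz */
	uint32_t rem   = clock - 1;			/* worst-case remainder */

	uint32_t bad  = (0x10000u * rem) / clock;	/* 32-bit product wraps */
	uint32_t good = (uint32_t)((0x10000ULL * rem) / clock);

	printf("32-bit math: %" PRIu32 ", 64-bit intermediate: %" PRIu32 "\n",
	       bad, good);
	return 0;
}
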
index c43c8d3..be77482 100644 (file)
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
                }
        },
+       {
+               .ident = "T12Rg-H",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
+               }
+       },
        {}
 };
 
index 81695d4..c3bb250 100644 (file)
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_MINMAX },
+       /* Microsoft Lifecam NX-3000 */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x045e,
+         .idProduct            = 0x0721,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Microsoft Lifecam VX-7000 */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_DEF },
+       /* Dell SP2008WFP Monitor */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x05a9,
+         .idProduct            = 0x2641,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Dell Alienware X51 */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
index 594c75e..de0e87f 100644 (file)
@@ -353,7 +353,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 
                        if (b->m.planes[plane].bytesused > length)
                                return -EINVAL;
-                       if (b->m.planes[plane].data_offset >=
+
+                       if (b->m.planes[plane].data_offset > 0 &&
+                           b->m.planes[plane].data_offset >=
                            b->m.planes[plane].bytesused)
                                return -EINVAL;
                }
index fd56f25..646f08f 100644 (file)
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+       struct vm_area_struct *vma, unsigned long *res)
+{
+       unsigned long pfn, start_pfn, prev_pfn;
+       unsigned int i;
+       int ret;
+
+       if (!vma_is_io(vma))
+               return -EFAULT;
+
+       ret = follow_pfn(vma, start, &pfn);
+       if (ret)
+               return ret;
+
+       start_pfn = pfn;
+       start += PAGE_SIZE;
+
+       for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+               prev_pfn = pfn;
+               ret = follow_pfn(vma, start, &pfn);
+
+               if (ret) {
+                       pr_err("no page for address %lu\n", start);
+                       return ret;
+               }
+               if (pfn != prev_pfn + 1)
+                       return -EINVAL;
+       }
+
+       *res = start_pfn;
+       return 0;
+}
+
 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
 {
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
 
+                       if (!pfn_valid(pfn))
+                               return -EINVAL;
+
                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
 
-       dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
-       if (!vma_is_io(buf->vma))
-               vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+       if (sgt) {
+               dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+               if (!vma_is_io(buf->vma))
+                       vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
-       sg_free_table(sgt);
-       kfree(sgt);
+               sg_free_table(sgt);
+               kfree(sgt);
+       }
        vb2_put_vma(buf->vma);
        kfree(buf);
 }
 
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address, or as a last resort just assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       /* really, we cannot do anything better at this point */
+       return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
 {
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
+               unsigned long pfn;
+               if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+                       buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+                       buf->size = size;
+                       kfree(pages);
+                       return buf;
+               }
+
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }
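
vb2_dc_get_user_pfn() above accepts a VM_PFNMAP mapping only when its page frames are physically contiguous; only then can the buffer be described by the single DMA address that vb2_dc_pfn_to_dma() derives from the first frame. A userspace model of that contiguity rule, with a plain array of pfns standing in for what follow_pfn() would return page by page:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Userspace model of the contiguity check in vb2_dc_get_user_pfn(): every
 * page frame must directly follow the previous one.
 */
static bool pfns_are_contiguous(const unsigned long *pfn, size_t n_pages)
{
	size_t i;

	for (i = 1; i < n_pages; i++)
		if (pfn[i] != pfn[i - 1] + 1)
			return false;
	return true;
}

int main(void)
{
	unsigned long ok[]  = { 0x1000, 0x1001, 0x1002 };
	unsigned long bad[] = { 0x1000, 0x1001, 0x2000 };

	printf("ok:  %d\n", pfns_are_contiguous(ok, 3));
	printf("bad: %d\n", pfns_are_contiguous(bad, 3));
	return 0;
}
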
index 59ab069..a9830ff 100644 (file)
@@ -349,7 +349,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
 
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
-       return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this);
+       return legacy_set_geometry(this);
 }
 
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
index dd03dfd..c28d4e2 100644 (file)
@@ -1320,7 +1320,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        for (cs = 0; cs < pdata->num_cs; cs++) {
                struct mtd_info *mtd = info->host[cs]->mtd;
 
-               mtd->name = pdev->name;
+               /*
+                * The mtd name matches the one used in the 'mtdparts' kernel
+                * parameter. This name cannot be changed, otherwise the
+                * user's mtd partition configuration would break.
+                */
+               mtd->name = "pxa3xx_nand-0";
                info->cs = cs;
                ret = pxa3xx_nand_scan(mtd);
                if (ret) {
index 3b1ff61..693d8ff 100644 (file)
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
 
 static const struct platform_device_id at91_can_id_table[] = {
        {
-               .name = "at91_can",
+               .name = "at91sam9x5_can",
                .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
        }, {
-               .name = "at91sam9x5_can",
+               .name = "at91_can",
                .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
        }, {
                /* sentinel */
index f9cba41..1870c47 100644 (file)
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
        size_t size;
 
        size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-       size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+       size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
        size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-       size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
-       size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+       size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+       size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
        if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-               size += sizeof(struct can_berr_counter);
+               size += nla_total_size(sizeof(struct can_berr_counter));
        if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
-               size += sizeof(struct can_bittiming_const);
+               size += nla_total_size(sizeof(struct can_bittiming_const));
 
        return size;
 }
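
The sizing fix above matters because a netlink attribute occupies more than its raw payload: nla_total_size() also accounts for the 4-byte attribute header and the padding to a 4-byte boundary, so summing bare sizeof() values can leave too little tailroom for the later nla_put() calls. A small sketch that mirrors the arithmetic; the constants copy NLA_ALIGNTO / NLA_HDRLEN from the netlink uapi, and this is an illustration, not the kernel helper itself:

#include <stdio.h>

/* Illustration of why nla_total_size(payload) exceeds the raw payload. */
#define DEMO_NLA_ALIGNTO	4
#define DEMO_NLA_ALIGN(len)	(((len) + DEMO_NLA_ALIGNTO - 1) & ~(DEMO_NLA_ALIGNTO - 1))
#define DEMO_NLA_HDRLEN		4	/* aligned sizeof(struct nlattr) */

static int demo_nla_total_size(int payload)
{
	return DEMO_NLA_ALIGN(DEMO_NLA_HDRLEN + payload);
}

int main(void)
{
	int payload = 13;	/* a hypothetical 13-byte attribute payload */

	printf("payload %d -> attribute consumes %d bytes on the skb\n",
	       payload, demo_nla_total_size(payload));	/* 13 -> 20 */
	return 0;
}
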
index 3f21142..8f5ce74 100644 (file)
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC                        BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0xf)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x1f)
 #define FLEXCAN_MCR_IDAM_A             (0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (1 << 8)
 #define FLEXCAN_MCR_IDAM_C             (2 << 8)
@@ -735,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
         *
         */
        reg_mcr = flexcan_read(&regs->mcr);
+       reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
        reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
                FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
-               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
+               FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
        netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
        flexcan_write(reg_mcr, &regs->mcr);
 
@@ -771,6 +773,10 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
+       /* Abort any pending TX, mark Mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+                     &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
        /* acceptance mask/acceptance code (accept everything) */
        flexcan_write(0x0, &regs->rxgmask);
        flexcan_write(0x0, &regs->rx14mask);
@@ -979,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
 }
 
 static const struct of_device_id flexcan_of_match[] = {
-       { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
-       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+       { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
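
Two details in the flexcan hunks above follow the usual read-modify-write discipline: the FLEXCAN_MCR_MAXMB() mask is widened from 4 to 5 bits, and the field is cleared before the new value is OR-ed in, so bits left over from the bootloader or an earlier configuration cannot survive. A generic sketch of the idiom, with the register modeled as a plain variable:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Generic read-modify-write of a multi-bit register field, modeled on the
 * MAXMB handling above.
 */
#define FIELD(x)	((x) & 0x1f)	/* 5-bit field, like MAXMB after the fix */

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* whatever the bootloader left behind */
	uint32_t wanted = 8;

	reg &= ~FIELD(0xff);		/* clear the whole field first ... */
	reg |= FIELD(wanted);		/* ... then set the value we want */

	printf("field is now %" PRIu32 "\n", FIELD(reg));
	return 0;
}
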
index 97b3d32..c5e375d 100644 (file)
@@ -1197,8 +1197,9 @@ union cdu_context {
 /* TM (timers) host DB constants */
 #define TM_ILT_PAGE_SZ_HW      0
 #define TM_ILT_PAGE_SZ         (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM         (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM            1024
+#define TM_CONN_NUM            (BNX2X_FIRST_VF_CID + \
+                                BNX2X_VF_CIDS + \
+                                CNIC_ISCSI_CID_MAX)
 #define TM_ILT_SZ              (8 * TM_CONN_NUM)
 #define TM_ILT_LINES           DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 
@@ -1527,7 +1528,6 @@ struct bnx2x {
 #define PCI_32BIT_FLAG                 (1 << 1)
 #define ONE_PORT_FLAG                  (1 << 2)
 #define NO_WOL_FLAG                    (1 << 3)
-#define USING_DAC_FLAG                 (1 << 4)
 #define USING_MSIX_FLAG                        (1 << 5)
 #define USING_MSI_FLAG                 (1 << 6)
 #define DISABLE_MSI_FLAG               (1 << 7)
@@ -1621,7 +1621,7 @@ struct bnx2x {
        u16                     rx_ticks_int;
        u16                     rx_ticks;
 /* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT                (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT                (0xff*BNX2X_BTR)
 
        u32                     lin_cnt;
 
@@ -2072,7 +2072,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 
 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+                              u32 *comp);
 
 /* FLR related routines */
 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2498,4 +2499,8 @@ enum bnx2x_pci_bus_speed {
 };
 
 void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+       (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
 #endif /* bnx2x.h */
index e66beff..4ab4c89 100644 (file)
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
        }
 #endif
+       skb_record_rx_queue(skb, fp->rx_queue);
        napi_gro_receive(&fp->napi, skb);
 }
 
index 324de5f..e8efa1c 100644 (file)
@@ -891,17 +891,8 @@ static void bnx2x_get_regs(struct net_device *dev,
         * will re-enable parity attentions right after the dump.
         */
 
-       /* Disable parity on path 0 */
-       bnx2x_pretend_func(bp, 0);
        bnx2x_disable_blocks_parity(bp);
 
-       /* Disable parity on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_disable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
        dump_hdr.preset = DUMP_ALL_PRESETS;
        dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +919,9 @@ static void bnx2x_get_regs(struct net_device *dev,
        /* Actually read the registers */
        __bnx2x_get_regs(bp, p);
 
-       /* Re-enable parity attentions on path 0 */
-       bnx2x_pretend_func(bp, 0);
+       /* Re-enable parity attentions */
        bnx2x_clear_blocks_parity(bp);
        bnx2x_enable_blocks_parity(bp);
-
-       /* Re-enable parity attentions on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_clear_blocks_parity(bp);
-       bnx2x_enable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 }
 
 static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +975,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
         * will re-enable parity attentions right after the dump.
         */
 
-       /* Disable parity on path 0 */
-       bnx2x_pretend_func(bp, 0);
        bnx2x_disable_blocks_parity(bp);
 
-       /* Disable parity on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_disable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
        dump_hdr.preset = bp->dump_preset_idx;
        dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1005,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
        /* Actually read the registers */
        __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
 
-       /* Re-enable parity attentions on path 0 */
-       bnx2x_pretend_func(bp, 0);
+       /* Re-enable parity attentions */
        bnx2x_clear_blocks_parity(bp);
        bnx2x_enable_blocks_parity(bp);
 
-       /* Re-enable parity attentions on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_clear_blocks_parity(bp);
-       bnx2x_enable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        return 0;
 }
 
index 76df015..c2dfea7 100644 (file)
@@ -640,23 +640,35 @@ static const struct {
  * [30] MCP Latched ump_tx_parity
  * [31] MCP Latched scpad_parity
  */
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS  \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS      \
        (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
-        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS  \
+       (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 
 /* Below registers control the MCP parity attention output. When
  * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
  * enabled, when cleared - disabled.
  */
-static const u32 mcp_attn_ctl_regs[] = {
-       MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
-       MISC_REG_AEU_ENABLE4_NIG_0,
-       MISC_REG_AEU_ENABLE4_PXP_0,
-       MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
-       MISC_REG_AEU_ENABLE4_NIG_1,
-       MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+       u32 addr;
+       u32 bits;
+} mcp_attn_ctl_regs[] = {
+       { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+               MISC_AEU_ENABLE_MCP_PRTY_BITS },
+       { MISC_REG_AEU_ENABLE4_NIG_0,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_PXP_0,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+               MISC_AEU_ENABLE_MCP_PRTY_BITS },
+       { MISC_REG_AEU_ENABLE4_NIG_1,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_PXP_1,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
 };
 
 static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
        u32 reg_val;
 
        for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
-               reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+               reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
 
                if (enable)
-                       reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+                       reg_val |= mcp_attn_ctl_regs[i].bits;
                else
-                       reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+                       reg_val &= ~mcp_attn_ctl_regs[i].bits;
 
-               REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+               REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
        }
 }
 
index 82b658d..b42f89c 100644 (file)
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 }
 
 /* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+                              u32 *comp)
 {
-       u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;
 
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
        spin_lock_bh(&bp->dmae_lock);
 
        /* reset completion */
-       *wb_comp = 0;
+       *comp = 0;
 
        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 
        /* wait for completion */
        udelay(5);
-       while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+       while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
 
                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
                cnt--;
                udelay(50);
        }
-       if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+       if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
        return rc;
 }
 
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp)        \
+       (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 {
        u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
-       addr = trace_shmem_base - 0x800;
+
+       /* sanity */
+       if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+           trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+                               SCRATCH_BUFFER_SIZE(bp)) {
+               BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+                         trace_shmem_base);
+               return;
+       }
+
+       addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
 
        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
-       mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
-                       + ((mark + 0x3) & ~0x3) - 0x08000000;
+       mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+       if (mark >= trace_shmem_base || mark < addr + 4) {
+               BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+               return;
+       }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
 
        printk("%s", lvl);
 
        /* dump buffer after the mark */
-       for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+       for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
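
The added sanity checks above distrust the trace-buffer pointer handed back by the management firmware: it is only dereferenced when it lands inside the MCP scratchpad window, past the trace buffer itself, and the cyclic-buffer mark must in turn fall inside the buffer. A compact model of the first check; 0x800 matches MCPR_TRACE_BUFFER_SIZE from the hunk, the other numbers are illustrative, not chip constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the range check added above. */
#define TRACE_BUFFER_SIZE	0x800

static bool trace_base_is_sane(uint32_t shmem_base, uint32_t scratch_base,
			       uint32_t scratch_size)
{
	return shmem_base >= scratch_base + TRACE_BUFFER_SIZE &&
	       shmem_base < scratch_base + scratch_size;
}

int main(void)
{
	printf("%d\n", trace_base_is_sane(0x90000, 0x80000, 0x20000)); /* 1 */
	printf("%d\n", trace_base_is_sane(0x10000, 0x80000, 0x20000)); /* 0 */
	return 0;
}
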
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
        pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "BRB");
+                       res |= true; /* Each bit is a real error! */
+
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "BRB");
                                        _print_parity(bp,
                                                      BRB1_REG_BRB1_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PARSER");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PARSER");
                                        _print_parity(bp, PRS_REG_PRS_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TSDM");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "TSDM");
                                        _print_parity(bp,
                                                      TSDM_REG_TSDM_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++,
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
                                                          "SEARCHER");
                                        _print_parity(bp, SRC_REG_SRC_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TCM");
-                                       _print_parity(bp,
-                                                     TCM_REG_TCM_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TSEMI");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "TCM");
+                                       _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "TSEMI");
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "XPB");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "XPB");
                                        _print_parity(bp, GRCBASE_XPB +
                                                          PB_REG_PB_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
 
                        /* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool *global,
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool *global,
                                            bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
+                       res |= true; /* Each bit is a real error! */
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "PBF");
+                                       _print_next_block((*par_num)++, "PBF");
                                        _print_parity(bp, PBF_REG_PBF_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "QM");
+                                       _print_next_block((*par_num)++, "QM");
                                        _print_parity(bp, QM_REG_QM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "TM");
+                                       _print_next_block((*par_num)++, "TM");
                                        _print_parity(bp, TM_REG_TM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XSDM");
+                                       _print_next_block((*par_num)++, "XSDM");
                                        _print_parity(bp,
                                                      XSDM_REG_XSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XCM");
+                                       _print_next_block((*par_num)++, "XCM");
                                        _print_parity(bp, XCM_REG_XCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XSEMI");
+                                       _print_next_block((*par_num)++,
+                                                         "XSEMI");
                                        _print_parity(bp,
                                                      XSEM_REG_XSEM_PRTY_STS_0);
                                        _print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "DOORBELLQ");
                                        _print_parity(bp,
                                                      DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "NIG");
+                                       _print_next_block((*par_num)++, "NIG");
                                        if (CHIP_IS_E1x(bp)) {
                                                _print_parity(bp,
                                                        NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "VAUX PCI CORE");
                                *global = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "DEBUG");
+                                       _print_next_block((*par_num)++,
+                                                         "DEBUG");
                                        _print_parity(bp, DBG_REG_DBG_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "USDM");
+                                       _print_next_block((*par_num)++, "USDM");
                                        _print_parity(bp,
                                                      USDM_REG_USDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "UCM");
+                                       _print_next_block((*par_num)++, "UCM");
                                        _print_parity(bp, UCM_REG_UCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "USEMI");
+                                       _print_next_block((*par_num)++,
+                                                         "USEMI");
                                        _print_parity(bp,
                                                      USEM_REG_USEM_PRTY_STS_0);
                                        _print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "UPB");
+                                       _print_next_block((*par_num)++, "UPB");
                                        _print_parity(bp, GRCBASE_UPB +
                                                          PB_REG_PB_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "CSDM");
+                                       _print_next_block((*par_num)++, "CSDM");
                                        _print_parity(bp,
                                                      CSDM_REG_CSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "CCM");
+                                       _print_next_block((*par_num)++, "CCM");
                                        _print_parity(bp, CCM_REG_CCM_PRTY_STS);
                                }
                                break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CSEMI");
+                       res |= true; /* Each bit is a real error! */
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "CSEMI");
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PXP");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "PXP");
                                        _print_parity(bp, PXP_REG_PXP_PRTY_STS);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_0);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
-                               if (print)
-                                       _print_next_block(par_num++,
-                                       "PXPPCICLOCKCLIENT");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CFC");
+                                       break;
+                               case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PXPPCICLOCKCLIENT");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "CFC");
                                        _print_parity(bp,
                                                      CFC_REG_CFC_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CDU");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "CDU");
                                        _print_parity(bp, CDU_REG_CDU_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "DMAE");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "DMAE");
                                        _print_parity(bp,
                                                      DMAE_REG_DMAE_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "IGU");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "IGU");
                                        if (CHIP_IS_E1x(bp))
                                                _print_parity(bp,
                                                        HC_REG_HC_PRTY_STS);
                                        else
                                                _print_parity(bp,
                                                        IGU_REG_IGU_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "MISC");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "MISC");
                                        _print_parity(bp,
                                                      MISC_REG_MISC_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
 
                        /* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
-                                          bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool *global,
+                                           bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       bool res = false;
+       u32 cur_bit;
+       int i;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
                                if (print)
-                                       _print_next_block(par_num++, "MCP ROM");
+                                       _print_next_block((*par_num)++,
+                                                         "MCP ROM");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP UMP RX");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP UMP TX");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP SCPAD");
-                               *global = true;
+                               /* clear latched SCPAD PARITY from MCP */
+                               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+                                      1UL << 10);
                                break;
                        }
 
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PGLUE_B");
+                       res |= true; /* Each bit is a real error! */
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PGLUE_B");
                                        _print_parity(bp,
-                                               PGLUE_B_REG_PGLUE_B_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "ATC");
+                                                     PGLUE_B_REG_PGLUE_B_PRTY_STS);
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "ATC");
                                        _print_parity(bp,
                                                      ATC_REG_ATC_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
-
                        /* Clear the bit */
                        sig &= ~cur_bit;
                }
        }
 
-       return par_num;
+       return res;
 }
 
 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                              u32 *sig)
 {
+       bool res = false;
+
        if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
            (sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                if (print)
                        netdev_err(bp->dev,
                                   "Parity errors detected in blocks: ");
-               par_num = bnx2x_check_blocks_with_parity0(bp,
-                       sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity1(bp,
-                       sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity2(bp,
-                       sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity3(
-                       sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity4(bp,
-                       sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+               res |= bnx2x_check_blocks_with_parity0(bp,
+                       sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+               res |= bnx2x_check_blocks_with_parity1(bp,
+                       sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+               res |= bnx2x_check_blocks_with_parity2(bp,
+                       sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+               res |= bnx2x_check_blocks_with_parity3(bp,
+                       sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+               res |= bnx2x_check_blocks_with_parity4(bp,
+                       sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
 
                if (print)
                        pr_cont("\n");
+       }
 
-               return true;
-       } else
-               return false;
+       return res;
 }
 
 /**
@@ -7126,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        int port = BP_PORT(bp);
        int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
        u32 low, high;
-       u32 val;
+       u32 val, reg;
 
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
@@ -7271,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        val |= CHIP_IS_E1(bp) ? 0 : 0x10;
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
 
+       /* SCPAD_PARITY should NOT trigger close the gates */
+       reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+       REG_WR(bp, reg,
+              REG_RD(bp, reg) &
+              ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+       reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+       REG_WR(bp, reg,
+              REG_RD(bp, reg) &
+              ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
        bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 
        if (!CHIP_IS_E1x(bp)) {
@@ -11685,9 +11722,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 static int bnx2x_open(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       bool global = false;
-       int other_engine = BP_PATH(bp) ? 0 : 1;
-       bool other_load_status, load_status;
        int rc;
 
        bp->stats_init = true;
@@ -11703,6 +11737,10 @@ static int bnx2x_open(struct net_device *dev)
         * Parity recovery is only relevant for PF driver.
         */
        if (IS_PF(bp)) {
+               int other_engine = BP_PATH(bp) ? 0 : 1;
+               bool other_load_status, load_status;
+               bool global = false;
+
                other_load_status = bnx2x_get_load_status(bp, other_engine);
                load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
                if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -12080,7 +12118,6 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
        struct device *dev = &bp->pdev->dev;
 
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
-               bp->flags |= USING_DAC_FLAG;
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
                        return -EIO;
@@ -12248,8 +12285,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
-       if (bp->flags & USING_DAC_FLAG)
-               dev->features |= NETIF_F_HIGHDMA;
+       dev->features |= NETIF_F_HIGHDMA;
 
        /* Add Loopback capability to the device */
        dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12612,24 +12648,24 @@ static int set_max_cos_est(int chip_id)
                return BNX2X_MULTI_TX_COS_E1X;
        case BCM57712:
        case BCM57712_MF:
-       case BCM57712_VF:
                return BNX2X_MULTI_TX_COS_E2_E3A0;
        case BCM57800:
        case BCM57800_MF:
-       case BCM57800_VF:
        case BCM57810:
        case BCM57810_MF:
        case BCM57840_4_10:
        case BCM57840_2_20:
        case BCM57840_O:
        case BCM57840_MFO:
-       case BCM57810_VF:
        case BCM57840_MF:
-       case BCM57840_VF:
        case BCM57811:
        case BCM57811_MF:
-       case BCM57811_VF:
                return BNX2X_MULTI_TX_COS_E3B0;
+       case BCM57712_VF:
+       case BCM57800_VF:
+       case BCM57810_VF:
+       case BCM57840_VF:
+       case BCM57811_VF:
                return 1;
        default:
                pr_err("Unknown board_type (%d), aborting\n", chip_id);
index 9ad012b..bf08ad6 100644 (file)
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
                                 bnx2x_vfop_qdtor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
                                             cmd->block);
+       } else {
+               BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
+               return -ENOMEM;
        }
-       DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
-          vf->abs_vfid, vfop->rc);
-       return -ENOMEM;
 }
 
 static void
@@ -3390,14 +3390,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete eth macs\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* remove existing uc list macs */
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete uc_list macs\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* configure the new mac to device */
@@ -3405,6 +3407,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
                                  BNX2X_ETH_MAC, &ramrod_flags);
 
+out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
@@ -3467,7 +3470,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                                          &ramrod_flags);
                if (rc) {
                        BNX2X_ERR("failed to delete vlans\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* send queue update ramrod to configure default vlan and silent
@@ -3501,7 +3505,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
                        if (rc) {
                                BNX2X_ERR("failed to configure vlan\n");
-                               return -EINVAL;
+                               rc = -EINVAL;
+                               goto out;
                        }
 
                        /* configure default vlan to vf queue and set silent
@@ -3519,18 +3524,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                rc = bnx2x_queue_state_change(bp, &q_params);
                if (rc) {
                        BNX2X_ERR("Failed to configure default VLAN\n");
-                       return rc;
+                       goto out;
                }
 
                /* clear the flag indicating that this VF needs its vlan
-                * (will only be set if the HV configured th Vlan before vf was
-                * and we were called because the VF came up later
+                * (will only be set if the HV configured the Vlan before vf was
+                * up and we were called because the VF came up later)
                 */
+out:
                vf->cfg_flags &= ~VF_CFG_VLAN;
-
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
        }
-       return 0;
+       return rc;
 }
 
 /* crc is the first field in the bulletin board. Compute the crc over the
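
The bnx2x_set_vf_mac()/bnx2x_set_vf_vlan() hunks above funnel every error path through a single out label so the VF/PF channel lock taken at the top of the block is always released. A self-contained sketch of that cleanup style, with a pthread mutex and stub helpers standing in for the driver's lock and ramrod calls:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t channel_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub for a step that may fail, in the spirit of bnx2x_del_all_macs(). */
static int do_step(int fail)
{
        return fail ? -1 : 0;
}

static int configure(int fail_step)
{
        int rc = 0;

        pthread_mutex_lock(&channel_lock);

        if (do_step(fail_step == 1)) {
                rc = -EINVAL;
                goto out;               /* never return with the lock held */
        }
        if (do_step(fail_step == 2)) {
                rc = -EINVAL;
                goto out;
        }
        /* ... apply the new configuration here ... */

out:
        pthread_mutex_unlock(&channel_lock);
        return rc;
}

int main(void)
{
        printf("ok=%d fail=%d\n", configure(0), configure(2));
        return 0;
}
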
index 86436c7..3b75070 100644 (file)
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
 
        } else if (bp->func_stx) {
                *stats_comp = 0;
-               bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+               bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
        }
 }
 
index da16953..28757df 100644 (file)
@@ -980,7 +980,7 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       return bnx2x_issue_dmae_with_comp(bp, &dmae);
+       return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 }
 
 static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
index 78d6d6b..48f5288 100644 (file)
 #define XGMAC_DMA_HW_FEATURE   0x00000f58      /* Enabled Hardware Features */
 
 #define XGMAC_ADDR_AE          0x80000000
-#define XGMAC_MAX_FILTER_ADDR  31
 
 /* PMT Control and Status */
 #define XGMAC_PMT_POINTER_RESET        0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
        struct device *device;
        struct napi_struct napi;
 
+       int max_macs;
        struct xgmac_extra_stats xstats;
 
        spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
        netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
                 netdev_mc_count(dev), netdev_uc_count(dev));
 
-       if (dev->flags & IFF_PROMISC) {
-               writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
-               return;
-       }
+       if (dev->flags & IFF_PROMISC)
+               value |= XGMAC_FRAME_FILTER_PR;
 
        memset(hash_filter, 0, sizeof(hash_filter));
 
-       if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+       if (netdev_uc_count(dev) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
        }
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
                goto out;
        }
 
-       if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+       if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
        } else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
        }
 
 out:
-       for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
-               xgmac_set_mac_addr(ioaddr, NULL, reg);
+       for (i = reg; i <= priv->max_macs; i++)
+               xgmac_set_mac_addr(ioaddr, NULL, i);
        for (i = 0; i < XGMAC_NUM_HASH; i++)
                writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
        uid = readl(priv->base + XGMAC_VERSION);
        netdev_info(ndev, "h/w version is 0x%x\n", uid);
 
+       /* Figure out how many valid mac address filter registers we have */
+       writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+       if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+               priv->max_macs = 31;
+       else
+               priv->max_macs = 7;
+
        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq == -ENXIO) {
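
Rather than assuming 31 MAC filter registers, the xgmac probe above writes the last candidate register and reads it back to see whether the larger bank exists. A rough sketch of that write/readback detection, with a plain array standing in for the MMIO window the driver touches through writel()/readl():

#include <stdint.h>
#include <stdio.h>

#define MAX_CANDIDATE 31

/* Pretend MMIO space: only the first 8 "address high" registers are backed. */
static uint32_t addr_high[8];

static void reg_write(int idx, uint32_t val)
{
        if (idx < 8)
                addr_high[idx] = val;   /* writes past the end are dropped */
}

static uint32_t reg_read(int idx)
{
        return idx < 8 ? addr_high[idx] : 0;
}

int main(void)
{
        int max_macs;

        /* Probe: if the last candidate register holds the written value,
         * the larger bank is present; otherwise fall back to the small one. */
        reg_write(MAX_CANDIDATE, 1);
        max_macs = (reg_read(MAX_CANDIDATE) == 1) ? MAX_CANDIDATE : 7;

        printf("max_macs = %d\n", max_macs);
        return 0;
}
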
index 5f5896e..a7a941b 100644 (file)
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
 
 /* DM9000 network board routine ---------------------------- */
 
-static void
-dm9000_reset(board_info_t * db)
-{
-       dev_dbg(db->dev, "resetting device\n");
-
-       /* RESET device */
-       writeb(DM9000_NCR, db->io_addr);
-       udelay(200);
-       writeb(NCR_RST, db->io_data);
-       udelay(200);
-}
-
 /*
  *   Read a byte from I/O port
  */
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
        writeb(value, db->io_data);
 }
 
+static void
+dm9000_reset(board_info_t *db)
+{
+       dev_dbg(db->dev, "resetting device\n");
+
+       /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
+        * The essential point is that we have to do a double reset, and the
+        * instruction is to set LBK into MAC internal loopback mode.
+        */
+       iow(db, DM9000_NCR, 0x03);
+       udelay(100); /* Application note says at least 20 us */
+       if (ior(db, DM9000_NCR) & 1)
+               dev_err(db->dev, "dm9000 did not respond to first reset\n");
+
+       iow(db, DM9000_NCR, 0);
+       iow(db, DM9000_NCR, 0x03);
+       udelay(100);
+       if (ior(db, DM9000_NCR) & 1)
+               dev_err(db->dev, "dm9000 did not respond to second reset\n");
+}
+
 /* routines for sending block to chip */
 
 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 static void dm9000_show_carrier(board_info_t *db,
                                unsigned carrier, unsigned nsr)
 {
+       int lpa;
        struct net_device *ndev = db->ndev;
+       struct mii_if_info *mii = &db->mii;
        unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
 
-       if (carrier)
-               dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
+       if (carrier) {
+               lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+               dev_info(db->dev,
+                        "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
                         ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
-                        (ncr & NCR_FDX) ? "full" : "half");
-       else
+                        (ncr & NCR_FDX) ? "full" : "half", lpa);
+       } else {
                dev_info(db->dev, "%s: link down\n", ndev->name);
+       }
 }
 
 static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
                        (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
 
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
+       iow(db, DM9000_GPR, 0);
 
-       dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
-       dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+       /* If we are dealing with DM9000B, some extra steps are required: a
+        * manual phy reset, and setting init params.
+        */
+       if (db->type == TYPE_DM9000B) {
+               dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
+               dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
+       }
 
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
index bd0e0c0..c08fd32 100644 (file)
@@ -1198,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
-               req->if_id = cpu_to_le16(adapter->if_handle);
        } else if (BEx_chip(adapter)) {
                if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
                        req->hdr.version = 2;
@@ -1206,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
                req->hdr.version = 2;
        }
 
+       if (req->hdr.version > 0)
+               req->if_id = cpu_to_le16(adapter->if_handle);
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
index c4eaade..9fbe4dd 100644 (file)
@@ -88,6 +88,7 @@
 
 #include <asm/io.h>
 #include <asm/reg.h>
+#include <asm/mpc85xx.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
        }
 }
 
-static void gfar_detect_errata(struct gfar_private *priv)
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
 {
-       struct device *dev = &priv->ofdev->dev;
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;
 
-       /* MPC8313 and MPC837x all rev */
-       if ((pvr == 0x80850010 && mod == 0x80b0) ||
-           (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
-               priv->errata |= GFAR_ERRATA_A002;
+       /* MPC8313 Rev < 2.0 */
+       if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+               priv->errata |= GFAR_ERRATA_12;
+}
 
-       /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
-       if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
-           (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+       unsigned int svr = mfspr(SPRN_SVR);
+
+       if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
+       if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+               priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+       struct device *dev = &priv->ofdev->dev;
+
+       /* no plans to fix */
+       priv->errata |= GFAR_ERRATA_A002;
+
+       if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+               __gfar_detect_errata_85xx(priv);
+       else /* non-mpc85xx parts, i.e. e300 core based */
+               __gfar_detect_errata_83xx(priv);
 
        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
        /* Normally TSEC should not hang on GRS commands, so we should
         * actually wait for IEVENT_GRSC flag.
         */
-       if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+       if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
                return 0;
 
        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
index 86d5142..151e00c 100644 (file)
@@ -2655,6 +2655,8 @@ static int igb_set_eee(struct net_device *netdev,
            (hw->phy.media_type != e1000_media_type_copper))
                return -EOPNOTSUPP;
 
+       memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
        ret_val = igb_get_eee(netdev, &eee_curr);
        if (ret_val)
                return ret_val;
index 7fb5677..2c210ec 100644 (file)
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
        p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
        spin_unlock_bh(&mp->mib_counters_lock);
-
-       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
        struct mv643xx_eth_private *mp = (void *)_mp;
-
        mib_counters_update(mp);
+       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
                mp->int_mask |= INT_TX_END_0 << i;
        }
 
+       add_timer(&mp->mib_counters_timer);
        port_start(mp);
 
        wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
        if (!ppdev)
                return -ENOMEM;
        ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ppdev->dev.of_node = pnp;
 
        ret = platform_device_add_resources(ppdev, &res, 1);
        if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->mib_counters_timer.data = (unsigned long)mp;
        mp->mib_counters_timer.function = mib_counters_timer_wrapper;
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-       add_timer(&mp->mib_counters_timer);
 
        spin_lock_init(&mp->mib_counters_lock);
 
index dec455c..afe2efa 100644 (file)
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                put_page(page);
                return -ENOMEM;
        }
-       page_alloc->size = PAGE_SIZE << order;
+       page_alloc->page_size = PAGE_SIZE << order;
        page_alloc->page = page;
        page_alloc->dma = dma;
-       page_alloc->offset = frag_info->frag_align;
+       page_alloc->page_offset = frag_info->frag_align;
        /* Not doing get_page() for each frag is a big win
         * on asymetric workloads.
         */
-       atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+       atomic_set(&page->_count,
+                  page_alloc->page_size / frag_info->frag_stride);
        return 0;
 }
 
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
        for (i = 0; i < priv->num_frags; i++) {
                frag_info = &priv->frag_info[i];
                page_alloc[i] = ring_alloc[i];
-               page_alloc[i].offset += frag_info->frag_stride;
-               if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+               page_alloc[i].page_offset += frag_info->frag_stride;
+
+               if (page_alloc[i].page_offset + frag_info->frag_stride <=
+                   ring_alloc[i].page_size)
                        continue;
+
                if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
                        goto out;
        }
 
        for (i = 0; i < priv->num_frags; i++) {
                frags[i] = ring_alloc[i];
-               dma = ring_alloc[i].dma + ring_alloc[i].offset;
+               dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
                ring_alloc[i] = page_alloc[i];
                rx_desc->data[i].addr = cpu_to_be64(dma);
        }
@@ -117,7 +121,7 @@ out:
                frag_info = &priv->frag_info[i];
                if (page_alloc[i].page != ring_alloc[i].page) {
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
-                               page_alloc[i].size, PCI_DMA_FROMDEVICE);
+                               page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
                        atomic_set(&page->_count, 1);
                        put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
                              int i)
 {
        const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+       u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
 
-       if (frags[i].offset + frag_info->frag_stride > frags[i].size)
-               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
-                                        PCI_DMA_FROMDEVICE);
+       if (next_frag_end > frags[i].page_size)
+               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+                              PCI_DMA_FROMDEVICE);
 
        if (frags[i].page)
                put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
 
                page_alloc = &ring->page_alloc[i];
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                              page_alloc->size, PCI_DMA_FROMDEVICE);
+                              page_alloc->page_size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
                atomic_set(&page->_count, 1);
                put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
                       i, page_count(page_alloc->page));
 
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                               page_alloc->size, PCI_DMA_FROMDEVICE);
-               while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
+               while (page_alloc->page_offset + frag_info->frag_stride <
+                      page_alloc->page_size) {
                        put_page(page_alloc->page);
-                       page_alloc->offset += frag_info->frag_stride;
+                       page_alloc->page_offset += frag_info->frag_stride;
                }
                page_alloc->page = NULL;
        }
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                /* Save page reference in skb */
                __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
                skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
-               skb_frags_rx[nr].page_offset = frags[nr].offset;
+               skb_frags_rx[nr].page_offset = frags[nr].page_offset;
                skb->truesize += frag_info->frag_stride;
                frags[nr].page = NULL;
        }
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
        /* Get pointer to first fragment so we could copy the headers into the
         * (linear part of the) skb */
-       va = page_address(frags[0].page) + frags[0].offset;
+       va = page_address(frags[0].page) + frags[0].page_offset;
 
        if (length <= SMALL_PACKET_SIZE) {
                /* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
                                                DMA_FROM_DEVICE);
                        ethh = (struct ethhdr *)(page_address(frags[0].page) +
-                                                frags[0].offset);
+                                                frags[0].page_offset);
 
                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
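
With the fields renamed to page_offset/page_size, the mlx4 free path above unmaps a page only when the fragment being freed is the last one that fits, i.e. when page_offset plus two strides overruns page_size. A quick arithmetic sketch of that test with made-up sizes:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int page_size = 8192;
        unsigned int frag_stride = 2048;

        for (unsigned int page_offset = 0; page_offset < page_size;
             page_offset += frag_stride) {
                /* Unmap when the *next* fragment would no longer fit. */
                bool last = page_offset + 2 * frag_stride > page_size;

                printf("offset %4u -> %s\n", page_offset,
                       last ? "unmap page" : "keep mapping");
        }
        return 0;
}
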
index 5e0aa56..bf06e36 100644 (file)
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
 struct mlx4_en_rx_alloc {
        struct page     *page;
        dma_addr_t      dma;
-       u32             offset;
-       u32             size;
+       u32             page_offset;
+       u32             page_size;
 };
 
 struct mlx4_en_tx_ring {
index 5472cbd..6ca3073 100644 (file)
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
        return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+                          int csum)
 {
        block->token = token;
-       block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
-       block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+       if (csum) {
+               block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+                                           sizeof(block->data) - 2);
+               block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+       }
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
 {
        struct mlx5_cmd_mailbox *next = msg->next;
 
        while (next) {
-               calc_block_sig(next->buf, token);
+               calc_block_sig(next->buf, token, csum);
                next = next->next;
        }
 }
 
-static void set_signature(struct mlx5_cmd_work_ent *ent)
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
        ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-       calc_chain_sig(ent->in, ent->token);
-       calc_chain_sig(ent->out, ent->token);
+       calc_chain_sig(ent->in, ent->token, csum);
+       calc_chain_sig(ent->out, ent->token, csum);
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
-       if (!cmd->checksum_disabled)
-               set_signature(ent);
+       set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ktime_get_ts(&ent->ts1);
 
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
 
                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
-               if (xor8_buf(block, sizeof(*block)) != 0xff)
-                       return -EINVAL;
 
                memcpy(to, block->data, copy);
                to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                goto err_map;
        }
 
+       cmd->checksum_disabled = 1;
        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
        cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
 
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
-       case MLX5_CMD_STAT_LIM_ERR:                     return -EINVAL;
+       case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
index 443cc4d..2231d93 100644 (file)
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                goto err_in;
        }
 
+       snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+                name, pci_name(dev->pdev));
        eq->eqn = out.eq_number;
        err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-                         name, eq);
+                         eq->name, eq);
        if (err)
                goto err_eq;
 
index b47739b..bc0f5fb 100644 (file)
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
        struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
        struct mlx5_cmd_set_hca_cap_mbox_out set_out;
-       struct mlx5_profile *prof = dev->profile;
        u64 flags;
-       int csum = 1;
        int err;
 
        memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
               sizeof(set_ctx->hca_cap));
 
-       if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
-               csum = !!prof->cmdif_csum;
-               flags = be64_to_cpu(set_ctx->hca_cap.flags);
-               if (csum)
-                       flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-               else
-                       flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
-               set_ctx->hca_cap.flags = cpu_to_be64(flags);
-       }
-
        if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
                set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
+       flags = be64_to_cpu(query_out->hca_cap.flags);
+       /* disable checksum */
+       flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+       set_ctx->hca_cap.flags = cpu_to_be64(flags);
        memset(&set_out, 0, sizeof(set_out));
        set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
        set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        if (err)
                goto query_ex;
 
-       if (!csum)
-               dev->cmd.checksum_disabled = 1;
-
 query_ex:
        kfree(query_out);
        kfree(set_ctx);
index 3a2408d..7b12acf 100644 (file)
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
        __be64                  pas[0];
 };
 
+enum {
+       MAX_RECLAIM_TIME_MSECS  = 5000,
+};
+
 static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
 {
        struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        int err;
        int i;
 
+       if (nclaimed)
+               *nclaimed = 0;
+
        memset(&in, 0, sizeof(in));
        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
        out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
 
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 {
-       unsigned long end = jiffies + msecs_to_jiffies(5000);
+       unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct fw_page *fwp;
        struct rb_node *p;
+       int nclaimed = 0;
        int err;
 
        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+                       err = reclaim_pages(dev, fwp->func_id,
+                                           optimal_reclaimed_pages(),
+                                           &nclaimed);
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
                                return err;
                        }
+                       if (nclaimed)
+                               end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
index bd1a2d2..ea54d95 100644 (file)
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                netdev_err(ndev, "irq_of_parse_and_map failed\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto irq_map_fail;
        }
 
        priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
        priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
                                                GFP_DMA | GFP_KERNEL);
-       if (priv->tx_desc_base == NULL)
+       if (priv->tx_desc_base == NULL) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
                                                RX_DESC_NUM, &priv->rx_base,
                                                GFP_DMA | GFP_KERNEL);
-       if (priv->rx_desc_base == NULL)
+       if (priv->rx_desc_base == NULL) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
                                    GFP_ATOMIC);
-       if (!priv->tx_buf_base)
+       if (!priv->tx_buf_base) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
                                    GFP_ATOMIC);
-       if (!priv->rx_buf_base)
+       if (!priv->rx_buf_base) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        platform_set_drvdata(pdev, ndev);
 
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
 init_fail:
        netdev_err(ndev, "init failed\n");
        moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+       free_netdev(ndev);
        return ret;
 }
 
index ebe4c86..ff83a9f 100644 (file)
@@ -665,7 +665,7 @@ static int qlcnic_set_channels(struct net_device *dev,
                        return err;
        }
 
-       if (channel->tx_count) {
+       if (qlcnic_82xx_check(adapter) && channel->tx_count) {
                err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
                if (err)
                        return err;
index 21d00a0..9e61eb8 100644 (file)
@@ -2257,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = qlcnic_alloc_adapter_resources(adapter);
        if (err)
-               goto err_out_free_netdev;
+               goto err_out_free_wq;
 
        adapter->dev_rst_time = jiffies;
        adapter->ahw->revision_id = pdev->revision;
@@ -2396,6 +2396,9 @@ err_out_disable_msi:
 err_out_free_hw:
        qlcnic_free_adapter_resources(adapter);
 
+err_out_free_wq:
+       destroy_workqueue(adapter->qlcnic_wq);
+
 err_out_free_netdev:
        free_netdev(netdev);
 
@@ -3648,11 +3651,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
        u8 max_hw = QLCNIC_MAX_TX_RINGS;
        u32 max_allowed;
 
-       if (!qlcnic_82xx_check(adapter)) {
-               netdev_err(netdev, "No Multi TX-Q support\n");
-               return -EINVAL;
-       }
-
        if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
                return -EINVAL;
@@ -3692,8 +3690,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
        u8 max_hw = adapter->ahw->max_rx_ques;
        u32 max_allowed;
 
-       if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
-           !qlcnic_use_msi) {
+       if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_err(netdev, "No RSS support in INT-x mode\n");
                return -EINVAL;
        }
index 5cd831e..b57c278 100644 (file)
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
+       .fdr_value      = 0x0000070f,
+       .rmcr_value     = 0x00000001,
 
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
+       .rpadir         = 1,
+       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
index 9f18ae9..21f9ad6 100644 (file)
@@ -444,6 +444,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
        EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+       EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+       EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+       EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+       EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+       EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+       EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+       EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+       EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+       EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+       EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+       EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+       EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |          \
@@ -498,44 +510,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
                                  (1ULL << EF10_STAT_rx_length_error))
 
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
-       static const unsigned long hunt_40g_stat_mask[] = {
-               STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-                                HUNT_40G_EXTRA_STAT_MASK)
-       };
-       static const unsigned long hunt_10g_only_stat_mask[] = {
-               STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-                                HUNT_10G_ONLY_STAT_MASK)
-       };
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK (                                   \
+       (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |                   \
+       (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |                 \
+       (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |                    \
+       (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |                  \
+       (1ULL << EF10_STAT_rx_pm_trunc_qbb) |                           \
+       (1ULL << EF10_STAT_rx_pm_discard_qbb) |                         \
+       (1ULL << EF10_STAT_rx_pm_discard_mapping) |                     \
+       (1ULL << EF10_STAT_rx_dp_q_disabled_packets) |                  \
+       (1ULL << EF10_STAT_rx_dp_di_dropped_packets) |                  \
+       (1ULL << EF10_STAT_rx_dp_streaming_packets) |                   \
+       (1ULL << EF10_STAT_rx_dp_emerg_fetch) |                         \
+       (1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+       u64 raw_mask = HUNT_COMMON_STAT_MASK;
        u32 port_caps = efx_mcdi_phy_get_caps(efx);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
        if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-               return hunt_40g_stat_mask;
+               raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
        else
-               return hunt_10g_only_stat_mask;
+               raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+       if (nic_data->datapath_caps &
+           (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+               raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+       return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+       u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+       mask[0] = raw_mask;
+#else
+       mask[0] = raw_mask & 0xffffffff;
+       mask[1] = raw_mask >> 32;
+#endif
 }
 
 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 {
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+       efx_ef10_get_stat_mask(efx, mask);
        return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
-                                     efx_ef10_stat_mask(efx), names);
+                                     mask, names);
 }
 
 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        __le64 generation_start, generation_end;
        u64 *stats = nic_data->stats;
        __le64 *dma_stats;
 
+       efx_ef10_get_stat_mask(efx, mask);
+
        dma_stats = efx->stats_buffer.addr;
        nic_data = efx->nic_data;
 
@@ -543,8 +583,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
-       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
                             stats, efx->stats_buffer.addr, false);
+       rmb();
        generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
        if (generation_end != generation_start)
                return -EAGAIN;
@@ -563,12 +604,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
                                    struct rtnl_link_stats64 *core_stats)
 {
-       const unsigned long *mask = efx_ef10_stat_mask(efx);
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        u64 *stats = nic_data->stats;
        size_t stats_count = 0, index;
        int retry;
 
+       efx_ef10_get_stat_mask(efx, mask);
+
        /* If we're unlucky enough to read statistics during the DMA, wait
         * up to 10ms for it to finish (typically takes <500us)
         */
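
The ef10 statistics rework computes the enabled-stat mask as a u64 at run time and then splits it into an unsigned long bitmap so the same code serves 32-bit and 64-bit builds, mirroring the old STAT_MASK_BITMAP macro. A user-space sketch of that split and of testing a bit in the resulting bitmap (the constants are made up):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define STAT_COUNT 40
#define BITS_PER_LONG_HERE (CHAR_BIT * sizeof(long))
#define BITMAP_LONGS ((STAT_COUNT + BITS_PER_LONG_HERE - 1) / BITS_PER_LONG_HERE)

static void mask_to_bitmap(uint64_t raw_mask, unsigned long *mask)
{
#if ULONG_MAX > 0xffffffffUL
        /* 64-bit long: the whole mask fits in one word. */
        mask[0] = (unsigned long)raw_mask;
#else
        /* 32-bit long: split the mask across two words. */
        mask[0] = (unsigned long)(raw_mask & 0xffffffffUL);
        mask[1] = (unsigned long)(raw_mask >> 32);
#endif
}

int main(void)
{
        unsigned long mask[BITMAP_LONGS];
        uint64_t raw = (1ULL << 3) | (1ULL << 35);      /* example stat bits */
        unsigned int bit = 35;

        mask_to_bitmap(raw, mask);
        printf("bit %u enabled: %d\n", bit,
               !!(mask[bit / BITS_PER_LONG_HERE] &
                  (1UL << (bit % BITS_PER_LONG_HERE))));
        return 0;
}
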
index c082562..366c8e3 100644 (file)
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                               bool *was_attached)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
        size_t outlen;
        int rc;
 
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                goto fail;
        }
 
+       /* We currently assume we have control of the external link
+        * and are completely trusted by firmware.  Abort probing
+        * if that's not true for this function.
+        */
+       if (driver_operating &&
+           outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+           (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+            (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+             1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+           (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+            1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+               netif_err(efx, probe, efx->net_dev,
+                         "This driver version only supports one function per port\n");
+               return -ENODEV;
+       }
+
        if (was_attached != NULL)
                *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
        return 0;
index b5cf624..e0a63dd 100644 (file)
 #define          MC_CMD_MAC_RX_LANES01_DISP_ERR  0x39 /* enum */
 #define          MC_CMD_MAC_RX_LANES23_DISP_ERR  0x3a /* enum */
 #define          MC_CMD_MAC_RX_MATCH_FAULT  0x3b /* enum */
-#define          MC_CMD_GMAC_DMABUF_START  0x40 /* enum */
-#define          MC_CMD_GMAC_DMABUF_END    0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW  0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW  0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_VFIFO_FULL  0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_VFIFO_FULL  0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_QBB  0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_QBB  0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_MAPPING  0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_Q_DISABLED_PKTS  0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_DI_DROPPED_PKTS  0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS  0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS  0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_START  0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_END    0x5f
 #define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
 
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
index e7dbd2d..9826594 100644 (file)
@@ -469,8 +469,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
  * @count: Length of the @desc array
  * @mask: Bitmask of which elements of @desc are enabled
  * @stats: Buffer to update with the converted statistics.  The length
- *     of this array must be at least the number of set bits in the
- *     first @count bits of @mask.
+ *     of this array must be at least @count.
  * @dma_buf: DMA buffer containing hardware statistics
  * @accumulate: If set, the converted values will be added rather than
  *     directly stored to the corresponding elements of @stats
@@ -503,11 +502,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
                        }
 
                        if (accumulate)
-                               *stats += val;
+                               stats[index] += val;
                        else
-                               *stats = val;
+                               stats[index] = val;
                }
-
-               ++stats;
        }
 }
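
efx_nic_update_stats() now stores into stats[index] instead of advancing a packed cursor, so every enabled counter lands at its own descriptor index and the output array only has to be @count entries long. A small sketch of mask-driven indexed updates with invented values:

#include <stdio.h>

#define COUNT 6

int main(void)
{
        /* Bit i of the mask says whether descriptor i is enabled. */
        unsigned int mask = 0x2d;               /* 0b101101 */
        unsigned long dma_buf[COUNT] = { 10, 20, 30, 40, 50, 60 };
        unsigned long stats[COUNT] = { 0 };

        for (int index = 0; index < COUNT; index++) {
                if (!(mask & (1u << index)))
                        continue;
                /* Indexed store: disabled slots simply stay untouched. */
                stats[index] += dma_buf[index];
        }

        for (int index = 0; index < COUNT; index++)
                printf("stat[%d] = %lu\n", index, stats[index]);
        return 0;
}
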
index fda29d3..890bbbe 100644 (file)
@@ -386,6 +386,18 @@ enum {
        EF10_STAT_rx_align_error,
        EF10_STAT_rx_length_error,
        EF10_STAT_rx_nodesc_drops,
+       EF10_STAT_rx_pm_trunc_bb_overflow,
+       EF10_STAT_rx_pm_discard_bb_overflow,
+       EF10_STAT_rx_pm_trunc_vfifo_full,
+       EF10_STAT_rx_pm_discard_vfifo_full,
+       EF10_STAT_rx_pm_trunc_qbb,
+       EF10_STAT_rx_pm_discard_qbb,
+       EF10_STAT_rx_pm_discard_mapping,
+       EF10_STAT_rx_dp_q_disabled_packets,
+       EF10_STAT_rx_dp_di_dropped_packets,
+       EF10_STAT_rx_dp_streaming_packets,
+       EF10_STAT_rx_dp_emerg_fetch,
+       EF10_STAT_rx_dp_emerg_wait,
        EF10_STAT_COUNT
 };
 
index 5730fe2..98eedb9 100644 (file)
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] =  {
                        void __iomem *__ioaddr = ioaddr;                \
                        if (__len >= 2 && (unsigned long)__ptr & 2) {   \
                                __len -= 2;                             \
-                               SMC_outw(*(u16 *)__ptr, ioaddr,         \
-                                       DATA_REG(lp));          \
+                               SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                                __ptr += 2;                             \
                        }                                               \
                        if (SMC_CAN_USE_DATACS && lp->datacs)           \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] =  {
                        SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
                        if (__len & 2) {                                \
                                __ptr += (__len & ~3);                  \
-                               SMC_outw(*((u16 *)__ptr), ioaddr,       \
-                                        DATA_REG(lp));         \
+                               SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                        }                                               \
                } else if (SMC_16BIT(lp))                               \
                        SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1);   \
index 79974e3..cc3ce55 100644 (file)
@@ -639,13 +639,6 @@ void cpsw_rx_handler(void *token, int len, int status)
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
-       u32 rx, tx, rx_thresh;
-
-       rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
-       rx = __raw_readl(&priv->wr_regs->rx_stat);
-       tx = __raw_readl(&priv->wr_regs->tx_stat);
-       if (!rx_thresh && !rx && !tx)
-               return IRQ_NONE;
 
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
@@ -1169,9 +1162,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
                }
        }
 
+       napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
-       napi_enable(&priv->napi);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
@@ -1771,8 +1764,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        }
        data->mac_control = prop;
 
-       if (!of_property_read_u32(node, "dual_emac", &prop))
-               data->dual_emac = prop;
+       if (of_property_read_bool(node, "dual_emac"))
+               data->dual_emac = 1;
 
        /*
         * Populate all the child nodes here...
@@ -1782,7 +1775,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        if (ret)
                pr_warn("Doesn't have any child node\n");
 
-       for_each_node_by_name(slave_node, "slave") {
+       for_each_child_of_node(node, slave_node) {
                struct cpsw_slave_data *slave_data = data->slave_data + i;
                const void *mac_addr = NULL;
                u32 phyid;
@@ -1791,6 +1784,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                struct device_node *mdio_node;
                struct platform_device *mdio;
 
+               /* Not a slave child node, continue */
+               if (strcmp(slave_node->name, "slave"))
+                       continue;
+
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        pr_err("Missing slave[%d] phy_id property\n", i);
index 67df09e..6a32ef9 100644 (file)
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
                    netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
                        emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
-               }
-               if (!netdev_mc_empty(ndev)) {
+               } else if (!netdev_mc_empty(ndev)) {
                        struct netdev_hw_addr *ha;
 
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
index 0721e72..5af1c3e 100644 (file)
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EINVAL;         /* Cannot change this parameter when up */
                if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
                        return -ENOBUFS;
-               ym->bitrate = 9600;
                if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
                        kfree(ym);
                        return -EFAULT;
index 42e6dee..0632d34 100644 (file)
@@ -82,7 +82,6 @@ struct mrf24j40 {
 
        struct mutex buffer_mutex; /* only used to protect buf */
        struct completion tx_complete;
-       struct work_struct irqwork;
        u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
 };
 
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
        if (ret)
                goto err;
 
+       INIT_COMPLETION(devrec->tx_complete);
+
        /* Set TXNTRIG bit of TXNCON to send packet */
        ret = read_short_reg(devrec, REG_TXNCON, &val);
        if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
                val |= 0x4;
        write_short_reg(devrec, REG_TXNCON, val);
 
-       INIT_COMPLETION(devrec->tx_complete);
-
        /* Wait for the device to send the TX complete interrupt. */
        ret = wait_for_completion_interruptible_timeout(
                                                &devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
 static irqreturn_t mrf24j40_isr(int irq, void *data)
 {
        struct mrf24j40 *devrec = data;
-
-       disable_irq_nosync(irq);
-
-       schedule_work(&devrec->irqwork);
-
-       return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
-       struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
        u8 intstat;
        int ret;
 
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
                mrf24j40_handle_rx(devrec);
 
 out:
-       enable_irq(devrec->spi->irq);
+       return IRQ_HANDLED;
 }
 
 static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
 
        mutex_init(&devrec->buffer_mutex);
        init_completion(&devrec->tx_complete);
-       INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
        devrec->spi = spi;
        spi_set_drvdata(spi, devrec);
 
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
        val &= ~0x3; /* Clear RX mode (normal) */
        write_short_reg(devrec, REG_RXMCR, val);
 
-       ret = request_irq(spi->irq,
-                         mrf24j40_isr,
-                         IRQF_TRIGGER_FALLING,
-                         dev_name(&spi->dev),
-                         devrec);
+       ret = request_threaded_irq(spi->irq,
+                                  NULL,
+                                  mrf24j40_isr,
+                                  IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+                                  dev_name(&spi->dev),
+                                  devrec);
 
        if (ret) {
                dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
        dev_dbg(printdev(devrec), "remove\n");
 
        free_irq(spi->irq, devrec);
-       flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
        ieee802154_unregister_device(devrec->dev);
        ieee802154_free_device(devrec->dev);
        /* TODO: Will ieee802154_free_device() wait until ->xmit() is
index 807815f..7cb105c 100644 (file)
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
        if (unlikely(!noblock))
                add_wait_queue(&tfile->wq.wait, &wait);
        while (len) {
-               current->state = TASK_INTERRUPTIBLE;
+               if (unlikely(!noblock))
+                       current->state = TASK_INTERRUPTIBLE;
 
                /* Read frames from the queue */
                if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
-       current->state = TASK_RUNNING;
-       if (unlikely(!noblock))
+       if (unlikely(!noblock)) {
+               current->state = TASK_RUNNING;
                remove_wait_queue(&tfile->wq.wait, &wait);
+       }
 
        return ret;
 }
index 3569293..846cc19 100644 (file)
@@ -36,8 +36,8 @@
 #define AX_RXHDR_L4_TYPE_TCP                   16
 #define AX_RXHDR_L3CSUM_ERR                    2
 #define AX_RXHDR_L4CSUM_ERR                    1
-#define AX_RXHDR_CRC_ERR                       ((u32)BIT(31))
-#define AX_RXHDR_DROP_ERR                      ((u32)BIT(30))
+#define AX_RXHDR_CRC_ERR                       ((u32)BIT(29))
+#define AX_RXHDR_DROP_ERR                      ((u32)BIT(31))
 #define AX_ACCESS_MAC                          0x01
 #define AX_ACCESS_PHY                          0x02
 #define AX_ACCESS_EEPROM                       0x04
@@ -1406,6 +1406,19 @@ static const struct driver_info sitecom_info = {
        .tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info samsung_info = {
+       .description = "Samsung USB Ethernet Adapter",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
        /* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1431,11 @@ static const struct usb_device_id products[] = {
 }, {
        /* Sitecom USB 3.0 to Gigabit Adapter */
        USB_DEVICE(0x0df6, 0x0072),
-       .driver_info = (unsigned long) &sitecom_info,
+       .driver_info = (unsigned long)&sitecom_info,
+}, {
+       /* Samsung USB Ethernet Adapter */
+       USB_DEVICE(0x04e8, 0xa100),
+       .driver_info = (unsigned long)&samsung_info,
 },
        { },
 };
index 3d6aaf7..818ce90 100644 (file)
@@ -714,6 +714,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
 
        /* 4. Gobi 1000 devices */
index bf94e10..90a429b 100644 (file)
@@ -1688,8 +1688,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
                !(info->flags & FLAG_MULTI_PACKET)) {
                dev->padding_pkt = kzalloc(1, GFP_KERNEL);
-               if (!dev->padding_pkt)
+               if (!dev->padding_pkt) {
+                       status = -ENOMEM;
                        goto out4;
+               }
        }
 
        status = register_netdev (net);
index defec2b..9fbdfcd 100644 (file)
@@ -938,7 +938,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
                return -EINVAL;
        } else {
                vi->curr_queue_pairs = queue_pairs;
-               schedule_delayed_work(&vi->refill, 0);
+               /* virtnet_open() will refill when the device is brought up. */
+               if (dev->flags & IFF_UP)
+                       schedule_delayed_work(&vi->refill, 0);
        }
 
        return 0;
@@ -1116,6 +1118,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
 {
        struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
 
+       mutex_lock(&vi->config_lock);
+
+       if (!vi->config_enable)
+               goto done;
+
        switch(action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
@@ -1128,6 +1135,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
        default:
                break;
        }
+
+done:
+       mutex_unlock(&vi->config_lock);
        return NOTIFY_OK;
 }
 
@@ -1733,7 +1743,9 @@ static int virtnet_restore(struct virtio_device *vdev)
        vi->config_enable = true;
        mutex_unlock(&vi->config_lock);
 
+       rtnl_lock();
        virtnet_set_queues(vi, vi->curr_queue_pairs);
+       rtnl_unlock();
 
        return 0;
 }
index 3f0c4f2..bcfff0d 100644 (file)
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
        }
 
        i = port->index;
+       memset(&sync, 0, sizeof(sync));
        sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
        /* Lucky card and linux use same encoding here */
        sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
index 6a24a5a..4c0a697 100644 (file)
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
+               memset(&line, 0, sizeof(line));
                line.clock_type = get_status(port)->clocking;
                line.clock_rate = 0;
                line.loopback = 0;
index e4f6590..709301f 100644 (file)
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        unsigned long flags;
+       int i;
 
        if (ath_startrecv(sc) != 0) {
                ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                }
        work:
                ath_restart_work(sc);
+
+               for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+                       if (!ATH_TXQ_SETUP(sc, i))
+                               continue;
+
+                       spin_lock_bh(&sc->tx.txq[i].axq_lock);
+                       ath_txq_schedule(sc, &sc->tx.txq[i]);
+                       spin_unlock_bh(&sc->tx.txq[i].axq_lock);
+               }
        }
 
        ieee80211_wake_queues(sc->hw);
@@ -539,21 +549,10 @@ chip_reset:
 
 static int ath_reset(struct ath_softc *sc)
 {
-       int i, r;
+       int r;
 
        ath9k_ps_wakeup(sc);
-
        r = ath_reset_internal(sc, NULL);
-
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (!ATH_TXQ_SETUP(sc, i))
-                       continue;
-
-               spin_lock_bh(&sc->tx.txq[i].axq_lock);
-               ath_txq_schedule(sc, &sc->tx.txq[i]);
-               spin_unlock_bh(&sc->tx.txq[i].axq_lock);
-       }
-
        ath9k_ps_restore(sc);
 
        return r;
index 5ac713d..dd30452 100644 (file)
@@ -1969,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb)
 {
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
-       struct ath_buf *bf;
-
-       bf = fi->bf;
+       struct ath_buf *bf = fi->bf;
 
        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);
        bf->bf_state.bf_type = 0;
+       if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+               bf->bf_state.bf_type = BUF_AMPDU;
+               ath_tx_addto_baw(sc, tid, bf);
+       }
 
        bf->bf_next = NULL;
        bf->bf_lastbf = bf;
index 899cad3..755a0c8 100644 (file)
@@ -237,7 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
        struct hwbus_priv *self = dev_id;
 
        if (self->core) {
+               cw1200_spi_lock(self);
                cw1200_irq_handler(self->core);
+               cw1200_spi_unlock(self);
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
index 30d45e2..8ac305b 100644 (file)
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
        .ht_params = &iwl6000_ht_params,
 };
 
+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+       .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+       IWL_DEVICE_6035,
+       .ht_params = &iwl6000_ht_params,
+};
+
 const struct iwl_cfg iwl1030_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
        IWL_DEVICE_6030,
index e4d370b..b03c25e 100644 (file)
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
 extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
 extern const struct iwl_cfg iwl2030_2bgn_cfg;
 extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
 extern const struct iwl_cfg iwl105_bgn_cfg;
 extern const struct iwl_cfg iwl105_bgn_d_cfg;
 extern const struct iwl_cfg iwl135_bgn_cfg;
index dd57a36..80b4750 100644 (file)
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 {
        int ret;
 
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (trans->state != IWL_TRANS_FW_ALIVE) {
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               return -EIO;
+       }
 
        if (!(cmd->flags & CMD_ASYNC))
                lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
index 21407a3..d58e393 100644 (file)
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                if (!mvmvif->queue_params[ac].uapsd)
                        continue;
 
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+               if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+                       cmd->flags |=
+                               cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
                cmd->uapsd_ac_flags |= BIT(ac);
 
                /* QNDP TID - the highest TID with no admission control */
index 9a7ab84..621fb71 100644 (file)
@@ -394,6 +394,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
                        return false;
                }
 
+               /*
+                * If scan cannot be aborted, it means that we had a
+                * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
+                * ieee80211_scan_completed already.
+                */
                IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
                               *resp);
                return true;
@@ -417,14 +422,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
                                               SCAN_COMPLETE_NOTIFICATION };
        int ret;
 
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               return;
+
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
                                   scan_abort_notif,
                                   ARRAY_SIZE(scan_abort_notif),
                                   iwl_mvm_scan_abort_notif, NULL);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
+                                  CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+               /* mac80211's state will be cleaned in the fw_restart flow */
                goto out_remove_notif;
        }
 
index dc02cb9..26108a1 100644 (file)
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6x00 Series */
        {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
        {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
 
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+       {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
        {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6x35 Series */
        {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
 
@@ -260,54 +270,86 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 #if IS_ENABLED(CONFIG_IWLMVM)
 /* 7000 Series */
        {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
 
 /* 3160 Series */
        {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
index bad95d2..c3f904d 100644 (file)
@@ -1401,6 +1401,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
+       err = pci_enable_device(pdev);
+       if (err)
+               goto out_no_pci;
+
        if (!cfg->base_params->pcie_l1_allowed) {
                /*
                 * W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1416,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       PCIE_LINK_STATE_CLKPM);
        }
 
-       err = pci_enable_device(pdev);
-       if (err)
-               goto out_no_pci;
-
        pci_set_master(pdev);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
index f45eb29..1424335 100644 (file)
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
                 * non-AGG queue.
                 */
                iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+               ssn = trans_pcie->txq[txq_id].q.read_ptr;
        }
 
        /* Place first TFD at index corresponding to start sequence number.
index 9d7c0e6..37f873b 100644 (file)
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
  */
 int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
 {
+       int ret = 0;
+
        if (!priv->media_connected)
                return 0;
 
        switch (priv->bss_mode) {
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
-               return mwifiex_deauthenticate_infra(priv, mac);
+               ret = mwifiex_deauthenticate_infra(priv, mac);
+               if (ret)
+                       cfg80211_disconnected(priv->netdev, 0, NULL, 0,
+                                             GFP_KERNEL);
+               break;
        case NL80211_IFTYPE_ADHOC:
                return mwifiex_send_cmd_sync(priv,
                                             HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
                break;
        }
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
 
index fd77833..c2b91f5 100644 (file)
@@ -358,10 +358,12 @@ process_start:
                }
        } while (true);
 
-       if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto process_start;
+       }
 
-       spin_lock_irqsave(&adapter->main_proc_lock, flags);
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
index 8b05752..8c351f7 100644 (file)
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        dev_dbg(adapter->dev,
                "info: successfully disconnected from %pM: reason code %d\n",
                priv->cfg_bssid, reason_code);
-       if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+       if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+           priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
                cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
                                      GFP_KERNEL);
        }
index 76d95de..dc49e52 100644 (file)
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
                goto exit_release_regions;
        }
 
-       pci_enable_msi(pci_dev);
-
        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                rt2x00_probe_err("Failed to allocate hardware\n");
                retval = -ENOMEM;
-               goto exit_disable_msi;
+               goto exit_release_regions;
        }
 
        pci_set_drvdata(pci_dev, hw);
@@ -152,9 +150,6 @@ exit_free_reg:
 exit_free_device:
        ieee80211_free_hw(hw);
 
-exit_disable_msi:
-       pci_disable_msi(pci_dev);
-
 exit_release_regions:
        pci_release_regions(pci_dev);
 
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
        rt2x00pci_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);
 
-       pci_disable_msi(pci_dev);
-
        /*
         * Free the PCI device data.
         */
index 763cf1d..5a060e5 100644 (file)
@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
                                        (bool)GET_RX_DESC_PAGGR(pdesc));
        rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
        if (phystatus) {
-               p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+               p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+                                                    stats->rx_bufshift);
                rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
                                                 p_drvinfo);
        }
index b45bce2..1b08d87 100644 (file)
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
 static void connect(struct backend_info *);
 static void backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+                             enum xenbus_state state);
 
 static int netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
+       set_backend_state(be, XenbusStateClosed);
+
        unregister_hotplug_status_watch(be);
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
index 9d2009a..78cc760 100644 (file)
@@ -74,10 +74,4 @@ config OF_MTD
        depends on MTD
        def_bool y
 
-config OF_RESERVED_MEM
-       depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
-       def_bool y
-       help
-         Initialization code for DMA reserved memory
-
 endmenu # OF
index ed9660a..efd0510 100644 (file)
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO)   += of_mdio.o
 obj-$(CONFIG_OF_PCI)   += of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
 obj-$(CONFIG_OF_MTD)   += of_mtd.o
-obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
index 865d3f6..7d4c70f 100644 (file)
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
        struct device_node *cpun, *cpus;
 
        cpus = of_find_node_by_path("/cpus");
-       if (!cpus) {
-               pr_warn("Missing cpus node, bailing out\n");
+       if (!cpus)
                return NULL;
-       }
 
        for_each_child_of_node(cpus, cpun) {
                if (of_node_cmp(cpun->type, "cpu"))
index 229dd9d..a4fa9ad 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/random.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #ifdef CONFIG_PPC
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void)
 }
 
 #endif /* CONFIG_OF_EARLY_FLATTREE */
-
-/* Feed entire flattened device tree into the random pool */
-static int __init add_fdt_randomness(void)
-{
-       if (initial_boot_params)
-               add_device_randomness(initial_boot_params,
-                               be32_to_cpu(initial_boot_params->totalsize));
-
-       return 0;
-}
-core_initcall(add_fdt_randomness);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
deleted file mode 100644 (file)
index 0fe40c7..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Device tree based initialization code for reserved memory.
- *
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- * Author: Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your option) any later version of the license.
- */
-
-#include <linux/memblock.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
-#include <linux/mm.h>
-#include <linux/sizes.h>
-#include <linux/mm_types.h>
-#include <linux/dma-contiguous.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
-
-#define MAX_RESERVED_REGIONS   16
-struct reserved_mem {
-       phys_addr_t             base;
-       unsigned long           size;
-       struct cma              *cma;
-       char                    name[32];
-};
-static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
-static int reserved_mem_count;
-
-static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
-                                       int depth, void *data)
-{
-       struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
-       phys_addr_t base, size;
-       int is_cma, is_reserved;
-       unsigned long len;
-       const char *status;
-       __be32 *prop;
-
-       is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
-              of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
-       is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
-
-       if (!is_reserved && !is_cma) {
-               /* ignore node and scan next one */
-               return 0;
-       }
-
-       status = of_get_flat_dt_prop(node, "status", &len);
-       if (status && strcmp(status, "okay") != 0) {
-               /* ignore disabled node and scan next one */
-               return 0;
-       }
-
-       prop = of_get_flat_dt_prop(node, "reg", &len);
-       if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
-                            sizeof(__be32))) {
-               pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
-                      uname);
-               /* ignore node and scan next one */
-               return 0;
-       }
-       base = dt_mem_next_cell(dt_root_addr_cells, &prop);
-       size = dt_mem_next_cell(dt_root_size_cells, &prop);
-
-       if (!size) {
-               /* ignore node and scan next one */
-               return 0;
-       }
-
-       pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
-               uname, (unsigned long)base, (unsigned long)size / SZ_1M);
-
-       if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
-               return -ENOSPC;
-
-       rmem->base = base;
-       rmem->size = size;
-       strlcpy(rmem->name, uname, sizeof(rmem->name));
-
-       if (is_cma) {
-               struct cma *cma;
-               if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
-                       rmem->cma = cma;
-                       reserved_mem_count++;
-                       if (of_get_flat_dt_prop(node,
-                                               "linux,default-contiguous-region",
-                                               NULL))
-                               dma_contiguous_set_default(cma);
-               }
-       } else if (is_reserved) {
-               if (memblock_remove(base, size) == 0)
-                       reserved_mem_count++;
-               else
-                       pr_err("Failed to reserve memory for %s\n", uname);
-       }
-
-       return 0;
-}
-
-static struct reserved_mem *get_dma_memory_region(struct device *dev)
-{
-       struct device_node *node;
-       const char *name;
-       int i;
-
-       node = of_parse_phandle(dev->of_node, "memory-region", 0);
-       if (!node)
-               return NULL;
-
-       name = kbasename(node->full_name);
-       for (i = 0; i < reserved_mem_count; i++)
-               if (strcmp(name, reserved_mem[i].name) == 0)
-                       return &reserved_mem[i];
-       return NULL;
-}
-
-/**
- * of_reserved_mem_device_init() - assign reserved memory region to given device
- *
- * This function assigns the memory region pointed to by the "memory-region"
- * device tree property to the given device.
- */
-void of_reserved_mem_device_init(struct device *dev)
-{
-       struct reserved_mem *region = get_dma_memory_region(dev);
-       if (!region)
-               return;
-
-       if (region->cma) {
-               dev_set_cma_area(dev, region->cma);
-               pr_info("Assigned CMA %s to %s device\n", region->name,
-                       dev_name(dev));
-       } else {
-               if (dma_declare_coherent_memory(dev, region->base, region->base,
-                   region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
-                       pr_info("Declared reserved memory %s to %s device\n",
-                               region->name, dev_name(dev));
-       }
-}
-
-/**
- * of_reserved_mem_device_release() - release reserved memory device structures
- *
- * This function releases structures allocated for memory region handling for
- * the given device.
- */
-void of_reserved_mem_device_release(struct device *dev)
-{
-       struct reserved_mem *region = get_dma_memory_region(dev);
-       if (!region && !region->cma)
-               dma_release_declared_memory(dev);
-}
-
-/**
- * early_init_dt_scan_reserved_mem() - create reserved memory regions
- *
- * This function grabs memory from early allocator for device exclusive use
- * defined in device tree structures. It should be called by arch specific code
- * once the early allocator (memblock) has been activated and all other
- * subsystems have already allocated/reserved memory.
- */
-void __init early_init_dt_scan_reserved_mem(void)
-{
-       of_scan_flat_dt_by_path("/memory/reserved-memory",
-                               fdt_scan_reserved_mem, NULL);
-}
index 9b439ac..f6dcde2 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 
 const struct of_device_id of_default_bus_match_table[] = {
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata(
        dev->dev.bus = &platform_bus_type;
        dev->dev.platform_data = platform_data;
 
-       of_reserved_mem_device_init(&dev->dev);
-
        /* We do not fill the DMA ops for platform devices by default.
         * This is currently the responsibility of the platform code
         * to do such, possibly using a device notifier
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata(
 
        if (of_device_add(dev) != 0) {
                platform_device_put(dev);
-               of_reserved_mem_device_release(&dev->dev);
                return NULL;
        }
 
index 0b7d23b..be12fbf 100644 (file)
@@ -994,14 +994,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
 
                /*
                 * This bridge should have been registered as a hotplug function
-                * under its parent, so the context has to be there.  If not, we
-                * are in deep goo.
+                * under its parent, so the context should be there, unless the
+                * parent is going to be handled by pciehp, in which case this
+                * bridge is not interesting to us either.
                 */
                mutex_lock(&acpiphp_context_lock);
                context = acpiphp_get_context(handle);
-               if (WARN_ON(!context)) {
+               if (!context) {
                        mutex_unlock(&acpiphp_context_lock);
                        put_device(&bus->dev);
+                       pci_dev_put(bridge->pci_dev);
                        kfree(bridge);
                        return;
                }
index 96d6b2e..b51a746 100644 (file)
@@ -504,6 +504,7 @@ config ASUS_WMI
        depends on BACKLIGHT_CLASS_DEVICE
        depends on RFKILL || RFKILL = n
        depends on HOTPLUG_PCI
+       depends on ACPI_VIDEO || ACPI_VIDEO = n
        select INPUT_SPARSEKMAP
        select LEDS_CLASS
        select NEW_LEDS
index d3fd520..13ec195 100644 (file)
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
                 "default is -1 (automatic)");
 #endif
 
-static int kbd_backlight = 1;
+static int kbd_backlight = -1;
 module_param(kbd_backlight, int, 0444);
 MODULE_PARM_DESC(kbd_backlight,
                 "set this to 0 to disable keyboard backlight, "
-                "1 to enable it (default: 0)");
+                "1 to enable it (default: no change from current value)");
 
-static int kbd_backlight_timeout;      /* = 0 */
+static int kbd_backlight_timeout = -1;
 module_param(kbd_backlight_timeout, int, 0444);
 MODULE_PARM_DESC(kbd_backlight_timeout,
-                "set this to 0 to set the default 10 seconds timeout, "
-                "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
-                "(default: 0)");
+                "meaningful values vary from 0 to 3 and their meaning depends "
+                "on the model (default: no change from current value)");
 
 #ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void);
@@ -1844,6 +1843,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
        if (!kbdbl_ctl)
                return -ENOMEM;
 
+       kbdbl_ctl->mode = kbd_backlight;
+       kbdbl_ctl->timeout = kbd_backlight_timeout;
        kbdbl_ctl->handle = handle;
        if (handle == 0x0137)
                kbdbl_ctl->base = 0x0C00;
@@ -1870,8 +1871,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
        if (ret)
                goto outmode;
 
-       __sony_nc_kbd_backlight_mode_set(kbd_backlight);
-       __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout);
+       __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
+       __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
 
        return 0;
 
@@ -1886,17 +1887,8 @@ outkzalloc:
 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
        if (kbdbl_ctl) {
-               int result;
-
                device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
                device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
-
-               /* restore the default hw behaviour */
-               sony_call_snc_handle(kbdbl_ctl->handle,
-                               kbdbl_ctl->base | 0x10000, &result);
-               sony_call_snc_handle(kbdbl_ctl->handle,
-                               kbdbl_ctl->base + 0x200, &result);
-
                kfree(kbdbl_ctl);
                kbdbl_ctl = NULL;
        }
index 5adb204..cee7e27 100644 (file)
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        int intensity = 0;
        int r0_perm;
        int nr_tracks;
+       int use_prefix;
 
        startdev = dasd_alias_get_start_dev(base);
        if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
                intensity = fdata->intensity;
        }
 
+       use_prefix = base_priv->features.feature[8] & 0x01;
+
        switch (intensity) {
        case 0x00:      /* Normal format */
        case 0x08:      /* Normal format, use cdl. */
                cplength = 2 + (rpt*nr_tracks);
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       rpt * nr_tracks * sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
                break;
        case 0x01:      /* Write record zero and format track. */
        case 0x09:      /* Write record zero and format track, use cdl. */
                cplength = 2 + rpt * nr_tracks;
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       sizeof(struct eckd_count) +
-                       rpt * nr_tracks * sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
                break;
        case 0x04:      /* Invalidate track. */
        case 0x0c:      /* Invalidate track, use cdl. */
                cplength = 3;
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count);
                break;
        default:
                dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
 
        switch (intensity & ~0x08) {
        case 0x00: /* Normal format. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-               /* grant subsystem permission to format R0 */
-               if (r0_perm)
-                       ((struct PFX_eckd_data *)data)
-                               ->define_extent.ga_extended |= 0x04;
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                       /* grant subsystem permission to format R0 */
+                       if (r0_perm)
+                               ((struct PFX_eckd_data *)data)
+                                       ->define_extent.ga_extended |= 0x04;
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                                     fdata->start_unit, fdata->stop_unit,
+                                     DASD_ECKD_CCW_WRITE_CKD, startdev);
+                       /* grant subsystem permission to format R0 */
+                       if (r0_perm)
+                               ((struct DE_eckd_data *) data)
+                                       ->ga_extended |= 0x04;
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
                data += sizeof(struct LO_eckd_data);
                break;
        case 0x01: /* Write record zero + format track. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
-                      base, startdev);
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+                              base, startdev);
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
                data += sizeof(struct LO_eckd_data);
                break;
        case 0x04: /* Invalidate track. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, startdev);
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, 1,
index a3aa374..1fe2643 100644 (file)
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
-               timeout = get_tod_clock() +
+               timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
-                   get_tod_clock() > timeout &&
+                   get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
index 9b3a24e..cf31d33 100644 (file)
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
        int ret;
 
        dev_num = iminor(inode);
-       if (dev_num > MAXMINOR)
+       if (dev_num >= MAXMINOR)
                return -ENODEV;
        logptr = &sys_ser[dev_num];
 
index d7da67a..88e35d8 100644 (file)
@@ -878,9 +878,9 @@ static void css_reset(void)
                        atomic_inc(&chpid_reset_count);
        }
        /* Wait for machine check for all channel paths. */
-       timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
+       timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
        while (atomic_read(&chpid_reset_count) != 0) {
-               if (get_tod_clock() > timeout)
+               if (get_tod_clock_fast() > timeout)
                        break;
                cpu_relax();
        }
index 8ed52aa..bbd3e51 100644 (file)
@@ -338,10 +338,10 @@ again:
                retries++;
 
                if (!start_time) {
-                       start_time = get_tod_clock();
+                       start_time = get_tod_clock_fast();
                        goto again;
                }
-               if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+               if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        int count, stop;
        unsigned char state = 0;
 
-       q->timestamp = get_tod_clock();
+       q->timestamp = get_tod_clock_fast();
 
        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
         * At this point we know, that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
-       if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+       if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
        int count, stop;
        unsigned char state = 0;
 
-       q->timestamp = get_tod_clock();
+       q->timestamp = get_tod_clock_fast();
 
        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
index feab3a5..757eb07 100644 (file)
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
        while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
                                        PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
                                        pci_device)) != NULL) {
-               struct blogic_adapter *adapter = adapter;
+               struct blogic_adapter *host_adapter = adapter;
                struct blogic_adapter_info adapter_info;
                enum blogic_isa_ioport mod_ioaddr_req;
                unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                   known and enabled, note that the particular Standard ISA I/O
                   Address should not be probed.
                 */
-               adapter->io_addr = io_addr;
-               blogic_intreset(adapter);
-               if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+               host_adapter->io_addr = io_addr;
+               blogic_intreset(host_adapter);
+               if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
                                &adapter_info, sizeof(adapter_info)) ==
                                sizeof(adapter_info)) {
                        if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                   I/O Address assigned at system initialization.
                 */
                mod_ioaddr_req = BLOGIC_IO_DISABLE;
-               blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+               blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
                                sizeof(mod_ioaddr_req), NULL, 0);
                /*
                   For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 
                        fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
                        fetch_localram.count = sizeof(autoscsi_byte45);
-                       blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
+                       blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
                                        &fetch_localram, sizeof(fetch_localram),
                                        &autoscsi_byte45,
                                        sizeof(autoscsi_byte45));
-                       blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
-                                       sizeof(id));
+                       blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+                                       &id, sizeof(id));
                        if (id.fw_ver_digit1 == '5')
                                force_scan_order =
                                        autoscsi_byte45.force_scan_order;
index 2ef497e..ee5c183 100644 (file)
@@ -20,7 +20,7 @@
  * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3058       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3059       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
  * |                              |                    | 0x302d,0x3033  |
index df1b30b..ff9c86b 100644 (file)
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        que = MSW(sts->handle);
        req = ha->req_q_map[que];
 
+       /* Check for invalid queue pointer */
+       if (req == NULL ||
+           que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+               ql_dbg(ql_dbg_io, vha, 0x3059,
+                   "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+                   "que=%u.\n", sts->handle, req, que);
+               return;
+       }
+
        /* Validate handle. */
        if (handle < req->num_outstanding_cmds)
                sp = req->outstanding_cmds[handle];
index e62d17d..5693f6d 100644 (file)
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
                gd->events |= DISK_EVENT_MEDIA_CHANGE;
        }
 
+       blk_pm_runtime_init(sdp->request_queue, dev);
        add_disk(gd);
        if (sdkp->capacity)
                sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
-       blk_pm_runtime_init(sdp->request_queue, dev);
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
 }
index 5cbc4bb..df5e961 100644 (file)
@@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
 static int sg_add(struct device *, struct class_interface *);
 static void sg_remove(struct device *, struct class_interface *);
 
+static DEFINE_SPINLOCK(sg_open_exclusive_lock);
+
 static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock);
+static DEFINE_RWLOCK(sg_index_lock);   /* Also used to lock
+                                                          file descriptor list for device */
 
 static struct class_interface sg_interface = {
        .add_dev        = sg_add,
@@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
 } Sg_request;
 
 typedef struct sg_fd {         /* holds the state of a file descriptor */
-       struct list_head sfd_siblings; /* protected by sfd_lock of device */
+       /* sfd_siblings is protected by sg_index_lock */
+       struct list_head sfd_siblings;
        struct sg_device *parentdp;     /* owning device */
        wait_queue_head_t read_wait;    /* queue read until command done */
        rwlock_t rq_list_lock;  /* protect access to list in req_arr */
@@ -166,12 +170,13 @@ typedef struct sg_fd {            /* holds the state of a file descriptor */
 
 typedef struct sg_device { /* holds the state of each scsi generic device */
        struct scsi_device *device;
+       wait_queue_head_t o_excl_wait;  /* queue open() when O_EXCL in use */
        int sg_tablesize;       /* adapter's max scatter-gather table size */
        u32 index;              /* device index number */
-       spinlock_t sfd_lock;    /* protect file descriptor list for device */
+       /* sfds is protected by sg_index_lock */
        struct list_head sfds;
-       struct rw_semaphore o_sem;      /* exclude open should hold this rwsem */
        volatile char detached; /* 0->attached, 1->detached pending removal */
+       /* exclude protected by sg_open_exclusive_lock */
        char exclude;           /* opened for exclusive access */
        char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
        struct gendisk *disk;
@@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
        return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
 }
 
+static int get_exclude(Sg_device *sdp)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+       ret = sdp->exclude;
+       spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+       return ret;
+}
+
+static int set_exclude(Sg_device *sdp, char val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+       sdp->exclude = val;
+       spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+       return val;
+}
+
 static int sfds_list_empty(Sg_device *sdp)
 {
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&sdp->sfd_lock, flags);
+       read_lock_irqsave(&sg_index_lock, flags);
        ret = list_empty(&sdp->sfds);
-       spin_unlock_irqrestore(&sdp->sfd_lock, flags);
+       read_unlock_irqrestore(&sg_index_lock, flags);
        return ret;
 }
 
@@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
        struct request_queue *q;
        Sg_device *sdp;
        Sg_fd *sfp;
+       int res;
        int retval;
 
        nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
                goto error_out;
        }
 
-       if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
-               retval = -EPERM; /* Can't lock it with read only access */
-               goto error_out;
-       }
-       if (flags & O_NONBLOCK) {
-               if (flags & O_EXCL) {
-                       if (!down_write_trylock(&sdp->o_sem)) {
-                               retval = -EBUSY;
-                               goto error_out;
-                       }
-               } else {
-                       if (!down_read_trylock(&sdp->o_sem)) {
-                               retval = -EBUSY;
-                               goto error_out;
-                       }
+       if (flags & O_EXCL) {
+               if (O_RDONLY == (flags & O_ACCMODE)) {
+                       retval = -EPERM; /* Can't lock it with read only access */
+                       goto error_out;
+               }
+               if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
+                       retval = -EBUSY;
+                       goto error_out;
+               }
+               res = wait_event_interruptible(sdp->o_excl_wait,
+                                          ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
+               if (res) {
+                       retval = res;   /* -ERESTARTSYS because signal hit process */
+                       goto error_out;
+               }
+       } else if (get_exclude(sdp)) {  /* some other fd has an exclusive lock on dev */
+               if (flags & O_NONBLOCK) {
+                       retval = -EBUSY;
+                       goto error_out;
+               }
+               res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
+               if (res) {
+                       retval = res;   /* -ERESTARTSYS because signal hit process */
+                       goto error_out;
                }
-       } else {
-               if (flags & O_EXCL)
-                       down_write(&sdp->o_sem);
-               else
-                       down_read(&sdp->o_sem);
        }
-       /* Since write lock is held, no need to check sfd_list */
-       if (flags & O_EXCL)
-               sdp->exclude = 1;       /* used by release lock */
-
+       if (sdp->detached) {
+               retval = -ENODEV;
+               goto error_out;
+       }
        if (sfds_list_empty(sdp)) {     /* no existing opens on this device */
                sdp->sgdebug = 0;
                q = sdp->device->request_queue;
                sdp->sg_tablesize = queue_max_segments(q);
        }
-       sfp = sg_add_sfp(sdp, dev);
-       if (!IS_ERR(sfp))
+       if ((sfp = sg_add_sfp(sdp, dev)))
                filp->private_data = sfp;
-               /* retval is already provably zero at this point because of the
-                * check after retval = scsi_autopm_get_device(sdp->device))
-                */
        else {
-               retval = PTR_ERR(sfp);
-
                if (flags & O_EXCL) {
-                       sdp->exclude = 0;       /* undo if error */
-                       up_write(&sdp->o_sem);
-               } else
-                       up_read(&sdp->o_sem);
+                       set_exclude(sdp, 0);    /* undo if error */
+                       wake_up_interruptible(&sdp->o_excl_wait);
+               }
+               retval = -ENOMEM;
+               goto error_out;
+       }
+       retval = 0;
 error_out:
+       if (retval) {
                scsi_autopm_put_device(sdp->device);
 sdp_put:
                scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
 {
        Sg_device *sdp;
        Sg_fd *sfp;
-       int excl;
 
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
 
-       excl = sdp->exclude;
-       sdp->exclude = 0;
-       if (excl)
-               up_write(&sdp->o_sem);
-       else
-               up_read(&sdp->o_sem);
+       set_exclude(sdp, 0);
+       wake_up_interruptible(&sdp->o_excl_wait);
 
        scsi_autopm_put_device(sdp->device);
        kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
        disk->first_minor = k;
        sdp->disk = disk;
        sdp->device = scsidp;
-       spin_lock_init(&sdp->sfd_lock);
        INIT_LIST_HEAD(&sdp->sfds);
-       init_rwsem(&sdp->o_sem);
+       init_waitqueue_head(&sdp->o_excl_wait);
        sdp->sg_tablesize = queue_max_segments(q);
        sdp->index = k;
        kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
 
        /* Need a write lock to set sdp->detached. */
        write_lock_irqsave(&sg_index_lock, iflags);
-       spin_lock(&sdp->sfd_lock);
        sdp->detached = 1;
        list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
                wake_up_interruptible(&sfp->read_wait);
                kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
        }
-       spin_unlock(&sdp->sfd_lock);
        write_unlock_irqrestore(&sg_index_lock, iflags);
 
        sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
 
        sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
        if (!sfp)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        init_waitqueue_head(&sfp->read_wait);
        rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
        sfp->cmd_q = SG_DEF_COMMAND_Q;
        sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
        sfp->parentdp = sdp;
-       spin_lock_irqsave(&sdp->sfd_lock, iflags);
-       if (sdp->detached) {
-               spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
-               return ERR_PTR(-ENODEV);
-       }
+       write_lock_irqsave(&sg_index_lock, iflags);
        list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
-       spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+       write_unlock_irqrestore(&sg_index_lock, iflags);
        SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
        if (unlikely(sg_big_buff != def_reserved_size))
                sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
        struct sg_device *sdp = sfp->parentdp;
        unsigned long iflags;
 
-       spin_lock_irqsave(&sdp->sfd_lock, iflags);
+       write_lock_irqsave(&sg_index_lock, iflags);
        list_del(&sfp->sfd_siblings);
-       spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+       write_unlock_irqrestore(&sg_index_lock, iflags);
+       wake_up_interruptible(&sdp->o_excl_wait);
 
        INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
        schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
        return 0;
 }
 
-/* must be called while holding sg_index_lock and sfd_lock */
+/* must be called while holding sg_index_lock */
 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 {
        int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
 
        read_lock_irqsave(&sg_index_lock, iflags);
        sdp = it ? sg_lookup_dev(it->index) : NULL;
-       if (sdp) {
-               spin_lock(&sdp->sfd_lock);
-               if (!list_empty(&sdp->sfds)) {
-                       struct scsi_device *scsidp = sdp->device;
+       if (sdp && !list_empty(&sdp->sfds)) {
+               struct scsi_device *scsidp = sdp->device;
 
-                       seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
-                       if (sdp->detached)
-                               seq_printf(s, "detached pending close ");
-                       else
-                               seq_printf
-                                   (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
-                                    scsidp->host->host_no,
-                                    scsidp->channel, scsidp->id,
-                                    scsidp->lun,
-                                    scsidp->host->hostt->emulated);
-                       seq_printf(s, " sg_tablesize=%d excl=%d\n",
-                                  sdp->sg_tablesize, sdp->exclude);
-                       sg_proc_debug_helper(s, sdp);
-               }
-               spin_unlock(&sdp->sfd_lock);
+               seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+               if (sdp->detached)
+                       seq_printf(s, "detached pending close ");
+               else
+                       seq_printf
+                           (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
+                            scsidp->host->host_no,
+                            scsidp->channel, scsidp->id,
+                            scsidp->lun,
+                            scsidp->host->hostt->emulated);
+               seq_printf(s, " sg_tablesize=%d excl=%d\n",
+                          sdp->sg_tablesize, get_exclude(sdp));
+               sg_proc_debug_helper(s, sdp);
        }
        read_unlock_irqrestore(&sg_index_lock, iflags);
        return 0;
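
The sg.c hunks above drop the per-device o_sem rwsem and sfd_lock spinlock: sdp->exclude is now guarded by the new global sg_open_exclusive_lock, the fd list by sg_index_lock, and O_EXCL opens sleep on the new o_excl_wait wait queue, which sg_release() and sg_remove_sfp() wake. A rough userspace sketch of that open/release protocol, with a pthread mutex and condition variable standing in for the spinlock and wait queue (the names and the simplified open counter are illustrative only, not the kernel API):

/* Minimal userspace analogue of the O_EXCL open/release protocol above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t excl_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ sg_open_exclusive_lock */
static pthread_cond_t  excl_wait = PTHREAD_COND_INITIALIZER;  /* ~ o_excl_wait */
static bool exclude;        /* mirrors sdp->exclude */
static int  open_count;     /* mirrors the "sfds list empty" check */

/* Block until no other opener holds the device, then take the exclusive flag. */
static void open_exclusive(void)
{
        pthread_mutex_lock(&excl_lock);
        while (open_count > 0 || exclude)
                pthread_cond_wait(&excl_wait, &excl_lock);
        exclude = true;
        open_count++;
        pthread_mutex_unlock(&excl_lock);
}

/* Mirrors sg_release(): clear the flag and wake any waiters. */
static void release_exclusive(void)
{
        pthread_mutex_lock(&excl_lock);
        exclude = false;
        open_count--;
        pthread_cond_broadcast(&excl_wait);
        pthread_mutex_unlock(&excl_lock);
}

int main(void)
{
        open_exclusive();
        puts("exclusive open granted");
        release_exclusive();
        return 0;
}
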
index b94a95a..76d5bbd 100644 (file)
@@ -1,3 +1,4 @@
 config USB_MSI3101
        tristate "Mirics MSi3101 SDR Dongle"
        depends on USB && VIDEO_DEV && VIDEO_V4L2
+        select VIDEOBUF2_VMALLOC
index 24c7b70..4c3bf77 100644 (file)
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
        /* Absolute min and max number of buffers available for mmap() */
        *nbuffers = 32;
        *nplanes = 1;
-       sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */
+       /*
+        *   3, wMaxPacketSize 3x 1024 bytes
+        * 504, max IQ sample pairs per 1024 frame
+        *   2, two samples, I and Q
+        *   4, 32-bit float
+        */
+       sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
        dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
                        __func__, *nbuffers, sizes[0]);
        return 0;
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
                        f->frequency * 625UL / 10UL);
 }
 
-const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
+static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
        .vidioc_querycap          = msi3101_querycap,
 
        .vidioc_enum_input        = msi3101_enum_input,
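
The msi3101 queue_setup change above derives the vb2 buffer size from the stream geometry spelled out in its comment: 3 USB packets of 1024 bytes each, at most 504 I/Q sample pairs per packet, 2 samples per pair, 4 bytes per 32-bit float, then page alignment. A tiny standalone check of that arithmetic (the 4096-byte PAGE_SIZE and the simplified PAGE_ALIGN below are assumptions for illustration):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        /* packets * sample pairs * (I,Q) * sizeof(float) */
        unsigned long raw = 3 * 504 * 2 * 4;
        printf("raw=%lu aligned=%lu\n", raw, PAGE_ALIGN(raw)); /* 12096, 12288 */
        return 0;
}
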
index 551c96c..0f199f6 100644 (file)
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
         * pSCSI Host ID and enable for phba mode
         */
        sh = scsi_host_lookup(phv->phv_host_id);
-       if (IS_ERR(sh)) {
+       if (!sh) {
                pr_err("pSCSI: Unable to locate SCSI Host for"
                        " phv_host_id: %d\n", phv->phv_host_id);
-               return PTR_ERR(sh);
+               return -EINVAL;
        }
 
        phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
                        sh = phv->phv_lld_host;
                } else {
                        sh = scsi_host_lookup(pdv->pdv_host_id);
-                       if (IS_ERR(sh)) {
+                       if (!sh) {
                                pr_err("pSCSI: Unable to locate"
                                        " pdv_host_id: %d\n", pdv->pdv_host_id);
-                               return PTR_ERR(sh);
+                               return -EINVAL;
                        }
                }
        } else {
index 4714c6f..d9b92b2 100644 (file)
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                        sectors, cmd->se_dev->dev_attrib.max_write_same_len);
                return TCM_INVALID_CDB_FIELD;
        }
+       /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+       if (flags[0] & 0x10) {
+               pr_warn("WRITE SAME with ANCHOR not supported\n");
+               return TCM_INVALID_CDB_FIELD;
+       }
        /*
         * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
         * translated into block discard requests within backend code.
index 3da4fd1..474cd44 100644 (file)
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
        mutex_lock(&g_device_mutex);
        list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
 
+               if (!se_dev->dev_attrib.emulate_3pc)
+                       continue;
+
                memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
                target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
 
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
        struct se_cmd se_cmd;
        struct xcopy_op *xcopy_op;
        struct completion xpt_passthrough_sem;
+       unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
 
 static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
 
        pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
                        se_cmd->scsi_status);
-       return 0;
+
+       return (se_cmd->scsi_status) ? -EINVAL : 0;
 }
 
 static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
                (unsigned long long)src_lba, src_sectors, length);
 
        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
-                               DMA_FROM_DEVICE, 0, NULL);
+                             DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
        xop->src_pt_cmd = xpt_cmd;
 
        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
                (unsigned long long)dst_lba, dst_sectors, length);
 
        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
-                               DMA_TO_DEVICE, 0, NULL);
+                             DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
        xop->dst_pt_cmd = xpt_cmd;
 
        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
 
 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 {
+       struct se_device *dev = se_cmd->se_dev;
        struct xcopy_op *xop = NULL;
        unsigned char *p = NULL, *seg_desc;
        unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+       sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
        int rc;
        unsigned short tdll;
 
+       if (!dev->dev_attrib.emulate_3pc) {
+               pr_err("EXTENDED_COPY operation explicitly disabled\n");
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
        sa = se_cmd->t_task_cdb[1] & 0x1f;
        if (sa != 0x00) {
                pr_err("EXTENDED_COPY(LID4) not supported\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
 
+       xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+       if (!xop) {
+               pr_err("Unable to allocate xcopy_op\n");
+               return TCM_OUT_OF_RESOURCES;
+       }
+       xop->xop_se_cmd = se_cmd;
+
        p = transport_kmap_data_sg(se_cmd);
        if (!p) {
                pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+               kfree(xop);
                return TCM_OUT_OF_RESOURCES;
        }
 
        list_id = p[0];
-       if (list_id != 0x00) {
-               pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
-               goto out;
-       }
-       list_id_usage = (p[1] & 0x18);
+       list_id_usage = (p[1] & 0x18) >> 3;
+
        /*
         * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
         */
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
                goto out;
        }
 
-       xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
-       if (!xop) {
-               pr_err("Unable to allocate xcopy_op\n");
-               goto out;
-       }
-       xop->xop_se_cmd = se_cmd;
-
        pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
                tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
        if (rc <= 0)
                goto out;
 
+       if (xop->src_dev->dev_attrib.block_size !=
+           xop->dst_dev->dev_attrib.block_size) {
+               pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+                      " block_size: %u currently unsupported\n",
+                       xop->src_dev->dev_attrib.block_size,
+                       xop->dst_dev->dev_attrib.block_size);
+               xcopy_pt_undepend_remotedev(xop);
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               goto out;
+       }
+
        pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
                                rc * XCOPY_TARGET_DESC_LEN);
        seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
        if (p)
                transport_kunmap_data_sg(se_cmd);
        kfree(xop);
-       return TCM_INVALID_CDB_FIELD;
+       return ret;
 }
 
 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
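
Among the target_do_xcopy() changes above, the old "list_id must be zero" restriction is dropped and LIST ID USAGE is now decoded as a proper 2-bit field, (p[1] & 0x18) >> 3, from byte 1 of the parameter list header. A minimal sketch of that extraction (the header bytes below are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* LIST ID USAGE lives in bits 4:3 of byte 1 of the EXTENDED COPY parameter list header. */
static unsigned int list_id_usage(const uint8_t *p)
{
        return (p[1] & 0x18) >> 3;
}

int main(void)
{
        uint8_t hdr[2] = { 0x00, 0x10 };                   /* hypothetical header bytes */
        printf("list_id_usage=%u\n", list_id_usage(hdr));  /* prints 2 */
        return 0;
}
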
index f10a6ad..c2301da 100644 (file)
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
        }
 
        th_zone = conf->pzone_data;
-       if (th_zone->therm_dev)
-               return;
 
        if (th_zone->bind == false) {
                for (i = 0; i < th_zone->cool_dev_size; i++) {
index b43afda..32f38b9 100644 (file)
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
 
        con = readl(data->base + reg->tmu_ctrl);
 
+       if (pdata->test_mux)
+               con |= (pdata->test_mux << reg->test_mux_addr_shift);
+
        if (pdata->reference_voltage) {
                con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
                con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
        },
        {
                .compatible = "samsung,exynos4412-tmu",
-               .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+               .data = (void *)EXYNOS4412_TMU_DRV_DATA,
        },
        {
                .compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       if (pdata->type == SOC_ARCH_EXYNOS ||
-               pdata->type == SOC_ARCH_EXYNOS4210 ||
-                               pdata->type == SOC_ARCH_EXYNOS5440)
+       if (pdata->type == SOC_ARCH_EXYNOS4210 ||
+           pdata->type == SOC_ARCH_EXYNOS4412 ||
+           pdata->type == SOC_ARCH_EXYNOS5250 ||
+           pdata->type == SOC_ARCH_EXYNOS5440)
                data->soc = pdata->type;
        else {
                ret = -EINVAL;
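
The exynos_tmu_control() hunk above ORs the platform's test_mux value into the TMU control register at test_mux_addr_shift; with the Exynos4412 constants added further down in this diff (EXYNOS4412_MUX_ADDR_VALUE = 6, EXYNOS4412_MUX_ADDR_SHIFT = 20) the 3-bit value lands in bits 22:20. A quick standalone check of the resulting mask:

#include <stdio.h>

/* Constants as added in the exynos_tmu_data.h hunk below. */
#define EXYNOS4412_MUX_ADDR_VALUE 6u
#define EXYNOS4412_MUX_ADDR_SHIFT 20u

int main(void)
{
        unsigned int con = 0;
        con |= EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT;
        printf("TMU control |= 0x%08x\n", con);  /* 0x00600000 */
        return 0;
}
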
index b364c9e..3fb6554 100644 (file)
@@ -41,7 +41,8 @@ enum calibration_mode {
 
 enum soc_type {
        SOC_ARCH_EXYNOS4210 = 1,
-       SOC_ARCH_EXYNOS,
+       SOC_ARCH_EXYNOS4412,
+       SOC_ARCH_EXYNOS5250,
        SOC_ARCH_EXYNOS5440,
 };
 
@@ -84,6 +85,7 @@ enum soc_type {
  * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
        reg.
  * @tmu_ctrl: TMU main controller register.
+ * @test_mux_addr_shift: shift bits of test mux address.
  * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
  * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
  * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
        u32     triminfo_reload_shift;
 
        u32     tmu_ctrl;
+       u32     test_mux_addr_shift;
        u32     buf_vref_sel_shift;
        u32     buf_vref_sel_mask;
        u32     therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
  * @first_point_trim: temp value of the first point trimming
  * @second_point_trim: temp value of the second point trimming
  * @default_temp_offset: default temperature offset in case of no trimming
+ * @test_mux: test MUX address value, used if the SoC supports a test MUX
  * @cal_type: calibration type for temperature
  * @cal_mode: calibration mode for temperature
  * @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
        u8 first_point_trim;
        u8 second_point_trim;
        u8 default_temp_offset;
+       u8 test_mux;
 
        enum calibration_type cal_type;
        enum calibration_mode cal_mode;
index 9002499..073c292 100644 (file)
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
 };
 #endif
 
-#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
-static const struct exynos_tmu_registers exynos5250_tmu_registers = {
+#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
+static const struct exynos_tmu_registers exynos4412_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
        .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
        .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
        .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+       .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
        .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
        .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
        .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
-#define EXYNOS5250_TMU_DATA \
+#define EXYNOS4412_TMU_DATA \
        .threshold_falling = 10, \
        .trigger_levels[0] = 85, \
        .trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
                .temp_level = 103, \
        }, \
        .freq_tab_count = 2, \
-       .type = SOC_ARCH_EXYNOS, \
-       .registers = &exynos5250_tmu_registers, \
+       .registers = &exynos4412_tmu_registers, \
        .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
                        TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
                        TMU_SUPPORT_EMUL_TIME)
+#endif
 
+#if defined(CONFIG_SOC_EXYNOS4412)
+struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
+       .tmu_data = {
+               {
+                       EXYNOS4412_TMU_DATA,
+                       .type = SOC_ARCH_EXYNOS4412,
+                       .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
+               },
+       },
+       .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
 struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
        .tmu_data = {
-               { EXYNOS5250_TMU_DATA },
+               {
+                       EXYNOS4412_TMU_DATA,
+                       .type = SOC_ARCH_EXYNOS5250,
+               },
        },
        .tmu_count = 1,
 };
index dc7feb5..a1ea19d 100644 (file)
 
 #define EXYNOS_MAX_TRIGGER_PER_REG     4
 
+/* Exynos4412 specific */
+#define EXYNOS4412_MUX_ADDR_VALUE          6
+#define EXYNOS4412_MUX_ADDR_SHIFT          20
+
 /*exynos5440 specific registers*/
 #define EXYNOS5440_TMU_S0_7_TRIM               0x000
 #define EXYNOS5440_TMU_S0_7_CTRL               0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
 #define EXYNOS4210_TMU_DRV_DATA (NULL)
 #endif
 
-#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412))
+#if defined(CONFIG_SOC_EXYNOS4412)
+extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
+#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
+#else
+#define EXYNOS4412_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
 extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
 #define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
 #else
index eeef0e2..fdb0719 100644 (file)
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&hwmon->tz_list);
        strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
-       hwmon->device = hwmon_device_register(&tz->device);
+       hwmon->device = hwmon_device_register(NULL);
        if (IS_ERR(hwmon->device)) {
                result = PTR_ERR(hwmon->device);
                goto free_mem;
index 4f8b9af..5a47cc8 100644 (file)
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
                } else {
                        dev_err(bgp->dev,
                                "Failed to read PCB state. Using defaults\n");
+                       ret = 0;
                }
        }
        *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
index f36950e..7722cb9 100644 (file)
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
        int phy_id = topology_physical_package_id(cpu);
        struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
        bool notify = false;
+       unsigned long flags;
 
        if (!phdev)
                return;
 
-       spin_lock(&pkg_work_lock);
+       spin_lock_irqsave(&pkg_work_lock, flags);
        ++pkg_work_cnt;
        if (unlikely(phy_id > max_phy_id)) {
-               spin_unlock(&pkg_work_lock);
+               spin_unlock_irqrestore(&pkg_work_lock, flags);
                return;
        }
        pkg_work_scheduled[phy_id] = 0;
-       spin_unlock(&pkg_work_lock);
+       spin_unlock_irqrestore(&pkg_work_lock, flags);
 
        enable_pkg_thres_interrupt();
        rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
        int thres_count;
        u32 eax, ebx, ecx, edx;
        u8 *temp;
+       unsigned long flags;
 
        cpuid(6, &eax, &ebx, &ecx, &edx);
        thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
                goto err_ret_unlock;
        }
 
-       spin_lock(&pkg_work_lock);
+       spin_lock_irqsave(&pkg_work_lock, flags);
        if (topology_physical_package_id(cpu) > max_phy_id)
                max_phy_id = topology_physical_package_id(cpu);
        temp = krealloc(pkg_work_scheduled,
                        (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
        if (!temp) {
-               spin_unlock(&pkg_work_lock);
+               spin_unlock_irqrestore(&pkg_work_lock, flags);
                err = -ENOMEM;
                goto err_ret_free;
        }
        pkg_work_scheduled = temp;
        pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
-       spin_unlock(&pkg_work_lock);
+       spin_unlock_irqrestore(&pkg_work_lock, flags);
 
        phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
        phy_dev_entry->first_cpu = cpu;
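
The x86_pkg_temp_thermal hunks above switch pkg_work_lock from plain spin_lock()/spin_unlock() to the irqsave/irqrestore variants, saving the local interrupt state in "flags" around the critical section. As a loose userspace analogue only (not the kernel primitive), blocking signals can play the role of disabling local interrupts around a spinlock:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_spinlock_t pkg_work_lock;
static int pkg_work_cnt;

static void count_work(void)
{
        sigset_t all, saved;                              /* "flags" */

        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &saved);         /* ~ spin_lock_irqsave */
        pthread_spin_lock(&pkg_work_lock);
        ++pkg_work_cnt;
        pthread_spin_unlock(&pkg_work_lock);
        pthread_sigmask(SIG_SETMASK, &saved, NULL);       /* ~ spin_unlock_irqrestore */
}

int main(void)
{
        pthread_spin_init(&pkg_work_lock, PTHREAD_PROCESS_PRIVATE);
        count_work();
        printf("pkg_work_cnt=%d\n", pkg_work_cnt);
        return 0;
}
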
index a0ebbc9..042aa07 100644 (file)
@@ -1912,9 +1912,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 
        sport->devdata = of_id->data;
 
-       if (of_device_is_stdout_path(np))
-               add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
-
        return 0;
 }
 #else
index 93b697a..15ad6fc 100644 (file)
@@ -561,12 +561,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
        if (!mmres || !irqres)
                return -ENODEV;
 
-       if (np)
+       if (np) {
                port = of_alias_get_id(np, "serial");
                if (port >= VT8500_MAX_PORTS)
                        port = -1;
-       else
+       } else {
                port = -1;
+       }
 
        if (port < 0) {
                /* calculate the port id */
index 6f96795..64d7a6d 100644 (file)
@@ -100,8 +100,10 @@ static void host_stop(struct ci_hdrc *ci)
 {
        struct usb_hcd *hcd = ci->hcd;
 
-       usb_remove_hcd(hcd);
-       usb_put_hcd(hcd);
+       if (hcd) {
+               usb_remove_hcd(hcd);
+               usb_put_hcd(hcd);
+       }
        if (ci->platdata->reg_vbus)
                regulator_disable(ci->platdata->reg_vbus);
 }
index 5b44cd4..01fe362 100644 (file)
@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Alcor Micro Corp. Hub */
        { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* MicroTouch Systems touchscreen */
+       { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Broadcom BCM92035DGROM BT dongle */
        { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* MAYA44USB sound device */
+       { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 2c76ef1..08ef282 100644 (file)
@@ -799,7 +799,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
         * switchable ports.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
-                       cpu_to_le32(ports_available));
+                       ports_available);
 
        pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        &ports_available);
@@ -821,7 +821,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
         * host.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
-                       cpu_to_le32(ports_available));
+                       ports_available);
 
        pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
                        &ports_available);
index 773a6b2..e8b4c56 100644 (file)
@@ -1157,18 +1157,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                t1 = xhci_port_state_to_neutral(t1);
                if (t1 != t2)
                        xhci_writel(xhci, t2, port_array[port_index]);
-
-               if (hcd->speed != HCD_USB3) {
-                       /* enable remote wake up for USB 2.0 */
-                       __le32 __iomem *addr;
-                       u32 tmp;
-
-                       /* Get the port power control register address. */
-                       addr = port_array[port_index] + PORTPMSC;
-                       tmp = xhci_readl(xhci, addr);
-                       tmp |= PORT_RWE;
-                       xhci_writel(xhci, tmp, addr);
-               }
        }
        hcd->state = HC_STATE_SUSPENDED;
        bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1247,20 +1235,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                                xhci_ring_device(xhci, slot_id);
                } else
                        xhci_writel(xhci, temp, port_array[port_index]);
-
-               if (hcd->speed != HCD_USB3) {
-                       /* disable remote wake up for USB 2.0 */
-                       __le32 __iomem *addr;
-                       u32 tmp;
-
-                       /* Add one to the port status register address to get
-                        * the port power control register address.
-                        */
-                       addr = port_array[port_index] + PORTPMSC;
-                       tmp = xhci_readl(xhci, addr);
-                       tmp &= ~PORT_RWE;
-                       xhci_writel(xhci, tmp, addr);
-               }
        }
 
        (void) xhci_readl(xhci, &xhci->op_regs->command);
index 236c3aa..b8dffd5 100644 (file)
@@ -35,6 +35,9 @@
 #define PCI_VENDOR_ID_ETRON            0x1b6f
 #define PCI_DEVICE_ID_ASROCK_P67       0x7023
 
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI     0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
+
 static const char hcd_name[] = "xhci_hcd";
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -69,6 +72,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                "QUIRK: Fresco Logic xHC needs configure"
                                " endpoint cmd after reset endpoint");
                }
+               if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+                               pdev->revision == 0x4) {
+                       xhci->quirks |= XHCI_SLOW_SUSPEND;
+                       xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+                               "QUIRK: Fresco Logic xHC revision %u "
+                               "must be suspended extra slowly",
+                               pdev->revision);
+               }
                /* Fresco Logic confirms: all revisions of this chip do not
                 * support MSI, even though some of them claim to in their PCI
                 * capabilities.
@@ -110,6 +121,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
                xhci->quirks |= XHCI_AVOID_BEI;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+               /* Workaround for occasional spurious wakeups from S5 (or
+                * any other sleep) on Haswell machines with LPT and LPT-LP
+                * with the new Intel BIOS
+                */
+               xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -217,6 +237,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
                usb_put_hcd(xhci->shared_hcd);
        }
        usb_hcd_pci_remove(dev);
+
+       /* Workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(dev, PCI_D3hot);
+
        kfree(xhci);
 }
 
index 1e36dbb..6e0d886 100644 (file)
@@ -730,6 +730,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
+       /* Workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);
 
        xhci_cleanup_msix(xhci);
@@ -737,6 +740,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        xhci_readl(xhci, &xhci->op_regs->status));
+
+       /* Yet another workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
 }
 
 #ifdef CONFIG_PM
@@ -839,6 +846,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 int xhci_suspend(struct xhci_hcd *xhci)
 {
        int                     rc = 0;
+       unsigned int            delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
 
@@ -861,8 +869,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command &= ~CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
+
+       /* Some chips from Fresco Logic need an extraordinary delay */
+       delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
+
        if (xhci_handshake(xhci, &xhci->op_regs->status,
-                     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+                     STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
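
xhci_suspend() above multiplies the halt-handshake timeout by 10 when the new XHCI_SLOW_SUSPEND quirk is set (the xhci-pci.c hunk sets it for Fresco Logic PDK revision 0x4). A trivial sketch of that scaling; the numeric value of XHCI_MAX_HALT_USEC is an assumption here, used only for illustration:

#include <stdio.h>

#define XHCI_SLOW_SUSPEND  (1 << 17)
#define XHCI_MAX_HALT_USEC 16000U   /* assumed value, for illustration only */

static unsigned int halt_timeout_usec(unsigned int quirks)
{
        unsigned int delay = XHCI_MAX_HALT_USEC;

        /* Chips flagged XHCI_SLOW_SUSPEND get ten times the halt budget. */
        delay *= (quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
        return delay;
}

int main(void)
{
        printf("%u %u\n", halt_timeout_usec(0),
               halt_timeout_usec(XHCI_SLOW_SUSPEND));  /* 16000 160000 */
        return 0;
}
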
index 289fbfb..941d5f5 100644 (file)
@@ -1548,6 +1548,8 @@ struct xhci_hcd {
 #define XHCI_COMP_MODE_QUIRK   (1 << 14)
 #define XHCI_AVOID_BEI         (1 << 15)
 #define XHCI_PLAT              (1 << 16)
+#define XHCI_SLOW_SUSPEND      (1 << 17)
+#define XHCI_SPURIOUS_WAKEUP   (1 << 18)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index e2b21c1..ba5f70f 100644 (file)
@@ -246,6 +246,6 @@ config USB_EZUSB_FX2
 config USB_HSIC_USB3503
        tristate "USB3503 HSIC to USB20 Driver"
        depends on I2C
-       select REGMAP
+       select REGMAP_I2C
        help
          This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
index 18e877f..cd70cc8 100644 (file)
@@ -921,6 +921,52 @@ static void musb_generic_disable(struct musb *musb)
 
 }
 
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+       void __iomem    *regs = musb->mregs;
+       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
+
+       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+
+       /*  Set INT enable registers, enable interrupts */
+       musb->intrtxe = musb->epmask;
+       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+       musb->intrrxe = musb->epmask & 0xfffe;
+       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
+       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+       musb_writeb(regs, MUSB_TESTMODE, 0);
+
+       /* put into basic highspeed mode and start session */
+       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+                       | MUSB_POWER_HSENAB
+                       /* ENSUSPEND wedges tusb */
+                       /* | MUSB_POWER_ENSUSPEND */
+                  );
+
+       musb->is_active = 0;
+       devctl = musb_readb(regs, MUSB_DEVCTL);
+       devctl &= ~MUSB_DEVCTL_SESSION;
+
+       /* session started after:
+        * (a) ID-grounded irq, host mode;
+        * (b) vbus present/connect IRQ, peripheral mode;
+        * (c) peripheral initiates, using SRP
+        */
+       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+                       (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+               musb->is_active = 1;
+       } else {
+               devctl |= MUSB_DEVCTL_SESSION;
+       }
+
+       musb_platform_enable(musb);
+       musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
 /*
  * Make the HDRC stop (disable interrupts, etc.);
  * reversible by musb_start
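
musb_start(), moved into musb_core.c above (its old copy in musb_virthub.c is deleted further down) and exported via musb_core.h so musb_gadget_start() can call it, decides from the DEVCTL VBUS bits and the port mode whether the controller is already active or should request a session. A small sketch of that decision; the register bit values and port-mode constants below are assumptions meant to mirror musb_regs.h, for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MUSB_DEVCTL_SESSION 0x01   /* assumed bit layout */
#define MUSB_DEVCTL_VBUS    0x18   /* both VBUS bits set == VBUS valid */

enum { MUSB_PORT_MODE_HOST = 1, MUSB_PORT_MODE_GADGET = 2 };

/* Returns the DEVCTL value to write back and reports whether we are active. */
static uint8_t start_session(uint8_t devctl, int port_mode, bool *is_active)
{
        devctl &= ~MUSB_DEVCTL_SESSION;
        if (port_mode != MUSB_PORT_MODE_HOST &&
            (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
                *is_active = true;              /* VBUS already valid: peripheral session */
        else
                devctl |= MUSB_DEVCTL_SESSION;  /* otherwise request a session */
        return devctl;
}

int main(void)
{
        bool active = false;
        uint8_t d = start_session(0x18, MUSB_PORT_MODE_GADGET, &active);
        printf("devctl=0x%02x active=%d\n", (unsigned int)d, active);
        return 0;
}
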
index 65f3917..1c5bf75 100644 (file)
@@ -503,6 +503,7 @@ static inline void musb_configure_ep0(struct musb *musb)
 extern const char musb_driver_name[];
 
 extern void musb_stop(struct musb *musb);
+extern void musb_start(struct musb *musb);
 
 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
index b19ed21..3671898 100644 (file)
@@ -1853,11 +1853,14 @@ static int musb_gadget_start(struct usb_gadget *g,
        musb->gadget_driver = driver;
 
        spin_lock_irqsave(&musb->lock, flags);
+       musb->is_active = 1;
 
        otg_set_peripheral(otg, &musb->g);
        musb->xceiv->state = OTG_STATE_B_IDLE;
        spin_unlock_irqrestore(&musb->lock, flags);
 
+       musb_start(musb);
+
        /* REVISIT:  funcall to other code, which also
         * handles power budgeting ... this way also
         * ensures HdrcStart is indirectly called.
index a523950..d1d6b83 100644 (file)
 
 #include "musb_core.h"
 
-/*
-* Program the HDRC to start (enable interrupts, dma, etc.).
-*/
-static void musb_start(struct musb *musb)
-{
-       void __iomem    *regs = musb->mregs;
-       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
-
-       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
-
-       /*  Set INT enable registers, enable interrupts */
-       musb->intrtxe = musb->epmask;
-       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
-       musb->intrrxe = musb->epmask & 0xfffe;
-       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
-       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
-
-       musb_writeb(regs, MUSB_TESTMODE, 0);
-
-       /* put into basic highspeed mode and start session */
-       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
-                                               | MUSB_POWER_HSENAB
-                                               /* ENSUSPEND wedges tusb */
-                                               /* | MUSB_POWER_ENSUSPEND */
-                                               );
-
-       musb->is_active = 0;
-       devctl = musb_readb(regs, MUSB_DEVCTL);
-       devctl &= ~MUSB_DEVCTL_SESSION;
-
-       /* session started after:
-        * (a) ID-grounded irq, host mode;
-        * (b) vbus present/connect IRQ, peripheral mode;
-        * (c) peripheral initiates, using SRP
-        */
-       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
-           (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
-               musb->is_active = 1;
-       } else {
-               devctl |= MUSB_DEVCTL_SESSION;
-       }
-
-       musb_platform_enable(musb);
-       musb_writeb(regs, MUSB_DEVCTL, devctl);
-}
-
 static void musb_port_suspend(struct musb *musb, bool do_suspend)
 {
        struct usb_otg  *otg = musb->xceiv->otg;
index 80a7104..acaee06 100644 (file)
@@ -451,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
 #define CHANGHONG_VENDOR_ID                    0x2077
 #define CHANGHONG_PRODUCT_CH690                        0x7001
 
+/* Inovia */
+#define INOVIA_VENDOR_ID                       0x20a6
+#define INOVIA_SEW858                          0x1105
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -689,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
 
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -1257,7 +1477,9 @@ static const struct usb_device_id option_ids[] = {
 
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
-       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist
+       },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
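Note on the OLICARD200 entry above: the option driver hangs per-device quirks off .driver_info, and net_intf6_blacklist (already defined elsewhere in option.c for other modems) marks interface 6 as reserved so it is left to the network driver rather than claimed as a serial port. For readers unfamiliar with the mechanism, such a blacklist entry looks roughly like the sketch below; this is illustrative, not a new definition the patch adds.

        static const struct option_blacklist_info net_intf6_blacklist = {
                .reserved = BIT(6),     /* interface numbers the serial driver must skip */
        };
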
@@ -1345,6 +1567,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 760b785..c9a3569 100644 (file)
@@ -190,6 +190,7 @@ static struct usb_device_id ti_id_table_combined[] = {
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
        { }     /* terminator */
 };
index 94d75ed..18509e6 100644 (file)
@@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
                /*
                 * Many devices do not respond properly to READ_CAPACITY_16.
                 * Tell the SCSI layer to try READ_CAPACITY_10 first.
+                * However, some USB 3.0 drive enclosures return the capacity
+                * modulo 2 TB; those must use READ_CAPACITY_16.
                 */
-               sdev->try_rc_10_first = 1;
+               if (!(us->fflags & US_FL_NEEDS_CAP16))
+                       sdev->try_rc_10_first = 1;
 
                /* assume SPC3 or latter devices support sense size > 18 */
                if (sdev->scsi_level > SCSI_SPC_2)
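Why the new US_FL_NEEDS_CAP16 flag matters: READ CAPACITY(10) carries only a 32-bit last-LBA field, so with 512-byte logical blocks anything at or beyond 2^32 blocks cannot be represented, and buggy USB 3.0 bridges report the capacity modulo that limit; READ CAPACITY(16) uses a 64-bit LBA and avoids the wrap. A back-of-the-envelope check:

        /* 32-bit LBA limit with 512-byte logical blocks */
        u64 limit = (1ULL << 32) * 512;         /* 2199023255552 bytes == 2 TiB */
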
index c015f2c..de32cfa 100644 (file)
@@ -1925,6 +1925,13 @@ UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com> */
+UNUSUAL_DEV(  0x174c, 0x55aa, 0x0100, 0x0100,
+               "ASMedia",
+               "AS2105",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NEEDS_CAP16),
+
 /* Reported by Jesse Feddema <jdfeddema@gmail.com> */
 UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
                "Yarvik",
index a9807de..4fb7a8f 100644 (file)
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        long npage;
        int ret = 0, prot = 0;
        uint64_t mask;
+       struct vfio_dma *dma = NULL;
+       unsigned long pfn;
 
        end = map->iova + map->size;
 
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        }
 
        for (iova = map->iova; iova < end; iova += size, vaddr += size) {
-               struct vfio_dma *dma = NULL;
-               unsigned long pfn;
                long i;
 
                /* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                if (npage <= 0) {
                        WARN_ON(!npage);
                        ret = (int)npage;
-                       break;
+                       goto out;
                }
 
                /* Verify pages are not already mapped */
                for (i = 0; i < npage; i++) {
                        if (iommu_iova_to_phys(iommu->domain,
                                               iova + (i << PAGE_SHIFT))) {
-                               vfio_unpin_pages(pfn, npage, prot, true);
                                ret = -EBUSY;
-                               break;
+                               goto out_unpin;
                        }
                }
 
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                if (ret) {
                        if (ret != -EBUSY ||
                            map_try_harder(iommu, iova, pfn, npage, prot)) {
-                               vfio_unpin_pages(pfn, npage, prot, true);
-                               break;
+                               goto out_unpin;
                        }
                }
 
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
                        if (!dma) {
                                iommu_unmap(iommu->domain, iova, size);
-                               vfio_unpin_pages(pfn, npage, prot, true);
                                ret = -ENOMEM;
-                               break;
+                               goto out_unpin;
                        }
 
                        dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                }
        }
 
-       if (ret) {
-               struct vfio_dma *tmp;
-               iova = map->iova;
-               size = map->size;
-               while ((tmp = vfio_find_dma(iommu, iova, size))) {
-                       int r = vfio_remove_dma_overlap(iommu, iova,
-                                                       &size, tmp);
-                       if (WARN_ON(r || !size))
-                               break;
-               }
+       WARN_ON(ret);
+       mutex_unlock(&iommu->lock);
+       return ret;
+
+out_unpin:
+       vfio_unpin_pages(pfn, npage, prot, true);
+
+out:
+       iova = map->iova;
+       size = map->size;
+       while ((dma = vfio_find_dma(iommu, iova, size))) {
+               int r = vfio_remove_dma_overlap(iommu, iova,
+                                               &size, dma);
+               if (WARN_ON(r || !size))
+                       break;
        }
 
        mutex_unlock(&iommu->lock);
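The vfio hunks above collapse the per-iteration cleanup into a single unwind path at the end of the function. A minimal sketch of that goto-unwind pattern, with placeholder helpers (pin_pages(), map_range(), unpin_pages(), unwind_partial() are illustrative, not the vfio API):

        static int do_map(void)
        {
                int ret;

                ret = pin_pages();
                if (ret)
                        goto out;

                ret = map_range();
                if (ret)
                        goto out_unpin;

                return 0;

        out_unpin:
                unpin_pages();
        out:
                unwind_partial();       /* roll back anything mapped so far */
                return ret;
        }
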
index ce5221f..e663921 100644 (file)
@@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_num,
-                                       data_direction == DMA_TO_DEVICE);
+                                       data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
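On the vhost-scsi one-liner: in DMA API terms DMA_FROM_DEVICE means the device writes into memory and DMA_TO_DEVICE means it reads from memory, so the "write" flag passed to the iov-to-sgl mapping must be true exactly for DMA_FROM_DEVICE; the old expression was inverted. Semantics only, as a sketch:

        /*
         * DMA_TO_DEVICE:   memory -> device (device reads the buffer)
         * DMA_FROM_DEVICE: device -> memory (device writes the buffer)
         */
        bool write = (data_direction == DMA_FROM_DEVICE);
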
index c7c64f1..fa932c2 100644 (file)
@@ -613,6 +613,9 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
        sl = dev_to_w1_slave(dev);
        fops = sl->family->fops;
 
+       if (!fops)
+               return 0;
+
        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                /* if the family driver needs to initialize something... */
@@ -713,7 +716,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
        atomic_set(&sl->refcnt, 0);
        init_completion(&sl->released);
 
+       /* slave modules need to be loaded in a context with the mutex unlocked */
+       mutex_unlock(&dev->mutex);
        request_module("w1-family-0x%0x", rn->family);
+       mutex_lock(&dev->mutex);
 
        spin_lock(&w1_flock);
        f = w1_family_registered(rn->family);
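The w1 hunk drops dev->mutex around request_module(), presumably because the loaded family module's init path can call back into w1 and try to take the same mutex. The general shape of the fix, as a sketch; any state gathered before the unlock must be treated as stale afterwards:

        mutex_unlock(&dev->mutex);
        request_module("w1-family-0x%0x", rn->family);  /* may sleep and re-enter w1 */
        mutex_lock(&dev->mutex);
        /* re-validate slave/device state here before relying on it */
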
index b0ef7b0..51e3afa 100644 (file)
@@ -6437,6 +6437,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 
        if (btrfs_extent_readonly(root, disk_bytenr))
                goto out;
+       btrfs_release_path(path);
 
        /*
         * look for other files referencing this extent, if we
index 4d74335..6024877 100644 (file)
@@ -1005,9 +1005,19 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
+       gfp_t gfp_mask;
 
-       page = find_or_create_page(inode->i_mapping, index,
-               (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+       gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
+       gfp_mask |= __GFP_MOVABLE;
+       /*
+        * XXX: __getblk_slow() cannot really deal with failure and
+        * will endlessly loop on improvised global reclaim.  Prefer
+        * looping in the allocator rather than here; at least that
+        * code knows what it's doing.
+        */
+       gfp_mask |= __GFP_NOFAIL;
+
+       page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;
 
index a16b4e5..77fc5e1 100644 (file)
@@ -120,14 +120,16 @@ cifs_read_super(struct super_block *sb)
 {
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
+       struct cifs_tcon *tcon;
        int rc = 0;
 
        cifs_sb = CIFS_SB(sb);
+       tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
                sb->s_flags |= MS_POSIXACL;
 
-       if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
+       if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
        else
                sb->s_maxbytes = MAX_NON_LFS;
@@ -147,7 +149,7 @@ cifs_read_super(struct super_block *sb)
                goto out_no_root;
        }
 
-       if (cifs_sb_master_tcon(cifs_sb)->nocase)
+       if (tcon->nocase)
                sb->s_d_op = &cifs_ci_dentry_ops;
        else
                sb->s_d_op = &cifs_dentry_ops;
index a630475..08f9dfb 100644 (file)
@@ -1491,15 +1491,30 @@ struct file_notify_information {
        __u8  FileName[0];
 } __attribute__((packed));
 
-struct reparse_data {
-       __u32   ReparseTag;
-       __u16   ReparseDataLength;
+/* For IO_REPARSE_TAG_SYMLINK */
+struct reparse_symlink_data {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
        __u16   Reserved;
-       __u16   SubstituteNameOffset;
-       __u16   SubstituteNameLength;
-       __u16   PrintNameOffset;
-       __u16   PrintNameLength;
-       __u32   Flags;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __le32  Flags;
+       char    PathBuffer[0];
+} __attribute__((packed));
+
+/* For IO_REPARSE_TAG_NFS */
+#define NFS_SPECFILE_LNK       0x00000000014B4E4C
+#define NFS_SPECFILE_CHR       0x0000000000524843
+#define NFS_SPECFILE_BLK       0x00000000004B4C42
+#define NFS_SPECFILE_FIFO      0x000000004F464946
+#define NFS_SPECFILE_SOCK      0x000000004B434F53
+struct reparse_posix_data {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le64  InodeType; /* LNK, FIFO, CHR etc. */
        char    PathBuffer[0];
 } __attribute__((packed));
 
index 4baf359..ccd31ab 100644 (file)
@@ -3088,7 +3088,8 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
        bool is_unicode;
        unsigned int sub_len;
        char *sub_start;
-       struct reparse_data *reparse_buf;
+       struct reparse_symlink_data *reparse_buf;
+       struct reparse_posix_data *posix_buf;
        __u32 data_offset, data_count;
        char *end_of_smb;
 
@@ -3137,20 +3138,47 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                goto qreparse_out;
        }
        end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
-       reparse_buf = (struct reparse_data *)
+       reparse_buf = (struct reparse_symlink_data *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
        if ((char *)reparse_buf >= end_of_smb) {
                rc = -EIO;
                goto qreparse_out;
        }
-       if ((reparse_buf->PathBuffer + reparse_buf->PrintNameOffset +
-                               reparse_buf->PrintNameLength) > end_of_smb) {
+       if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
+               cifs_dbg(FYI, "NFS style reparse tag\n");
+               posix_buf =  (struct reparse_posix_data *)reparse_buf;
+
+               if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
+                       cifs_dbg(FYI, "unsupported file type 0x%llx\n",
+                                le64_to_cpu(posix_buf->InodeType));
+                       rc = -EOPNOTSUPP;
+                       goto qreparse_out;
+               }
+               is_unicode = true;
+               sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
+               if (posix_buf->PathBuffer + sub_len > end_of_smb) {
+                       cifs_dbg(FYI, "reparse buf beyond SMB\n");
+                       rc = -EIO;
+                       goto qreparse_out;
+               }
+               *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
+                               sub_len, is_unicode, nls_codepage);
+               goto qreparse_out;
+       } else if (reparse_buf->ReparseTag !=
+                       cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
+               rc = -EOPNOTSUPP;
+               goto qreparse_out;
+       }
+
+       /* Reparse tag is NTFS symlink */
+       sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
+                               reparse_buf->PathBuffer;
+       sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
+       if (sub_start + sub_len > end_of_smb) {
                cifs_dbg(FYI, "reparse buf beyond SMB\n");
                rc = -EIO;
                goto qreparse_out;
        }
-       sub_start = reparse_buf->SubstituteNameOffset + reparse_buf->PathBuffer;
-       sub_len = reparse_buf->SubstituteNameLength;
        if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
                is_unicode = true;
        else
index af847e1..651a527 100644 (file)
@@ -780,7 +780,9 @@ static const struct {
        ERRDOS, ERRnoaccess, 0xc0000290}, {
        ERRDOS, ERRbadfunc, 0xc000029c}, {
        ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
-       ERRDOS, ERRinvlevel, 0x007c0001}, };
+       ERRDOS, ERRinvlevel, 0x007c0001}, {
+       0, 0, 0 }
+};
 
 /*****************************************************************************
  Print an error message from the status code
index 352358d..e87387d 100644 (file)
@@ -500,9 +500,9 @@ select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
                                return NTLMv2;
                        if (global_secflags & CIFSSEC_MAY_NTLM)
                                return NTLM;
-                       /* Fallthrough */
                default:
-                       return Unspecified;
+                       /* Fallthrough to attempt LANMAN authentication next */
+                       break;
                }
        case CIFS_NEGFLAVOR_LANMAN:
                switch (requested) {
index eba0efd..edccb52 100644 (file)
@@ -687,6 +687,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        else
                return -EIO;
 
+       /* no need to send SMB logoff if uid already closed due to reconnect */
+       if (ses->need_reconnect)
+               goto smb2_session_already_dead;
+
        rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
        if (rc)
                return rc;
@@ -701,6 +705,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
         * No tcon so can't do
         * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
         */
+
+smb2_session_already_dead:
        return rc;
 }
 
index d952ee4..a4b2391 100644 (file)
 #define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */
 #define FSCTL_SRV_READ_HASH          0x001441BB /* BB add struct */
 
+/* See FSCC 2.1.2.5 */
 #define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
 #define IO_REPARSE_TAG_HSM           0xC0000004
 #define IO_REPARSE_TAG_SIS           0x80000007
+#define IO_REPARSE_TAG_HSM2          0x80000006
+#define IO_REPARSE_TAG_DRIVER_EXTENDER 0x80000005
+/* Used by the DFS filter. See MS-DFSC */
+#define IO_REPARSE_TAG_DFS           0x8000000A
+/* Used by the DFS filter See MS-DFSC */
+#define IO_REPARSE_TAG_DFSR          0x80000012
+#define IO_REPARSE_TAG_FILTER_MANAGER 0x8000000B
+/* See section MS-FSCC 2.1.2.4 */
+#define IO_REPARSE_TAG_SYMLINK       0xA000000C
+#define IO_REPARSE_TAG_DEDUP         0x80000013
+#define IO_REPARSE_APPXSTREAM       0xC0000014
+/* NFS symlinks, Win 8/SMB3 and later */
+#define IO_REPARSE_TAG_NFS           0x80000014
 
 /* fsctl flags */
 /* If Flags is set to this value, the request is an FSCTL not ioctl request */
index 6fdcb1b..800b938 100644 (file)
@@ -410,8 +410,13 @@ static int
 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
                      const int optype)
 {
-       return wait_for_free_credits(server, timeout,
-                               server->ops->get_credits_field(server, optype));
+       int *val;
+
+       val = server->ops->get_credits_field(server, optype);
+       /* Since an echo is already inflight, no need to wait to send another */
+       if (*val <= 0 && optype == CIFS_ECHO_OP)
+               return -EAGAIN;
+       return wait_for_free_credits(server, timeout, val);
 }
 
 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
index 4100030..20532cb 100644 (file)
@@ -1331,14 +1331,6 @@ rename_retry:
  * list is non-empty and continue searching.
  */
 
-/**
- * have_submounts - check for mounts over a dentry
- * @parent: dentry to check.
- *
- * Return true if the parent or its subdirectories contain
- * a mount point
- */
-
 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
 {
        int *ret = data;
@@ -1349,6 +1341,13 @@ static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
        return D_WALK_CONTINUE;
 }
 
+/**
+ * have_submounts - check for mounts over a dentry
+ * @parent: dentry to check.
+ *
+ * Return true if the parent or its subdirectories contain
+ * a mount point
+ */
 int have_submounts(struct dentry *parent)
 {
        int ret = 0;
index c88e355..000eae2 100644 (file)
@@ -408,7 +408,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
                                    struct page *page)
 {
        return ecryptfs_lower_header_size(crypt_stat) +
-              (page->index << PAGE_CACHE_SHIFT);
+              ((loff_t)page->index << PAGE_CACHE_SHIFT);
 }
 
 /**
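The ecryptfs change casts before shifting: on 32-bit kernels page->index is a 32-bit unsigned long, so index << PAGE_CACHE_SHIFT is evaluated in 32 bits and wraps for offsets of 4 GiB and above, and only the truncated result was being widened to loff_t. A minimal illustration, assuming a 32-bit unsigned long and PAGE_CACHE_SHIFT == 12:

        unsigned long index = 0x00100000;       /* byte offset 4 GiB            */
        loff_t wrong = index << 12;             /* shift wraps in 32 bits: 0    */
        loff_t right = (loff_t)index << 12;     /* 0x100000000, as intended     */
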
index 7d52806..4725a07 100644 (file)
@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        struct ecryptfs_msg_ctx *msg_ctx;
        struct ecryptfs_message *msg = NULL;
        char *auth_tok_sig;
-       char *payload;
+       char *payload = NULL;
        size_t payload_len = 0;
        int rc;
 
@@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        }
 out:
        kfree(msg);
+       kfree(payload);
        return rc;
 }
 
index 1194b1f..f8cde46 100644 (file)
@@ -1783,7 +1783,7 @@ retry:
                d_tmpfile(dentry, inode);
                err = ext3_orphan_add(handle, inode);
                if (err)
-                       goto err_drop_inode;
+                       goto err_unlock_inode;
                mark_inode_dirty(inode);
                unlock_new_inode(inode);
        }
@@ -1791,10 +1791,9 @@ retry:
        if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
-err_drop_inode:
+err_unlock_inode:
        ext3_journal_stop(handle);
        unlock_new_inode(inode);
-       iput(inode);
        return err;
 }
 
index 1bec5a5..5a0408d 100644 (file)
@@ -2319,7 +2319,7 @@ retry:
                d_tmpfile(dentry, inode);
                err = ext4_orphan_add(handle, inode);
                if (err)
-                       goto err_drop_inode;
+                       goto err_unlock_inode;
                mark_inode_dirty(inode);
                unlock_new_inode(inode);
        }
@@ -2328,10 +2328,9 @@ retry:
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
-err_drop_inode:
+err_unlock_inode:
        ext4_journal_stop(handle);
        unlock_new_inode(inode);
-       iput(inode);
        return err;
 }
 
index abdd15a..e900ca5 100644 (file)
@@ -297,7 +297,7 @@ void flush_delayed_fput(void)
        delayed_fput(NULL);
 }
 
-static DECLARE_WORK(delayed_fput_work, delayed_fput);
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
 
 void fput(struct file *file)
 {
@@ -317,7 +317,7 @@ void fput(struct file *file)
                }
 
                if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
-                       schedule_work(&delayed_fput_work);
+                       schedule_delayed_work(&delayed_fput_work, 1);
        }
 }
 
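The fs/file.c hunk turns the immediate work item into a delayed one scheduled one jiffy out. The two workqueue APIs differ only in the extra delay argument; a minimal usage sketch with illustrative names (my_handler is assumed to be defined elsewhere):

        #include <linux/workqueue.h>

        static void my_handler(struct work_struct *work);

        static DECLARE_WORK(my_work, my_handler);               /* runs as soon as possible */
        static DECLARE_DELAYED_WORK(my_dwork, my_handler);      /* runs after a delay       */

        static void kick(void)
        {
                schedule_work(&my_work);
                schedule_delayed_work(&my_dwork, 1);            /* delay in jiffies */
        }
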
index c1a3e60..7f464c5 100644 (file)
@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 
        if (insert_inode_locked(inode) < 0) {
                rc = -EINVAL;
-               goto fail_unlock;
+               goto fail_put;
        }
 
        inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
-fail_unlock:
        clear_nlink(inode);
        unlock_new_inode(inode);
 fail_put:
index 645268f..caa2805 100644 (file)
@@ -2294,10 +2294,11 @@ out:
  * path_mountpoint - look up a path to be umounted
  * @dfd:       directory file descriptor to start walk from
  * @name:      full pathname to walk
+ * @path:      pointer to container for result
  * @flags:     lookup flags
  *
  * Look up the given name, but don't attempt to revalidate the last component.
- * Returns 0 and "path" will be valid on success; Retuns error otherwise.
+ * Returns 0 and "path" will be valid on success; Returns error otherwise.
  */
 static int
 path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
index 9f8ef9b..8eaa1ba 100644 (file)
@@ -288,10 +288,14 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct proc_dir_entry *pde = PDE(file_inode(file));
-       int rv = -EIO;
-       unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+       unsigned long rv = -EIO;
+       unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL;
        if (use_pde(pde)) {
-               get_unmapped_area = pde->proc_fops->get_unmapped_area;
+#ifdef CONFIG_MMU
+               get_unmapped_area = current->mm->get_unmapped_area;
+#endif
+               if (pde->proc_fops->get_unmapped_area)
+                       get_unmapped_area = pde->proc_fops->get_unmapped_area;
                if (get_unmapped_area)
                        rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
                unuse_pde(pde);
index 7366e9d..390bdab 100644 (file)
@@ -941,6 +941,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
                frame = pte_pfn(pte);
                flags = PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
+               if (pte_soft_dirty(pte))
+                       flags2 |= __PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                swp_entry_t entry;
                if (pte_swp_soft_dirty(pte))
@@ -960,7 +962,7 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 
        if (page && !PageAnon(page))
                flags |= PM_FILE;
-       if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
+       if ((vma->vm_flags & VM_SOFTDIRTY))
                flags2 |= __PM_SOFT_DIRTY;
 
        *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
index 3135c25..a290157 100644 (file)
@@ -328,6 +328,8 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
                                m->read_pos = offset;
                                retval = file->f_pos = offset;
                        }
+               } else {
+                       file->f_pos = offset;
                }
        }
        file->f_version = m->version;
index 02e113b..d901982 100644 (file)
@@ -311,7 +311,6 @@ struct acpi_device {
        unsigned int physical_node_count;
        struct list_head physical_node_list;
        struct mutex physical_node_lock;
-       struct list_head power_dependent;
        void (*remove)(struct acpi_device *);
 };
 
@@ -456,8 +455,6 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
 acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
                                    acpi_notify_handler handler);
 int acpi_pm_device_sleep_state(struct device *, int *, int);
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev);
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev);
 #else
 static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
                                               acpi_notify_handler handler,
@@ -478,10 +475,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
        return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
                m : ACPI_STATE_D0;
 }
-static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
-                                            struct device *depdev) {}
-static inline void acpi_dev_pm_remove_dependent(acpi_handle handle,
-                                               struct device *depdev) {}
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
index a6ac848..ff4e40c 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/atomic.h>
 #include <linux/compat.h>
+#include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
 #ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
 {
        atomic_t                refcnt;
        unsigned int            len;    /* Number of filter blocks */
+       struct rcu_head         rcu;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct sock_filter *filter);
-       struct rcu_head         rcu;
-       struct sock_filter      insns[0];
+       union {
+               struct sock_filter      insns[0];
+               struct work_struct      work;
+       };
 };
 
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
 {
-       return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+       return max(sizeof(struct sk_filter),
+                  offsetof(struct sk_filter, insns[proglen]));
 }
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 }
 #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
 #else
+#include <linux/slab.h>
 static inline void bpf_jit_compile(struct sk_filter *fp)
 {
 }
 static inline void bpf_jit_free(struct sk_filter *fp)
 {
+       kfree(fp);
 }
 #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
 #endif
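sk_filter_size() above is the usual flexible-array sizing idiom: the allocation covers the header plus proglen trailing instructions but never shrinks below sizeof(struct sk_filter), which matters now that a work_struct overlays insns[] for deferred freeing. An illustrative allocation (not the exact sock.c call site):

        unsigned int bytes = sk_filter_size(proglen);
        struct sk_filter *fp = kzalloc(bytes, GFP_KERNEL);      /* header + proglen insns */
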
index ecc82b3..b3e7a66 100644 (file)
@@ -137,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
-/**
- * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
- * @new: true to enable, false to disable
- *
- * Toggle whether a failed memcg charge should invoke the OOM killer
- * or just return -ENOMEM.  Returns the previous toggle state.
- *
- * NOTE: Any path that enables the OOM killer before charging must
- *       call mem_cgroup_oom_synchronize() afterward to finalize the
- *       OOM handling and clean up.
- */
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
 {
-       bool old;
-
-       old = current->memcg_oom.may_oom;
-       current->memcg_oom.may_oom = new;
-
-       return old;
+       WARN_ON(current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 1;
 }
 
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
-       bool old = mem_cgroup_toggle_oom(true);
-
-       WARN_ON(old == true);
-}
-
-static inline void mem_cgroup_disable_oom(void)
-{
-       bool old = mem_cgroup_toggle_oom(false);
-
-       WARN_ON(old == false);
+       WARN_ON(!current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-       return p->memcg_oom.in_memcg_oom;
+       return p->memcg_oom.memcg;
 }
 
-bool mem_cgroup_oom_synchronize(void);
+bool mem_cgroup_oom_synchronize(bool wait);
 
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
@@ -402,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
-static inline bool mem_cgroup_toggle_oom(bool new)
-{
-       return false;
-}
-
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_enable(void)
 {
 }
 
-static inline void mem_cgroup_disable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
 }
 
@@ -420,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
        return false;
 }
 
-static inline bool mem_cgroup_oom_synchronize(void)
+static inline bool mem_cgroup_oom_synchronize(bool wait)
 {
        return false;
 }
index 68029b3..5eb4e31 100644 (file)
@@ -181,7 +181,7 @@ enum {
        MLX5_DEV_CAP_FLAG_TLP_HINTS     = 1LL << 39,
        MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
        MLX5_DEV_CAP_FLAG_DCT           = 1LL << 41,
-       MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 1LL << 46,
+       MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 3LL << 46,
 };
 
 enum {
@@ -417,7 +417,7 @@ struct mlx5_init_seg {
        struct health_buffer    health;
        __be32                  rsvd2[884];
        __be32                  health_counter;
-       __be32                  rsvd3[1023];
+       __be32                  rsvd3[1019];
        __be64                  ieee1588_clk;
        __be32                  ieee1588_clk_type;
        __be32                  clr_intx;
index 8888381..6b8c496 100644 (file)
@@ -82,7 +82,7 @@ enum {
 };
 
 enum {
-       MLX5_MAX_EQ_NAME        = 20
+       MLX5_MAX_EQ_NAME        = 32
 };
 
 enum {
@@ -747,8 +747,7 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
 
 enum {
        MLX5_PROF_MASK_QP_SIZE          = (u64)1 << 0,
-       MLX5_PROF_MASK_CMDIF_CSUM       = (u64)1 << 1,
-       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 2,
+       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 1,
 };
 
 enum {
@@ -758,7 +757,6 @@ enum {
 struct mlx5_profile {
        u64     mask;
        u32     log_max_qp;
-       int     cmdif_csum;
        struct {
                int     size;
                int     limit;
index 3de49ac..25f5d2d 100644 (file)
@@ -2264,11 +2264,12 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 }
 
 #ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
+extern int netif_set_xps_queue(struct net_device *dev,
+                              const struct cpumask *mask,
                               u16 index);
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
-                                     struct cpumask *mask,
+                                     const struct cpumask *mask,
                                      u16 index)
 {
        return 0;
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
deleted file mode 100644 (file)
index c841282..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __OF_RESERVED_MEM_H
-#define __OF_RESERVED_MEM_H
-
-#ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
-void of_reserved_mem_device_release(struct device *dev);
-void early_init_dt_scan_reserved_mem(void);
-#else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
-static inline void of_reserved_mem_device_release(struct device *dev) { }
-static inline void early_init_dt_scan_reserved_mem(void) { }
-#endif
-
-#endif /* __OF_RESERVED_MEM_H */
index 6682da3..e27baee 100644 (file)
@@ -1394,11 +1394,10 @@ struct task_struct {
        } memcg_batch;
        unsigned int memcg_kmem_skip_account;
        struct memcg_oom_info {
+               struct mem_cgroup *memcg;
+               gfp_t gfp_mask;
+               int order;
                unsigned int may_oom:1;
-               unsigned int in_memcg_oom:1;
-               unsigned int oom_locked:1;
-               int wakeups;
-               struct mem_cgroup *wait_on_memcg;
        } memcg_oom;
 #endif
 #ifdef CONFIG_UPROBES
diff --git a/include/linux/tc_act/tc_defact.h b/include/linux/tc_act/tc_defact.h
deleted file mode 100644 (file)
index 6f65d07..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __LINUX_TC_DEF_H
-#define __LINUX_TC_DEF_H
-
-#include <linux/pkt_cls.h>
-
-struct tc_defact {
-       tc_gen;
-};
-                                                                                
-enum {
-       TCA_DEF_UNSPEC,
-       TCA_DEF_TM,
-       TCA_DEF_PARMS,
-       TCA_DEF_DATA,
-       __TCA_DEF_MAX
-};
-#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
-
-#endif
index f9a7e7b..11d85b9 100644 (file)
@@ -12,7 +12,7 @@ struct usb_phy_gen_xceiv_platform_data {
        unsigned int needs_reset:1;
 };
 
-#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
+#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
 /* sometimes transceivers are accessed only through e.g. ULPI */
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
index bf99cd0..6303568 100644 (file)
@@ -66,7 +66,9 @@
        US_FLAG(INITIAL_READ10, 0x00100000)                     \
                /* Initial READ(10) (and others) must be retried */     \
        US_FLAG(WRITE_CACHE,    0x00200000)                     \
-               /* Write Cache status is not available */
+               /* Write Cache status is not available */       \
+       US_FLAG(NEEDS_CAP16,    0x00400000)
+               /* cannot handle READ_CAPACITY_10 */
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index 7fe2822..512cdc2 100644 (file)
@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
 
 struct yamdrv_ioctl_mcs {
        int cmd;
-       int bitrate;
+       unsigned int bitrate;
        unsigned char bits[YAM_FPGA_SIZE];
 };
index a7a683e..a8c2ef6 100644 (file)
@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        unsigned char err_offset = 0;
        u8 opt_len = opt[1];
        u8 opt_iter;
+       u8 tag_len;
 
        if (opt_len < 8) {
                err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
-               if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+               tag_len = opt[opt_iter + 1];
+               if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
                        goto out;
                }
-               opt_iter += opt[opt_iter + 1];
+               opt_iter += tag_len;
        }
 
 out:
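The CIPSO hunk closes a hazard common to any TLV walk: the per-tag length comes from the packet, and a zero value leaves opt_iter unchanged, so the loop must reject it explicitly or it never terminates. The guard, abstracted (buf/off/total are illustrative):

        while (off < total) {
                u8 len = buf[off + 1];
                if (len == 0 || len > total - off)
                        return -EINVAL;         /* malformed option */
                off += len;
        }
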
index 3bc4865..3c4c944 100644 (file)
@@ -479,10 +479,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
 {
        return dst_orig;
 } 
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+       return NULL;
+}
+
 #else
 extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                     const struct flowi *fl, struct sock *sk,
                                     int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+       return dst->xfrm;
+}
 #endif
 
 #endif /* _NET_DST_H */
index f525e70..2b786b7 100644 (file)
@@ -194,11 +194,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 }
 
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
 {
-       if (rt->rt6i_flags & RTF_GATEWAY)
-               return &rt->rt6i_gateway;
-       return dest;
+       return &rt->rt6i_gateway;
 }
 
 #endif
index d0d11df..807d6b7 100644 (file)
@@ -133,7 +133,7 @@ struct ieee802154_ops {
 
 /* Basic interface to register ieee802154 device */
 struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops);
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
 void ieee802154_free_device(struct ieee802154_dev *dev);
 int ieee802154_register_device(struct ieee802154_dev *dev);
 void ieee802154_unregister_device(struct ieee802154_dev *dev);
index 1d37a80..808cbc2 100644 (file)
@@ -1630,16 +1630,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-       unsigned int size = sk_filter_len(fp);
-
-       atomic_sub(size, &sk->sk_omem_alloc);
+       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
        sk_filter_release(fp);
 }
 
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
        atomic_inc(&fp->refcnt);
-       atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 }
 
 /*
index fe66533..fb0a312 100644 (file)
@@ -68,6 +68,7 @@ struct rsnd_scu_platform_info {
  *
  * A : generation
  */
+#define RSND_GEN_MASK  (0xF << 0)
 #define RSND_GEN1      (1 << 0) /* fixme */
 #define RSND_GEN2      (2 << 0) /* fixme */
 
index aef8fc3..da9cc0f 100644 (file)
@@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
        ),
 
        TP_fast_assign(
-               __entry->unpacked_lun   = cmd->se_lun->unpacked_lun;
+               __entry->unpacked_lun   = cmd->orig_fe_lun;
                __entry->opcode         = cmd->t_task_cdb[0];
                __entry->data_length    = cmd->data_length;
                __entry->task_attribute = cmd->sam_task_attr;
@@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
        ),
 
        TP_fast_assign(
-               __entry->unpacked_lun   = cmd->se_lun->unpacked_lun;
+               __entry->unpacked_lun   = cmd->orig_fe_lun;
                __entry->opcode         = cmd->t_task_cdb[0];
                __entry->data_length    = cmd->data_length;
                __entry->task_attribute = cmd->sam_task_attr;
index 5508117..28acbaf 100644 (file)
@@ -223,6 +223,8 @@ struct drm_mode_get_connector {
        __u32 connection;
        __u32 mm_width, mm_height; /**< HxW in millimeters */
        __u32 subpixel;
+
+       __u32 pad;
 };
 
 #define DRM_MODE_PROP_PENDING  (1<<0)
index 0623ec4..56f1216 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += tc_csum.h
+header-y += tc_defact.h
 header-y += tc_gact.h
 header-y += tc_ipt.h
 header-y += tc_mirred.h
diff --git a/include/uapi/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h
new file mode 100644 (file)
index 0000000..17dddb4
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef __LINUX_TC_DEF_H
+#define __LINUX_TC_DEF_H
+
+#include <linux/pkt_cls.h>
+
+struct tc_defact {
+       tc_gen;
+};
+
+enum {
+       TCA_DEF_UNSPEC,
+       TCA_DEF_TM,
+       TCA_DEF_PARMS,
+       TCA_DEF_DATA,
+       __TCA_DEF_MAX
+};
+#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
+
+#endif
index 0b233c5..e3ddd86 100644 (file)
@@ -87,8 +87,10 @@ enum {
        IB_USER_VERBS_CMD_CLOSE_XRCD,
        IB_USER_VERBS_CMD_CREATE_XSRQ,
        IB_USER_VERBS_CMD_OPEN_QP,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_CMD_DESTROY_FLOW
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 
 /*
@@ -126,6 +128,7 @@ struct ib_uverbs_cmd_hdr {
        __u16 out_words;
 };
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 struct ib_uverbs_cmd_hdr_ex {
        __u32 command;
        __u16 in_words;
@@ -134,6 +137,7 @@ struct ib_uverbs_cmd_hdr_ex {
        __u16 provider_out_words;
        __u32 cmd_hdr_reserved;
 };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 struct ib_uverbs_get_context {
        __u64 response;
@@ -696,6 +700,7 @@ struct ib_uverbs_detach_mcast {
        __u64 driver_data[0];
 };
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 struct ib_kern_eth_filter {
        __u8  dst_mac[6];
        __u8  src_mac[6];
@@ -780,6 +785,7 @@ struct ib_uverbs_destroy_flow  {
        __u32 comp_mask;
        __u32 flow_handle;
 };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 struct ib_uverbs_create_srq {
        __u64 response;
index 8c4f59b..db9d241 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1282,6 +1282,12 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 
        sem_lock(sma, NULL, -1);
 
+       if (sma->sem_perm.deleted) {
+               sem_unlock(sma, -1);
+               rcu_read_unlock();
+               return -EIDRM;
+       }
+
        curr = &sma->sem_base[semnum];
 
        ipc_assert_locked_object(&sma->sem_perm);
@@ -1336,12 +1342,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int i;
 
                sem_lock(sma, NULL, -1);
+               if (sma->sem_perm.deleted) {
+                       err = -EIDRM;
+                       goto out_unlock;
+               }
                if(nsems > SEMMSL_FAST) {
                        if (!ipc_rcu_getref(sma)) {
-                               sem_unlock(sma, -1);
-                               rcu_read_unlock();
                                err = -EIDRM;
-                               goto out_free;
+                               goto out_unlock;
                        }
                        sem_unlock(sma, -1);
                        rcu_read_unlock();
@@ -1354,10 +1362,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                        rcu_read_lock();
                        sem_lock_and_putref(sma);
                        if (sma->sem_perm.deleted) {
-                               sem_unlock(sma, -1);
-                               rcu_read_unlock();
                                err = -EIDRM;
-                               goto out_free;
+                               goto out_unlock;
                        }
                }
                for (i = 0; i < sma->sem_nsems; i++)
@@ -1375,8 +1381,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                struct sem_undo *un;
 
                if (!ipc_rcu_getref(sma)) {
-                       rcu_read_unlock();
-                       return -EIDRM;
+                       err = -EIDRM;
+                       goto out_rcu_wakeup;
                }
                rcu_read_unlock();
 
@@ -1404,10 +1410,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                rcu_read_lock();
                sem_lock_and_putref(sma);
                if (sma->sem_perm.deleted) {
-                       sem_unlock(sma, -1);
-                       rcu_read_unlock();
                        err = -EIDRM;
-                       goto out_free;
+                       goto out_unlock;
                }
 
                for (i = 0; i < nsems; i++)
@@ -1431,6 +1435,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                goto out_rcu_wakeup;
 
        sem_lock(sma, NULL, -1);
+       if (sma->sem_perm.deleted) {
+               err = -EIDRM;
+               goto out_unlock;
+       }
        curr = &sma->sem_base[semnum];
 
        switch (cmd) {
@@ -1836,6 +1844,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        if (error)
                goto out_rcu_wakeup;
 
+       error = -EIDRM;
+       locknum = sem_lock(sma, sops, nsops);
+       if (sma->sem_perm.deleted)
+               goto out_unlock_free;
        /*
         * semid identifiers are not unique - find_alloc_undo may have
         * allocated an undo structure, it was invalidated by an RMID
@@ -1843,8 +1855,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
         * This case can be detected checking un->semid. The existence of
         * "un" itself is guaranteed by rcu.
         */
-       error = -EIDRM;
-       locknum = sem_lock(sma, sops, nsops);
        if (un && un->semid == -1)
                goto out_unlock_free;
 
@@ -2057,6 +2067,12 @@ void exit_sem(struct task_struct *tsk)
                }
 
                sem_lock(sma, NULL, -1);
+               /* exit_sem raced with IPC_RMID, nothing to do */
+               if (sma->sem_perm.deleted) {
+                       sem_unlock(sma, -1);
+                       rcu_read_unlock();
+                       continue;
+               }
                un = __lookup_undo(ulp, semid);
                if (un == NULL) {
                        /* exit_sem raced with IPC_RMID+semget() that created
index fdb8ae7..7684f41 100644 (file)
  *            Pavel Emelianov <xemul@openvz.org>
  *
  * General sysv ipc locking scheme:
- *  when doing ipc id lookups, take the ids->rwsem
- *      rcu_read_lock()
- *          obtain the ipc object (kern_ipc_perm)
- *          perform security, capabilities, auditing and permission checks, etc.
- *          acquire the ipc lock (kern_ipc_perm.lock) throught ipc_lock_object()
- *             perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
+ *     rcu_read_lock()
+ *          obtain the ipc object (kern_ipc_perm) by looking up the id in an idr
+ *         tree.
+ *         - perform initial checks (capabilities, auditing and permission,
+ *           etc).
+ *         - perform read-only operations, such as STAT, INFO commands.
+ *           acquire the ipc lock (kern_ipc_perm.lock) through
+ *           ipc_lock_object()
+ *             - perform data updates, such as SET, RMID commands and
+ *               mechanism-specific operations (semop/semtimedop,
+ *               msgsnd/msgrcv, shmat/shmdt).
+ *         drop the ipc lock, through ipc_unlock_object().
+ *     rcu_read_unlock()
+ *
+ *  The ids->rwsem must be taken when:
+ *     - creating, removing and iterating the existing entries in ipc
+ *       identifier sets.
+ *     - iterating through files under /proc/sysvipc/
+ *
+ *  Note that sems have a special fast path that avoids kern_ipc_perm.lock -
+ *  see sem_lock().
  */
 
 #include <linux/mm.h>
index 2418b6e..8bd9cfd 100644 (file)
@@ -2039,7 +2039,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
                /* @tsk either already exited or can't exit until the end */
                if (tsk->flags & PF_EXITING)
-                       continue;
+                       goto next;
 
                /* as per above, nr_threads may decrease, but not increase. */
                BUG_ON(i >= group_size);
@@ -2047,7 +2047,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                ent.cgrp = task_cgroup_from_root(tsk, root);
                /* nothing to do if this task is already in the cgroup */
                if (ent.cgrp == cgrp)
-                       continue;
+                       goto next;
                /*
                 * saying GFP_ATOMIC has no effect here because we did prealloc
                 * earlier, but it's good form to communicate our expectations.
@@ -2055,7 +2055,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                BUG_ON(retval != 0);
                i++;
-
+       next:
                if (!threadgroup)
                        break;
        } while_each_thread(leader, tsk);
@@ -3188,11 +3188,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* if first iteration, visit the leftmost descendant */
-       if (!pos) {
-               next = css_leftmost_descendant(root);
-               return next != root ? next : NULL;
-       }
+       /* if first iteration, visit leftmost descendant which may be @root */
+       if (!pos)
+               return css_leftmost_descendant(root);
 
        /* if we visited @root, we're done */
        if (pos == root)
index d49a9d2..953c143 100644 (file)
@@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        if (ret)
                return -EFAULT;
 
+       /* disabled for now */
+       if (attr->mmap2)
+               return -EINVAL;
+
        if (attr->__reserved_1)
                return -EINVAL;
 
index 6d647ae..d24105b 100644 (file)
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
-                   struct ww_acquire_ctx *ww_ctx)
+                   struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
        struct task_struct *task = current;
        struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                struct task_struct *owner;
                struct mspin_node  node;
 
-               if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+               if (use_ww_ctx && ww_ctx->acquired > 0) {
                        struct ww_mutex *ww;
 
                        ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
-                       if (!__builtin_constant_p(ww_ctx == NULL)) {
+                       if (use_ww_ctx) {
                                struct ww_mutex *ww;
                                ww = container_of(lock, struct ww_mutex, base);
 
@@ -551,7 +551,7 @@ slowpath:
                        goto err;
                }
 
-               if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+               if (use_ww_ctx && ww_ctx->acquired > 0) {
                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
                        if (ret)
                                goto err;
@@ -575,7 +575,7 @@ skip_wait:
        lock_acquired(&lock->dep_map, ip);
        mutex_set_owner(lock);
 
-       if (!__builtin_constant_p(ww_ctx == NULL)) {
+       if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           subclass, NULL, _RET_IP_, NULL);
+                           subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           0, nest, _RET_IP_, NULL);
+                           0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE,
-                                  subclass, NULL, _RET_IP_, NULL);
+                                  subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                  subclass, NULL, _RET_IP_, NULL);
+                                  subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
        might_sleep();
        ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx);
+                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                 0, &ctx->dep_map, _RET_IP_, ctx);
+                                 0, &ctx->dep_map, _RET_IP_, ctx, 1);
 
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
        struct mutex *lock = container_of(lock_count, struct mutex, count);
 
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-                           NULL, _RET_IP_, NULL);
+                           NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
        return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-                                  NULL, _RET_IP_, NULL);
+                                  NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, NULL);
+                                  NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx);
+                                  NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                            struct ww_acquire_ctx *ctx)
 {
        return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx);
+                                  NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
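
The mutex.c hunks above replace the __builtin_constant_p(ww_ctx == NULL) test with an explicit use_ww_ctx flag that every caller passes as a literal 0 or 1; because __mutex_lock_common() is inlined into those callers, the compiler can still drop the ww-mutex branches from the plain-mutex paths, without relying on the optimizer proving the NULL-ness constant. A minimal userspace sketch of that constant-flag pattern (all names here are illustrative, not the kernel implementation):

#include <stdio.h>

struct ctx {
        int acquired;   /* how many ww locks this context already holds */
};

/*
 * The flag is a compile-time constant at every call site; once this
 * function is inlined, the dead branch disappears from the plain path.
 */
static inline __attribute__((always_inline))
int lock_common(const char *name, struct ctx *ctx, int use_ctx)
{
        if (use_ctx && ctx->acquired > 0)
                printf("%s: ww path, %d locks already held\n",
                       name, ctx->acquired);
        else
                printf("%s: plain mutex path\n", name);
        return 0;
}

/* Plain-mutex wrapper: constant 0, ctx is never dereferenced. */
static int lock_plain(const char *name)
{
        return lock_common(name, NULL, 0);
}

/* ww-mutex wrapper: constant 1, ctx must be valid. */
static int lock_ww(const char *name, struct ctx *ctx)
{
        return lock_common(name, ctx, 1);
}

int main(void)
{
        struct ctx ctx = { .acquired = 2 };

        lock_plain("mutex_lock");
        lock_ww("ww_mutex_lock", &ctx);
        return 0;
}

With optimization enabled, the lock_plain() expansion can lose the ctx branch entirely, which is the effect the explicit flag preserves without depending on __builtin_constant_p().
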
index c9c759d..0121dab 100644 (file)
@@ -846,7 +846,7 @@ static int software_resume(void)
        goto Finish;
 }
 
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
 
 
 static const char * const hibernation_modes[] = {
index 38959c8..662c579 100644 (file)
@@ -33,29 +33,64 @@ struct ce_unbind {
        int res;
 };
 
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch:     value to convert
- * @evt:       pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+                       bool ismax)
 {
        u64 clc = (u64) latch << evt->shift;
+       u64 rnd;
 
        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
+       rnd = (u64) evt->mult - 1;
+
+       /*
+        * Upper bound sanity check. If the backwards conversion is
+        * not equal to latch, we know that the above shift overflowed.
+        */
+       if ((clc >> evt->shift) != (u64)latch)
+               clc = ~0ULL;
+
+       /*
+        * Scaled math oddities:
+        *
+        * For mult <= (1 << shift) we can safely add mult - 1 to
+        * prevent integer rounding loss. So the backwards conversion
+        * from nsec to device ticks will be correct.
+        *
+        * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+        * need to be careful. Adding mult - 1 will result in a value
+        * which when converted back to device ticks can be larger
+        * than latch by up to (mult - 1) >> shift. For the min_delta
+        * calculation we still want to apply this in order to stay
+        * above the minimum device ticks limit. For the upper limit
+        * we would end up with a latch value larger than the upper
+        * limit of the device, so we omit the add to stay below the
+        * device upper boundary.
+        *
+        * Also omit the add if it would overflow the u64 boundary.
+        */
+       if ((~0ULL - clc > rnd) &&
+           (!ismax || evt->mult <= (1U << evt->shift)))
+               clc += rnd;
 
        do_div(clc, evt->mult);
-       if (clc < 1000)
-               clc = 1000;
-       if (clc > KTIME_MAX)
-               clc = KTIME_MAX;
 
-       return clc;
+       /* Deltas less than 1usec are pointless noise */
+       return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch:     value to convert
+ * @evt:       pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+       return cev_delta2ns(latch, evt, false);
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
                sec = 600;
 
        clockevents_calc_mult_shift(dev, freq, sec);
-       dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
-       dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+       dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+       dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
 }
 
 /**
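
The new cev_delta2ns() above bound-checks the latch-to-nanoseconds conversion and only rounds up by mult - 1 when that cannot push the result past the device's upper limit (mult > 1 << shift means a device faster than 1GHz). A small userspace sketch of the same arithmetic, with made-up mult/shift values for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * Simplified stand-in for a clock event device: ns -> ticks is
 * (ns * mult) >> shift, so ticks -> ns is (ticks << shift) / mult.
 */
struct cev {
        uint32_t mult;
        uint32_t shift;
};

static uint64_t delta2ns(uint64_t latch, const struct cev *evt, bool ismax)
{
        uint64_t clc = latch << evt->shift;
        uint64_t rnd = (uint64_t)evt->mult - 1;

        /* If the shift lost bits, clamp to the largest representable value. */
        if ((clc >> evt->shift) != latch)
                clc = ~0ULL;

        /*
         * Round up so the ns -> ticks conversion does not drop below the
         * device minimum, but skip it for the max limit of a device with
         * mult > (1 << shift), where rounding up could exceed the device's
         * upper bound.  Also skip it if the add would wrap the u64.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1U << evt->shift)))
                clc += rnd;

        clc /= evt->mult;

        /* Deltas below 1usec are noise. */
        return clc > 1000 ? clc : 1000;
}

int main(void)
{
        /* Hypothetical ~2GHz device: mult is larger than 1 << shift. */
        struct cev evt = { .mult = 1 << 17, .shift = 16 };

        printf("min_delta: %llu ns\n",
               (unsigned long long)delta2ns(10000, &evt, false));
        printf("max_delta: %llu ns\n",
               (unsigned long long)delta2ns(0x7fffffff, &evt, true));
        return 0;
}
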
index 7deeb62..1a53d49 100644 (file)
@@ -53,6 +53,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
        ref->release = release;
        return 0;
 }
+EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
  * percpu_ref_cancel_init - cancel percpu_ref_init()
@@ -84,6 +85,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
                free_percpu(ref->pcpu_count);
        }
 }
+EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
@@ -156,3 +158,4 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 
        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
index 1e6aec4..ae4846f 100644 (file)
@@ -1616,7 +1616,6 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = mapping->host;
        pgoff_t offset = vmf->pgoff;
        struct page *page;
-       bool memcg_oom;
        pgoff_t size;
        int ret = 0;
 
@@ -1625,11 +1624,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        /*
-        * Do we have something in the page cache already?  Either
-        * way, try readahead, but disable the memcg OOM killer for it
-        * as readahead is optional and no errors are propagated up
-        * the fault stack.  The OOM killer is enabled while trying to
-        * instantiate the faulting page individually below.
+        * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
        if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
@@ -1637,14 +1632,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
-               memcg_oom = mem_cgroup_toggle_oom(false);
                do_async_mmap_readahead(vma, ra, file, page, offset);
-               mem_cgroup_toggle_oom(memcg_oom);
        } else if (!page) {
                /* No page in the page cache at all */
-               memcg_oom = mem_cgroup_toggle_oom(false);
                do_sync_mmap_readahead(vma, ra, file, offset);
-               mem_cgroup_toggle_oom(memcg_oom);
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
index 7489884..610e3df 100644 (file)
@@ -2697,6 +2697,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 
        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2719,7 +2720,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
        split_huge_page(page);
 
        put_page(page);
-       BUG_ON(pmd_trans_huge(*pmd));
+
+       /*
+        * We don't always have down_write of mmap_sem here: a racing
+        * do_huge_pmd_wp_page() might have copied-on-write to another
+        * huge page before our split_huge_page() got the anon_vma lock.
+        */
+       if (unlikely(pmd_trans_huge(*pmd)))
+               goto again;
 }
 
 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
index b49579c..0b7656e 100644 (file)
@@ -653,6 +653,7 @@ static void free_huge_page(struct page *page)
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
        restore_reserve = PagePrivate(page);
+       ClearPagePrivate(page);
 
        spin_lock(&hugetlb_lock);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
@@ -695,8 +696,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
+       __ClearPageReserved(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               /*
+                * For gigantic hugepages allocated through bootmem at
+                * boot, it's safer to be consistent with the not-gigantic
+                * hugepages and clear the PG_reserved bit from all tail pages
+                * too.  Otherwise drivers using get_user_pages() to access tail
+                * pages may get the reference counting wrong if they see
+                * PG_reserved set on a tail page (despite the head page not
+                * having PG_reserved set).  Enforcing this consistency between
+                * head and tail pages allows drivers to optimize away a check
+                * on the head page when they need to know if put_page() is needed
+                * after get_user_pages().
+                */
+               __ClearPageReserved(p);
                set_page_count(p, 0);
                p->first_page = page;
        }
@@ -1329,9 +1344,9 @@ static void __init gather_bootmem_prealloc(void)
 #else
                page = virt_to_page(m);
 #endif
-               __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
+               WARN_ON(PageReserved(page));
                prep_new_huge_page(h, page, page_to_nid(page));
                /*
                 * If we had gigantic hugepages allocated at boot time, we need
index 1c52ddb..34d3ca9 100644 (file)
@@ -866,6 +866,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
        unsigned long val = 0;
        int cpu;
 
+       get_online_cpus();
        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->events[idx], cpu);
 #ifdef CONFIG_HOTPLUG_CPU
@@ -873,6 +874,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
        val += memcg->nocpu_base.events[idx];
        spin_unlock(&memcg->pcp_counter_lock);
 #endif
+       put_online_cpus();
        return val;
 }
 
@@ -2159,110 +2161,59 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
                memcg_wakeup_oom(memcg);
 }
 
-/*
- * try to call OOM killer
- */
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       bool locked;
-       int wakeups;
-
        if (!current->memcg_oom.may_oom)
                return;
-
-       current->memcg_oom.in_memcg_oom = 1;
-
        /*
-        * As with any blocking lock, a contender needs to start
-        * listening for wakeups before attempting the trylock,
-        * otherwise it can miss the wakeup from the unlock and sleep
-        * indefinitely.  This is just open-coded because our locking
-        * is so particular to memcg hierarchies.
+        * We are in the middle of the charge context here, so we
+        * don't want to block when potentially sitting on a callstack
+        * that holds all kinds of filesystem and mm locks.
+        *
+        * Also, the caller may handle a failed allocation gracefully
+        * (like optional page cache readahead) and so an OOM killer
+        * invocation might not even be necessary.
+        *
+        * That's why we don't do anything here except remember the
+        * OOM context and then deal with it at the end of the page
+        * fault when the stack is unwound, the locks are released,
+        * and when we know whether the fault was overall successful.
         */
-       wakeups = atomic_read(&memcg->oom_wakeups);
-       mem_cgroup_mark_under_oom(memcg);
-
-       locked = mem_cgroup_oom_trylock(memcg);
-
-       if (locked)
-               mem_cgroup_oom_notify(memcg);
-
-       if (locked && !memcg->oom_kill_disable) {
-               mem_cgroup_unmark_under_oom(memcg);
-               mem_cgroup_out_of_memory(memcg, mask, order);
-               mem_cgroup_oom_unlock(memcg);
-               /*
-                * There is no guarantee that an OOM-lock contender
-                * sees the wakeups triggered by the OOM kill
-                * uncharges.  Wake any sleepers explicitely.
-                */
-               memcg_oom_recover(memcg);
-       } else {
-               /*
-                * A system call can just return -ENOMEM, but if this
-                * is a page fault and somebody else is handling the
-                * OOM already, we need to sleep on the OOM waitqueue
-                * for this memcg until the situation is resolved.
-                * Which can take some time because it might be
-                * handled by a userspace task.
-                *
-                * However, this is the charge context, which means
-                * that we may sit on a large call stack and hold
-                * various filesystem locks, the mmap_sem etc. and we
-                * don't want the OOM handler to deadlock on them
-                * while we sit here and wait.  Store the current OOM
-                * context in the task_struct, then return -ENOMEM.
-                * At the end of the page fault handler, with the
-                * stack unwound, pagefault_out_of_memory() will check
-                * back with us by calling
-                * mem_cgroup_oom_synchronize(), possibly putting the
-                * task to sleep.
-                */
-               current->memcg_oom.oom_locked = locked;
-               current->memcg_oom.wakeups = wakeups;
-               css_get(&memcg->css);
-               current->memcg_oom.wait_on_memcg = memcg;
-       }
+       css_get(&memcg->css);
+       current->memcg_oom.memcg = memcg;
+       current->memcg_oom.gfp_mask = mask;
+       current->memcg_oom.order = order;
 }
 
 /**
  * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ * @handle: actually kill/wait or just clean up the OOM state
  *
- * This has to be called at the end of a page fault if the the memcg
- * OOM handler was enabled and the fault is returning %VM_FAULT_OOM.
+ * This has to be called at the end of a page fault if the memcg OOM
+ * handler was enabled.
  *
- * Memcg supports userspace OOM handling, so failed allocations must
+ * Memcg supports userspace OOM handling where failed allocations must
  * sleep on a waitqueue until the userspace task resolves the
  * situation.  Sleeping directly in the charge context with all kinds
  * of locks held is not a good idea, instead we remember an OOM state
  * in the task and mem_cgroup_oom_synchronize() has to be called at
- * the end of the page fault to put the task to sleep and clean up the
- * OOM state.
+ * the end of the page fault to complete the OOM handling.
  *
  * Returns %true if an ongoing memcg OOM situation was detected and
- * finalized, %false otherwise.
+ * completed, %false otherwise.
  */
-bool mem_cgroup_oom_synchronize(void)
+bool mem_cgroup_oom_synchronize(bool handle)
 {
+       struct mem_cgroup *memcg = current->memcg_oom.memcg;
        struct oom_wait_info owait;
-       struct mem_cgroup *memcg;
+       bool locked;
 
        /* OOM is global, do not handle */
-       if (!current->memcg_oom.in_memcg_oom)
-               return false;
-
-       /*
-        * We invoked the OOM killer but there is a chance that a kill
-        * did not free up any charges.  Everybody else might already
-        * be sleeping, so restart the fault and keep the rampage
-        * going until some charges are released.
-        */
-       memcg = current->memcg_oom.wait_on_memcg;
        if (!memcg)
-               goto out;
+               return false;
 
-       if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
-               goto out_memcg;
+       if (!handle)
+               goto cleanup;
 
        owait.memcg = memcg;
        owait.wait.flags = 0;
@@ -2271,13 +2222,25 @@ bool mem_cgroup_oom_synchronize(void)
        INIT_LIST_HEAD(&owait.wait.task_list);
 
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
-       /* Only sleep if we didn't miss any wakeups since OOM */
-       if (atomic_read(&memcg->oom_wakeups) == current->memcg_oom.wakeups)
+       mem_cgroup_mark_under_oom(memcg);
+
+       locked = mem_cgroup_oom_trylock(memcg);
+
+       if (locked)
+               mem_cgroup_oom_notify(memcg);
+
+       if (locked && !memcg->oom_kill_disable) {
+               mem_cgroup_unmark_under_oom(memcg);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
+               mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
+                                        current->memcg_oom.order);
+       } else {
                schedule();
-       finish_wait(&memcg_oom_waitq, &owait.wait);
-out_memcg:
-       mem_cgroup_unmark_under_oom(memcg);
-       if (current->memcg_oom.oom_locked) {
+               mem_cgroup_unmark_under_oom(memcg);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
+       }
+
+       if (locked) {
                mem_cgroup_oom_unlock(memcg);
                /*
                 * There is no guarantee that an OOM-lock contender
@@ -2286,10 +2249,9 @@ out_memcg:
                 */
                memcg_oom_recover(memcg);
        }
+cleanup:
+       current->memcg_oom.memcg = NULL;
        css_put(&memcg->css);
-       current->memcg_oom.wait_on_memcg = NULL;
-out:
-       current->memcg_oom.in_memcg_oom = 0;
        return true;
 }
 
@@ -2703,6 +2665,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                     || fatal_signal_pending(current)))
                goto bypass;
 
+       if (unlikely(task_in_memcg_oom(current)))
+               goto bypass;
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -2801,6 +2766,8 @@ done:
        return 0;
 nomem:
        *ptr = NULL;
+       if (gfp_mask & __GFP_NOFAIL)
+               return 0;
        return -ENOMEM;
 bypass:
        *ptr = root_mem_cgroup;
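
The memcontrol.c rework above stops invoking the OOM killer from inside the charge path: the charge code only records the OOM context on the task, and the end of the page fault decides whether to actually handle it or merely discard the state. A stripped-down sketch of that defer-and-synchronize shape (single task, no locking; the names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Deferred OOM state, analogous to current->memcg_oom in the patch. */
struct oom_state {
        const char *group;      /* which group hit its limit, NULL if none */
        unsigned int order;     /* allocation order that failed */
};

static struct oom_state task_oom;       /* per-"task" state in this sketch */

/* Charge path: never block or kill here, only remember the context. */
static int try_charge(const char *group, unsigned int order)
{
        /* pretend the charge failed against the group's limit */
        task_oom.group = group;
        task_oom.order = order;
        return -1;                      /* stands in for -ENOMEM */
}

/* End of the "page fault": either handle the OOM or just drop the state. */
static bool oom_synchronize(bool handle)
{
        if (!task_oom.group)
                return false;           /* no deferred OOM pending */

        if (handle)
                printf("OOM handling for %s (order %u)\n",
                       task_oom.group, task_oom.order);
        else
                printf("fault recovered, dropping OOM state for %s\n",
                       task_oom.group);

        task_oom.group = NULL;          /* clean up in both cases */
        return true;
}

int main(void)
{
        bool fault_failed = (try_charge("demo-group", 0) != 0);

        /* Only escalate when the fault as a whole really failed. */
        oom_synchronize(fault_failed);
        return 0;
}
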
index ca00039..1311f26 100644 (file)
@@ -837,6 +837,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                         */
                                        make_migration_entry_read(&entry);
                                        pte = swp_entry_to_pte(entry);
+                                       if (pte_swp_soft_dirty(*src_pte))
+                                               pte = pte_swp_mksoft_dirty(pte);
                                        set_pte_at(src_mm, addr, src_pte, pte);
                                }
                        }
@@ -3863,15 +3865,21 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * space.  Kernel faults are handled more gracefully.
         */
        if (flags & FAULT_FLAG_USER)
-               mem_cgroup_enable_oom();
+               mem_cgroup_oom_enable();
 
        ret = __handle_mm_fault(mm, vma, address, flags);
 
-       if (flags & FAULT_FLAG_USER)
-               mem_cgroup_disable_oom();
-
-       if (WARN_ON(task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)))
-               mem_cgroup_oom_synchronize();
+       if (flags & FAULT_FLAG_USER) {
+               mem_cgroup_oom_disable();
+                /*
+                 * The task may have entered a memcg OOM situation but
+                 * if the allocation error was handled gracefully (no
+                 * VM_FAULT_OOM), there is no need to kill anything.
+                 * Just clean up the OOM state peacefully.
+                 */
+                if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+                        mem_cgroup_oom_synchronize(false);
+       }
 
        return ret;
 }
index a26bccd..7a7325e 100644 (file)
@@ -161,6 +161,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+       if (pte_swp_soft_dirty(*ptep))
+               pte = pte_mksoft_dirty(pte);
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
index 94722a4..a3af058 100644 (file)
@@ -94,13 +94,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
 
                        if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
-                               set_pte_at(mm, addr, pte,
-                                       swp_entry_to_pte(entry));
+                               newpte = swp_entry_to_pte(entry);
+                               if (pte_swp_soft_dirty(oldpte))
+                                       newpte = pte_swp_mksoft_dirty(newpte);
+                               set_pte_at(mm, addr, pte, newpte);
                        }
                        pages++;
                }
index 91b13d6..0843feb 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 
 #include "internal.h"
 
@@ -63,10 +62,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                return NULL;
 
        pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd) {
-               pud_free(mm, pud);
+       if (!pmd)
                return NULL;
-       }
 
        VM_BUG_ON(pmd_trans_huge(*pmd));
 
index 314e9d2..6738c47 100644 (file)
@@ -680,7 +680,7 @@ void pagefault_out_of_memory(void)
 {
        struct zonelist *zonelist;
 
-       if (mem_cgroup_oom_synchronize())
+       if (mem_cgroup_oom_synchronize(true))
                return;
 
        zonelist = node_zonelist(first_online_node, GFP_KERNEL);
index f5236f8..6380758 100644 (file)
@@ -1210,11 +1210,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
        return 1;
 }
 
-static long bdi_max_pause(struct backing_dev_info *bdi,
-                         unsigned long bdi_dirty)
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+                                  unsigned long bdi_dirty)
 {
-       long bw = bdi->avg_write_bandwidth;
-       long t;
+       unsigned long bw = bdi->avg_write_bandwidth;
+       unsigned long t;
 
        /*
         * Limit pause time for small memory systems. If sleeping for too long
@@ -1226,7 +1226,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi,
        t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
        t++;
 
-       return min_t(long, t, MAX_PAUSE);
+       return min_t(unsigned long, t, MAX_PAUSE);
 }
 
 static long bdi_min_pause(struct backing_dev_info *bdi,
index a344327..e2e98af 100644 (file)
@@ -56,6 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        continue;
                }
 
+#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
@@ -69,6 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        s = NULL;
                        return -EINVAL;
                }
+#endif
        }
 
        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
index 3963fc2..de7c904 100644 (file)
@@ -1824,6 +1824,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        struct filename *pathname;
        int i, type, prev;
        int err;
+       unsigned int old_block_size;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -1914,6 +1915,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        }
 
        swap_file = p->swap_file;
+       old_block_size = p->old_block_size;
        p->swap_file = NULL;
        p->max = 0;
        swap_map = p->swap_map;
@@ -1938,7 +1940,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        inode = mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct block_device *bdev = I_BDEV(inode);
-               set_blocksize(bdev, p->old_block_size);
+               set_blocksize(bdev, old_block_size);
                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        } else {
                mutex_lock(&inode->i_mutex);
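
The swapoff hunk above snapshots p->old_block_size into a local before the swap_info entry is torn down, so the later set_blocksize() call cannot observe a value overwritten by a concurrent swapon reusing the slot. A minimal single-threaded sketch of that copy-before-release idea (the structure is illustrative; the real race involves the swap locking):

#include <stdio.h>

struct swap_dev {
        unsigned int old_block_size;
        int in_use;
};

/* Tear-down path: snapshot what is still needed, then release the slot. */
static void swap_off(struct swap_dev *p)
{
        unsigned int old_block_size = p->old_block_size;    /* local copy */

        /*
         * From here on the slot may be reused (a concurrent swapon in the
         * real code); simulate that by clobbering the structure.
         */
        p->old_block_size = 4096;
        p->in_use = 0;

        /* Later cleanup still sees the value captured above. */
        printf("restoring block size %u\n", old_block_size);
}

int main(void)
{
        struct swap_dev dev = { .old_block_size = 512, .in_use = 1 };

        swap_off(&dev);
        return 0;
}
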
index 53f2f82..eea668d 100644 (file)
@@ -211,6 +211,7 @@ void unregister_shrinker(struct shrinker *shrinker)
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
+       kfree(shrinker->nr_deferred);
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
index 841e35f..d93510c 100644 (file)
@@ -804,6 +804,10 @@ static void zswap_frontswap_invalidate_area(unsigned type)
        }
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
+
+       zbud_destroy_pool(tree->pool);
+       kfree(tree);
+       zswap_trees[type] = NULL;
 }
 
 static struct zbud_ops zswap_zbud_ops = {
index 3091297..c7e634a 100644 (file)
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
 
        return nla_total_size(2) +      /* IFLA_VLAN_PROTOCOL */
               nla_total_size(2) +      /* IFLA_VLAN_ID */
-              sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+              nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
               vlan_qos_map_size(vlan->nr_ingress_mappings) +
               vlan_qos_map_size(vlan->nr_egress_mappings);
 }
index c72d1bc..1356af6 100644 (file)
@@ -65,6 +65,7 @@ static int __init batadv_init(void)
        batadv_recv_handler_init();
 
        batadv_iv_init();
+       batadv_nc_init();
 
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
@@ -142,7 +143,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       ret = batadv_nc_init(bat_priv);
+       ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0)
                goto err;
 
@@ -167,7 +168,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
        batadv_vis_quit(bat_priv);
 
        batadv_gw_node_purge(bat_priv);
-       batadv_nc_free(bat_priv);
+       batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);
 
index a487d46..4ecc0b6 100644 (file)
@@ -34,6 +34,20 @@ static void batadv_nc_worker(struct work_struct *work);
 static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
+/**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+       int ret;
+
+       /* Register our packet type */
+       ret = batadv_recv_handler_register(BATADV_CODED,
+                                          batadv_nc_recv_coded_packet);
+
+       return ret;
+}
+
 /**
  * batadv_nc_start_timer - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
@@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  */
-int batadv_nc_init(struct batadv_priv *bat_priv)
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
        bat_priv->nc.timestamp_fwd_flush = jiffies;
        bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
        batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
                                   &batadv_nc_decoding_hash_lock_class_key);
 
-       /* Register our packet type */
-       if (batadv_recv_handler_register(BATADV_CODED,
-                                        batadv_nc_recv_coded_packet) < 0)
-               goto err;
-
        INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
        batadv_nc_start_timer(bat_priv);
 
@@ -1721,12 +1730,11 @@ free_nc_packet:
 }
 
 /**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
-       batadv_recv_handler_unregister(BATADV_CODED);
        cancel_delayed_work_sync(&bat_priv->nc.work);
 
        batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
index 85a4ec8..ddfa618 100644 (file)
@@ -22,8 +22,9 @@
 
 #ifdef CONFIG_BATMAN_ADV_NC
 
-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node,
                              struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
 
 #else /* ifdef CONFIG_BATMAN_ADV_NC */
 
-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline int batadv_nc_init(void)
 {
        return 0;
 }
 
-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+{
+       return 0;
+}
+
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
        return;
 }
index ffd5874..33e8f23 100644 (file)
@@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
                vid = nla_get_u16(tb[NDA_VLAN]);
 
-               if (vid >= VLAN_N_VID) {
+               if (!vid || vid >= VLAN_VID_MASK) {
                        pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
                                vid);
                        return -EINVAL;
@@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
                vid = nla_get_u16(tb[NDA_VLAN]);
 
-               if (vid >= VLAN_N_VID) {
+               if (!vid || vid >= VLAN_VID_MASK) {
                        pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
                                vid);
                        return -EINVAL;
index 85a09bb..b7b1914 100644 (file)
@@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
                err = 0;
 
-               if (!mp->ports && !mp->mglist && mp->timer_armed &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
                break;
index d1c5786..8b0b610 100644 (file)
@@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                del_timer(&p->timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-               if (!mp->ports && !mp->mglist && mp->timer_armed &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
 
@@ -620,7 +620,6 @@ rehash:
 
        mp->br = br;
        mp->addr = *group;
-
        setup_timer(&mp->timer, br_multicast_group_expired,
                    (unsigned long)mp);
 
@@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
+       unsigned long now = jiffies;
        int err;
 
        spin_lock(&br->multicast_lock);
@@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 
        if (!port) {
                mp->mglist = true;
+               mod_timer(&mp->timer, now + br->multicast_membership_interval);
                goto out;
        }
 
@@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br,
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port == port)
-                       goto out;
+                       goto found;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }
@@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br,
        rcu_assign_pointer(*pp, p);
        br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 
+found:
+       mod_timer(&p->timer, now + br->multicast_membership_interval);
 out:
        err = 0;
 
@@ -1191,9 +1194,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
-       mod_timer(&mp->timer, now + br->multicast_membership_interval);
-       mp->timer_armed = true;
-
        max_delay *= br->multicast_last_member_count;
 
        if (mp->mglist &&
@@ -1270,9 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
-       mod_timer(&mp->timer, now + br->multicast_membership_interval);
-       mp->timer_armed = true;
-
        max_delay *= br->multicast_last_member_count;
        if (mp->mglist &&
            (timer_pending(&mp->timer) ?
@@ -1358,7 +1355,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
                        br_mdb_notify(br->dev, port, group, RTM_DELMDB);
 
-                       if (!mp->ports && !mp->mglist && mp->timer_armed &&
+                       if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
                                mod_timer(&mp->timer, jiffies);
                }
@@ -1370,12 +1367,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     br->multicast_last_member_interval;
 
        if (!port) {
-               if (mp->mglist && mp->timer_armed &&
+               if (mp->mglist &&
                    (timer_pending(&mp->timer) ?
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
                        mod_timer(&mp->timer, time);
                }
+
+               goto out;
+       }
+
+       for (p = mlock_dereference(mp->ports, br);
+            p != NULL;
+            p = mlock_dereference(p->next, br)) {
+               if (p->port != port)
+                       continue;
+
+               if (!hlist_unhashed(&p->mglist) &&
+                   (timer_pending(&p->timer) ?
+                    time_after(p->timer.expires, time) :
+                    try_to_del_timer_sync(&p->timer) >= 0)) {
+                       mod_timer(&p->timer, time);
+               }
+
+               break;
        }
 out:
        spin_unlock(&br->multicast_lock);
@@ -1798,7 +1813,6 @@ void br_multicast_stop(struct net_bridge *br)
                hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
                                          hlist[ver]) {
                        del_timer(&mp->timer);
-                       mp->timer_armed = false;
                        call_rcu_bh(&mp->rcu, br_multicast_free_group);
                }
        }
index e74ddc1..f75d92e 100644 (file)
@@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br,
 
                vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
 
-               if (vinfo->vid >= VLAN_N_VID)
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
                        return -EINVAL;
 
                switch (cmd) {
index efb57d9..e14c33b 100644 (file)
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
        struct timer_list               timer;
        struct br_ip                    addr;
        bool                            mglist;
-       bool                            timer_armed;
 };
 
 struct net_bridge_mdb_htable
@@ -643,9 +642,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
         * vid wasn't set
         */
        smp_rmb();
-       return (v->pvid & VLAN_TAG_PRESENT) ?
-                       (v->pvid & ~VLAN_TAG_PRESENT) :
-                       VLAN_N_VID;
+       return v->pvid ?: VLAN_N_VID;
 }
 
 #else
index 108084a..656a6f3 100644 (file)
@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
 
        if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
-       else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
+       else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
 
        if (r == 0) {
index 9a9ffe7..53f0990 100644 (file)
@@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
                return 0;
        }
 
-       if (vid) {
-               if (v->port_idx) {
-                       p = v->parent.port;
-                       br = p->br;
-                       dev = p->dev;
-               } else {
-                       br = v->parent.br;
-                       dev = br->dev;
-               }
-               ops = dev->netdev_ops;
-
-               if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
-                       /* Add VLAN to the device filter if it is supported.
-                        * Stricly speaking, this is not necessary now, since
-                        * devices are made promiscuous by the bridge, but if
-                        * that ever changes this code will allow tagged
-                        * traffic to enter the bridge.
-                        */
-                       err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
-                                                      vid);
-                       if (err)
-                               return err;
-               }
-
-               err = br_fdb_insert(br, p, dev->dev_addr, vid);
-               if (err) {
-                       br_err(br, "failed insert local address into bridge "
-                              "forwarding table\n");
-                       goto out_filt;
-               }
+       if (v->port_idx) {
+               p = v->parent.port;
+               br = p->br;
+               dev = p->dev;
+       } else {
+               br = v->parent.br;
+               dev = br->dev;
+       }
+       ops = dev->netdev_ops;
+
+       if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+               /* Add VLAN to the device filter if it is supported.
+                * Strictly speaking, this is not necessary now, since
+                * devices are made promiscuous by the bridge, but if
+                * that ever changes this code will allow tagged
+                * traffic to enter the bridge.
+                */
+               err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
+                                              vid);
+               if (err)
+                       return err;
+       }
 
+       err = br_fdb_insert(br, p, dev->dev_addr, vid);
+       if (err) {
+               br_err(br, "failed insert local address into bridge "
+                      "forwarding table\n");
+               goto out_filt;
        }
 
        set_bit(vid, v->vlan_bitmap);
@@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
        __vlan_delete_pvid(v, vid);
        clear_bit(vid, v->untagged_bitmap);
 
-       if (v->port_idx && vid) {
+       if (v->port_idx) {
                struct net_device *dev = v->parent.port->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
 
@@ -192,6 +189,8 @@ out:
 bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
                        struct sk_buff *skb, u16 *vid)
 {
+       int err;
+
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
@@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                return false;
 
-       if (br_vlan_get_tag(skb, vid)) {
+       err = br_vlan_get_tag(skb, vid);
+       if (!*vid) {
                u16 pvid = br_get_pvid(v);
 
-               /* Frame did not have a tag.  See if pvid is set
-                * on this port.  That tells us which vlan untagged
-                * traffic belongs to.
+               /* Frame had a tag with VID 0 or did not have a tag.
+                * See if pvid is set on this port.  That tells us which
+                * vlan untagged or priority-tagged traffic belongs to.
                 */
                if (pvid == VLAN_N_VID)
                        return false;
 
-               /* PVID is set on this port.  Any untagged ingress
-                * frame is considered to belong to this vlan.
+               /* PVID is set on this port.  Any untagged or priority-tagged
+                * ingress frame is considered to belong to this vlan.
                 */
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+               *vid = pvid;
+               if (likely(err))
+                       /* Untagged Frame. */
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+               else
+                       /* Priority-tagged Frame.
+                        * At this point, we know that skb->vlan_tci had the
+                        * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
+                        * We update only the VID field and preserve the PCP field.
+                        */
+                       skb->vlan_tci |= pvid;
+
                return true;
        }
 
@@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br,
        return false;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 {
        struct net_port_vlans *pv = NULL;
@@ -278,7 +291,9 @@ out:
        return err;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int br_vlan_delete(struct net_bridge *br, u16 vid)
 {
        struct net_port_vlans *pv;
@@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
        if (!pv)
                return -EINVAL;
 
-       if (vid) {
-               /* If the VID !=0 remove fdb for this vid. VID 0 is special
-                * in that it's the default and is always there in the fdb.
-                */
-               spin_lock_bh(&br->hash_lock);
-               fdb_delete_by_addr(br, br->dev->dev_addr, vid);
-               spin_unlock_bh(&br->hash_lock);
-       }
+       spin_lock_bh(&br->hash_lock);
+       fdb_delete_by_addr(br, br->dev->dev_addr, vid);
+       spin_unlock_bh(&br->hash_lock);
 
        __vlan_del(pv, vid);
        return 0;
@@ -329,7 +339,9 @@ unlock:
        return 0;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
        struct net_port_vlans *pv = NULL;
@@ -363,7 +375,9 @@ clean_up:
        return err;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 {
        struct net_port_vlans *pv;
@@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
        if (!pv)
                return -EINVAL;
 
-       if (vid) {
-               /* If the VID !=0 remove fdb for this vid. VID 0 is special
-                * in that it's the default and is always there in the fdb.
-                */
-               spin_lock_bh(&port->br->hash_lock);
-               fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
-               spin_unlock_bh(&port->br->hash_lock);
-       }
+       spin_lock_bh(&port->br->hash_lock);
+       fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
+       spin_unlock_bh(&port->br->hash_lock);
 
        return __vlan_del(pv, vid);
 }
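
The ingress change above lets a priority-tagged frame (VID 0) inherit the port's PVID just like an untagged frame, rewriting only the VID bits so the PCP priority survives. A tiny userspace sketch of that TCI handling; it uses a separate tagged flag instead of the kernel's VLAN_TAG_PRESENT encoding, and only the 802.1Q field layout is taken from the standard:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define VLAN_VID_MASK   0x0fff  /* low 12 bits of the TCI: VLAN ID */
/* The upper 4 bits of the TCI carry PCP (priority) and DEI. */

/*
 * Decide which VLAN an incoming frame belongs to.  Untagged and
 * priority-tagged (VID 0) frames inherit the port's PVID; for the
 * priority-tagged case only the VID bits are rewritten.
 */
static uint16_t ingress_tci(bool tagged, uint16_t tci, uint16_t pvid)
{
        if (!tagged)
                return pvid;            /* synthesize a tag carrying the PVID */

        if ((tci & VLAN_VID_MASK) == 0)
                return tci | pvid;      /* keep PCP/DEI, set VID only */

        return tci;                     /* frame already carries a real VID */
}

int main(void)
{
        uint16_t pvid = 100;

        printf("untagged        -> 0x%04x\n", ingress_tci(false, 0, pvid));
        printf("priority-tagged -> 0x%04x\n", ingress_tci(true, 0xe000, pvid));
        printf("tagged VID 42   -> 0x%04x\n", ingress_tci(true, 0x002a, pvid));
        return 0;
}
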
index f0a1ba6..8903258 100644 (file)
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
            __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
            __get_user(kmsg->msg_flags, &umsg->msg_flags))
                return -EFAULT;
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               return -EINVAL;
        kmsg->msg_name = compat_ptr(tmp1);
        kmsg->msg_iov = compat_ptr(tmp2);
        kmsg->msg_control = compat_ptr(tmp3);
index 65f829c..3430b1e 100644 (file)
@@ -1917,7 +1917,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
        return new_map;
 }
 
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+                       u16 index)
 {
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
        struct xps_map *map, *new_map;
index 6438f29..01b7808 100644 (file)
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
        bpf_jit_free(fp);
-       kfree(fp);
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+       fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        struct sk_filter *fp, *old_fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+       unsigned int sk_fsize = sk_filter_size(fprog->len);
        int err;
 
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+       fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-               sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+               sock_kfree_s(sk, fp, sk_fsize);
                return -EFAULT;
        }
 
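
The filter.c hunks above replace the open-coded fsize + sizeof(*fp) with sk_filter_size(), which derives the allocation size from the flexible insns[] array. A small userspace sketch of that flexible-array sizing idiom (struct and helper names are made up for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>

/* One BPF-style instruction (layout is illustrative). */
struct insn {
        uint16_t code;
        uint8_t  jt, jf;
        uint32_t k;
};

/* Header followed by a flexible array of instructions. */
struct filter {
        unsigned int len;       /* number of instructions */
        struct insn  insns[];   /* flexible array member */
};

/* Size the whole object from the element count, like sk_filter_size(). */
static size_t filter_size(unsigned int len)
{
        return offsetof(struct filter, insns) + (size_t)len * sizeof(struct insn);
}

static struct filter *filter_alloc(const struct insn *prog, unsigned int len)
{
        struct filter *fp = malloc(filter_size(len));

        if (!fp)
                return NULL;
        fp->len = len;
        memcpy(fp->insns, prog, (size_t)len * sizeof(struct insn));
        return fp;
}

int main(void)
{
        struct insn prog[2] = {
                { .code = 0x06, .k = 0xffff },  /* placeholder "accept" */
                { .code = 0x06, .k = 0 },       /* placeholder "drop" */
        };
        struct filter *fp = filter_alloc(prog, 2);

        if (!fp)
                return 1;
        printf("allocated %zu bytes for %u instructions\n",
               filter_size(fp->len), fp->len);
        free(fp);
        return 0;
}

Deriving the size in one helper keeps the kmalloc/sock_kmalloc calls and the sock_kfree_s() call in the hunk consistent with each other.
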
index 3f1ec15..8d9d05e 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <net/secure_seq.h>
 
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
 #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
 static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
@@ -29,6 +30,7 @@ static void net_secret_init(void)
                cmpxchg(&net_secret[--i], 0, tmp);
        }
 }
+#endif
 
 #ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
index 5b6beba..0b39e7a 100644 (file)
@@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_ll_usec          =       sysctl_net_busy_read;
 #endif
 
+       sk->sk_pacing_rate = ~0U;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
index c85e71e..ff41b4d 100644 (file)
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
        real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
+       if (real_dev->type != ARPHRD_IEEE802154)
+               return -EINVAL;
 
        lowpan_dev_info(dev)->real_dev = real_dev;
        lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 
        entry->ldev = dev;
 
+       /* Set the lowpan hardware address to the wpan hardware address. */
+       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        INIT_LIST_HEAD(&entry->list);
        list_add_tail(&entry->list, &lowpan_devices);
index 7bd8983..96da9c7 100644 (file)
@@ -287,7 +287,7 @@ begintw:
                        if (unlikely(!INET_TW_MATCH(sk, net, acookie,
                                                    saddr, daddr, ports,
                                                    dif))) {
-                               sock_put(sk);
+                               inet_twsk_put(inet_twsk(sk));
                                goto begintw;
                        }
                        goto out;
index a04d872..3982eab 100644 (file)
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;
 
-               skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
 
-               /* specify the length of each IP datagram fragment */
-               skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
                __skb_queue_tail(queue, skb);
+       } else if (skb_is_gso(skb)) {
+               goto append;
        }
 
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       /* specify the length of each IP datagram fragment */
+       skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
index e805e7b..6e87f85 100644 (file)
@@ -125,8 +125,17 @@ static int vti_rcv(struct sk_buff *skb)
                                  iph->saddr, iph->daddr, 0);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
+               u32 oldmark = skb->mark;
+               int ret;
 
-               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+
+               /* temporarily mark the skb with the tunnel o_key, to
+                * only match policies with this mark.
+                */
+               skb->mark = be32_to_cpu(tunnel->parms.o_key);
+               ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
+               skb->mark = oldmark;
+               if (!ret)
                        return -1;
 
                tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +144,6 @@ static int vti_rcv(struct sk_buff *skb)
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               skb->mark = 0;
                secpath_reset(skb);
                skb->dev = tunnel->dev;
                return 1;
@@ -167,7 +175,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&fl4, 0, sizeof(fl4));
        flowi4_init_output(&fl4, tunnel->parms.link,
-                          be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+                          be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
                           RT_SCOPE_UNIVERSE,
                           IPPROTO_IPIP, 0,
                           dst, tiph->saddr, 0, 0);
index 727f436..6011615 100644 (file)
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                                                              RT_SCOPE_LINK);
                        goto make_route;
                }
-               if (fl4->saddr) {
+               if (!fl4->saddr) {
                        if (ipv4_is_multicast(fl4->daddr))
                                fl4->saddr = inet_select_addr(dev_out, 0,
                                                              fl4->flowi4_scope);
index 25a89ea..a16b01b 100644 (file)
@@ -1284,7 +1284,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                tp->lost_cnt_hint -= tcp_skb_pcount(prev);
        }
 
-       TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+       TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               TCP_SKB_CB(prev)->end_seq++;
+
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
@@ -3288,7 +3291,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                        tcp_init_cwnd_reduction(sk, true);
                        tcp_set_ca_state(sk, TCP_CA_CWR);
                        tcp_end_cwnd_reduction(sk);
-                       tcp_set_ca_state(sk, TCP_CA_Open);
+                       tcp_try_keep_open(sk);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPLOSSPROBERECOVERY);
                }
@@ -5709,6 +5712,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                } else
                        tcp_init_metrics(sk);
 
+               tcp_update_pacing_rate(sk);
+
                /* Prevent spurious tcp_cwnd_restart() on first data packet */
                tp->lsndtime = tcp_time_stamp;
 
index e6bb825..d46f214 100644 (file)
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
        unsigned int size = 0;
        unsigned int eff_sacks;
 
+       opts->options = 0;
+
 #ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (unlikely(*md5)) {
@@ -984,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
 {
-       if (skb->len <= mss_now || !sk_can_gso(sk) ||
-           skb->ip_summed == CHECKSUM_NONE) {
+       /* Make sure we own this skb before messing with gso_size/gso_segs */
+       WARN_ON_ONCE(skb_cloned(skb));
+
+       if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
@@ -1065,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        if (nsize < 0)
                nsize = 0;
 
-       if (skb_cloned(skb) &&
-           skb_is_nonlinear(skb) &&
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;
 
        /* Get a new skb... force flag on. */
@@ -2342,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                int oldpcount = tcp_skb_pcount(skb);
 
                if (unlikely(oldpcount > 1)) {
+                       if (skb_unclone(skb, GFP_ATOMIC))
+                               return -ENOMEM;
                        tcp_init_tso_segs(sk, skb, cur_mss);
                        tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
                }
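
Both skb_unclone() hunks above replace open-coded skb_cloned()/pskb_expand_head() checks: before the GSO fields in the shared info area are rewritten, the skb must own its data. A minimal kernel-style sketch of that idiom, illustrative only and assuming <linux/skbuff.h>:

/* Illustrative only: ensure exclusive ownership of an skb before writing
 * to its shared info (gso_size/gso_segs). skb_unclone() makes the data
 * private via pskb_expand_head() when the skb is cloned.
 */
#include <linux/skbuff.h>

static int example_own_skb(struct sk_buff *skb)
{
        if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;         /* could not get a private copy */

        skb_shinfo(skb)->gso_segs = 0;  /* now safe to touch shared info */
        return 0;
}
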
index 9a459be..ccde542 100644
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 
        memset(fl4, 0, sizeof(struct flowi4));
        fl4->flowi4_mark = skb->mark;
+       fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
 
        if (!ip_is_fragment(iph)) {
                switch (iph->protocol) {
index 73784c3..82e1da3 100644
@@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index d3618a7..e67e63f 100644
@@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index 32b4a16..066640e 100644
@@ -116,7 +116,7 @@ begintw:
                        }
                        if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
                                                     ports, dif))) {
-                               sock_put(sk);
+                               inet_twsk_put(inet_twsk(sk));
                                goto begintw;
                        }
                        goto out;
index 7bb5446..bf4a9a0 100644
@@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                if (t->parms.o_flags&GRE_SEQ)
                        addend += 4;
        }
+       t->hlen = addend;
 
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
@@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                }
                ip6_rt_put(rt);
        }
-
-       t->hlen = addend;
 }
 
 static int ip6gre_tnl_change(struct ip6_tnl *t,
@@ -1173,9 +1172,8 @@ done:
 
 static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct ip6_tnl *tunnel = netdev_priv(dev);
        if (new_mtu < 68 ||
-           new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+           new_mtu > 0xFFF8 - dev->hard_header_len)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
index a54c45c..91fb4e8 100644
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock_bh();
-       nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+       nexthop = rt6_nexthop((struct rt6_info *)dst);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         */
        rt = (struct rt6_info *) *dst;
        rcu_read_lock_bh();
-       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
+       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
        err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
        rcu_read_unlock_bh();
 
@@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 
 {
        struct sk_buff *skb;
+       struct frag_hdr fhdr;
        int err;
 
        /* There is support for UDP large send offload by network
@@ -1015,8 +1016,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
         * udp datagram
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
-               struct frag_hdr fhdr;
-
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
@@ -1036,20 +1035,24 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb->transport_header = skb->network_header + fragheaderlen;
 
                skb->protocol = htons(ETH_P_IPV6);
-               skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
 
-               /* Specify the length of each IPv6 datagram fragment.
-                * It has to be a multiple of 8.
-                */
-               skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
-                                            sizeof(struct frag_hdr)) & ~7;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-               ipv6_select_ident(&fhdr, rt);
-               skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);
+       } else if (skb_is_gso(skb)) {
+               goto append;
        }
 
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       /* Specify the length of each IPv6 datagram fragment.
+        * It has to be a multiple of 8.
+        */
+       skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+                                    sizeof(struct frag_hdr)) & ~7;
+       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+       ipv6_select_ident(&fhdr, rt);
+       skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+
+append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
index a791552..583b77e 100644
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int
 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if (new_mtu < IPV6_MIN_MTU) {
-               return -EINVAL;
+       struct ip6_tnl *tnl = netdev_priv(dev);
+
+       if (tnl->parms.proto == IPPROTO_IPIP) {
+               if (new_mtu < 68)
+                       return -EINVAL;
+       } else {
+               if (new_mtu < IPV6_MIN_MTU)
+                       return -EINVAL;
        }
+       if (new_mtu > 0xFFF8 - dev->hard_header_len)
+               return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
 }
index 5636a91..ce507d9 100644
@@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                (struct ip_comp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index c979dd9..f54e3a1 100644
@@ -476,6 +476,24 @@ out:
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
+struct __rt6_probe_work {
+       struct work_struct work;
+       struct in6_addr target;
+       struct net_device *dev;
+};
+
+static void rt6_probe_deferred(struct work_struct *w)
+{
+       struct in6_addr mcaddr;
+       struct __rt6_probe_work *work =
+               container_of(w, struct __rt6_probe_work, work);
+
+       addrconf_addr_solict_mult(&work->target, &mcaddr);
+       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+       dev_put(work->dev);
+       kfree(w);
+}
+
 static void rt6_probe(struct rt6_info *rt)
 {
        struct neighbour *neigh;
@@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt)
 
        if (!neigh ||
            time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-               struct in6_addr mcaddr;
-               struct in6_addr *target;
+               struct __rt6_probe_work *work;
 
-               if (neigh) {
+               work = kmalloc(sizeof(*work), GFP_ATOMIC);
+
+               if (neigh && work)
                        neigh->updated = jiffies;
+
+               if (neigh)
                        write_unlock(&neigh->lock);
-               }
 
-               target = (struct in6_addr *)&rt->rt6i_gateway;
-               addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
+               if (work) {
+                       INIT_WORK(&work->work, rt6_probe_deferred);
+                       work->target = rt->rt6i_gateway;
+                       dev_hold(rt->dst.dev);
+                       work->dev = rt->dst.dev;
+                       schedule_work(&work->work);
+               }
        } else {
 out:
                write_unlock(&neigh->lock);
@@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
                        if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
-                       rt->rt6i_gateway = *daddr;
                }
 
                rt->rt6i_flags |= RTF_CACHE;
@@ -1338,6 +1361,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
        atomic_set(&rt->dst.__refcnt, 1);
+       rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
@@ -1873,7 +1897,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
                        in6_dev_hold(rt->rt6i_idev);
                rt->dst.lastuse = jiffies;
 
-               rt->rt6i_gateway = ort->rt6i_gateway;
+               if (ort->rt6i_flags & RTF_GATEWAY)
+                       rt->rt6i_gateway = ort->rt6i_gateway;
+               else
+                       rt->rt6i_gateway = *dest;
                rt->rt6i_flags = ort->rt6i_flags;
                if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
                    (RTF_DEFAULT | RTF_ADDRCONF))
@@ -2160,6 +2187,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        else
                rt->rt6i_flags |= RTF_LOCAL;
 
+       rt->rt6i_gateway  = *addr;
        rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
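
The rt6_probe() rework above moves the neighbour solicitation out of the locked path: the caller only allocates a small work item, copies the target address, takes a device reference, and lets a workqueue do the actual send. A generic sketch of that defer-to-workqueue shape, with hypothetical names (the real code additionally pins the net_device with dev_hold()/dev_put()):

/* Hypothetical names; sketch of the defer-to-workqueue pattern above. */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct probe_work {
        struct work_struct work;
        int target;                     /* stand-in for the real payload */
};

static void probe_work_fn(struct work_struct *w)
{
        struct probe_work *pw = container_of(w, struct probe_work, work);

        /* The slow or sleeping part runs here, in process context; if the
         * payload points at a refcounted object, the reference taken by
         * the scheduling side is dropped here (cf. dev_put() above).
         */
        kfree(pw);                      /* the worker owns the item */
}

static bool schedule_probe(int target)
{
        struct probe_work *pw = kmalloc(sizeof(*pw), GFP_ATOMIC);

        if (!pw)
                return false;           /* caller simply skips this probe */

        INIT_WORK(&pw->work, probe_work_fn);
        pw->target = target;
        schedule_work(&pw->work);
        return true;
}
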
index 72b7eaa..1878609 100644
@@ -1225,9 +1225,6 @@ do_udp_sendmsg:
        if (tclass < 0)
                tclass = np->tclass;
 
-       if (dontfrag < 0)
-               dontfrag = np->dontfrag;
-
        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
 back_from_confirm:
@@ -1246,6 +1243,8 @@ back_from_confirm:
        up->pending = AF_INET6;
 
 do_append_data:
+       if (dontfrag < 0)
+               dontfrag = np->dontfrag;
        up->len += ulen;
        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
index 23ed03d..08ed277 100644
@@ -138,6 +138,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
        memset(fl6, 0, sizeof(struct flowi6));
        fl6->flowi6_mark = skb->mark;
+       fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
 
        fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
        fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
index 9d58537..911ef03 100644
@@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 
        x->id.proto = proto;
        x->id.spi = sa->sadb_sa_spi;
-       x->props.replay_window = sa->sadb_sa_replay;
+       x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
+                                       (sizeof(x->replay.bitmap) * 8));
        if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
                x->props.flags |= XFRM_STATE_NOECN;
        if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
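
The min_t() above caps the userspace-supplied replay window at the number of bits the state's replay bitmap can actually track; the same clamp appears in the xfrm_user path further down, which is what lets xfrm_replay_check() drop its own bound. A tiny sketch of the clamp with a hypothetical bitmap size:

/* Sketch: clamp a requested replay window to the bitmap that backs it. */
#include <stdio.h>

#define REPLAY_BITMAP_BITS      (8 * 32)        /* hypothetical: a u32[8] bitmap */

static unsigned int clamp_replay_window(unsigned int requested)
{
        return requested < REPLAY_BITMAP_BITS ? requested : REPLAY_BITMAP_BITS;
}

int main(void)
{
        printf("%u\n", clamp_replay_window(4096));      /* prints 256 */
        return 0;
}
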
index feae495..b076e83 100644
@@ -115,6 +115,11 @@ struct l2tp_net {
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+       return sk->sk_user_data;
+}
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
        BUG_ON(!net);
@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
                return 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == PF_INET6) {
+       if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
                if (!uh->check) {
                        LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
                        return 1;
@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        /* Queue the packet to IP for output */
        skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-       if (skb->sk->sk_family == PF_INET6)
+       if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                error = inet6_csk_xmit(skb, NULL);
        else
 #endif
@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
                /* Calculate UDP checksum if configured to do so */
 #if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == PF_INET6)
+               if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                        l2tp_xmit_ipv6_csum(sk, skb, udp_len);
                else
 #endif
@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
  */
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
-       struct l2tp_tunnel *tunnel;
+       struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
        struct l2tp_net *pn;
 
-       tunnel = sk->sk_user_data;
        if (tunnel == NULL)
                goto end;
 
@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        }
 
        /* Check if this socket has already been prepped */
-       tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+       tunnel = l2tp_tunnel(sk);
        if (tunnel != NULL) {
                /* This socket has already been prepped */
                err = -EBUSY;
@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        if (cfg != NULL)
                tunnel->debug = cfg->debug;
 
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == PF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+
+               if (ipv6_addr_v4mapped(&np->saddr) &&
+                   ipv6_addr_v4mapped(&np->daddr)) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       tunnel->v4mapped = true;
+                       inet->inet_saddr = np->saddr.s6_addr32[3];
+                       inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
+                       inet->inet_daddr = np->daddr.s6_addr32[3];
+               } else {
+                       tunnel->v4mapped = false;
+               }
+       }
+#endif
+
        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
        tunnel->encap = encap;
        if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
                udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
                udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == PF_INET6)
+               if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                        udpv6_encap_enable();
                else
 #endif
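
The new v4mapped flag records whether an AF_INET6 tunnel socket is really carrying IPv4-mapped addresses (::ffff:a.b.c.d), so the transmit and checksum paths above can fall back to the IPv4 code. A standalone userspace sketch of the same detection (illustrative, not the kernel helper):

/* Userspace sketch: spot an IPv4-mapped IPv6 address and extract the
 * embedded IPv4 address (the last 32 bits of the in6_addr).
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct in6_addr a6;
        struct in_addr a4;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);

        if (IN6_IS_ADDR_V4MAPPED(&a6)) {
                memcpy(&a4, &a6.s6_addr[12], sizeof(a4));
                printf("v4-mapped: %s\n",
                       inet_ntop(AF_INET, &a4, buf, sizeof(buf)));
        }
        return 0;
}
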
index 66a559b..6f251cb 100644
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
        struct sock             *sock;          /* Parent socket */
        int                     fd;             /* Parent fd, if tunnel socket
                                                 * was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+       bool                    v4mapped;
+#endif
 
        struct work_struct      del_work;
 
index 5ebee2d..8c46b27 100644
@@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
                goto error_put_sess_tun;
        }
 
+       local_bh_disable();
        l2tp_xmit_skb(session, skb, session->hdr_len);
+       local_bh_enable();
 
        sock_put(ps->tunnel_sock);
        sock_put(sk);
@@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        skb->data[0] = ppph[0];
        skb->data[1] = ppph[1];
 
+       local_bh_disable();
        l2tp_xmit_skb(session, skb, session->hdr_len);
+       local_bh_enable();
 
        sock_put(sk_tun);
        sock_put(sk);
index 2e7855a..629dee7 100644
@@ -3518,7 +3518,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
                return -EINVAL;
        }
        band = chanctx_conf->def.chan->band;
-       sta = sta_info_get(sdata, peer);
+       sta = sta_info_get_bss(sdata, peer);
        if (sta) {
                qos = test_sta_flag(sta, WLAN_STA_WME);
        } else {
index b618651..611abfc 100644
@@ -893,6 +893,8 @@ struct tpt_led_trigger {
  *     that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
  *     a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ *     cancelled.
  */
 enum {
        SCAN_SW_SCANNING,
@@ -900,6 +902,7 @@ enum {
        SCAN_ONCHANNEL_SCANNING,
        SCAN_COMPLETED,
        SCAN_ABORTED,
+       SCAN_HW_CANCELLED,
 };
 
 /**
index acd1f71..0c2a294 100644
@@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 
                if (started)
                        ieee80211_start_next_roc(local);
+               else if (list_empty(&local->roc_list))
+                       ieee80211_run_deferred_scan(local);
        }
 
  out_unlock:
index 54395d7..674eac1 100644
@@ -3056,6 +3056,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
        case NL80211_IFTYPE_ADHOC:
                if (!bssid)
                        return 0;
+               if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+                       return 0;
                if (ieee80211_is_beacon(hdr->frame_control)) {
                        return 1;
                } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
index 08afe74..d2d17a4 100644
@@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        enum ieee80211_band band;
        int i, ielen, n_chans;
 
+       if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+               return false;
+
        do {
                if (local->hw_scan_band == IEEE80211_NUM_BANDS)
                        return false;
@@ -940,7 +943,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
        if (!local->scan_req)
                goto out;
 
+       /*
+        * We have a scan running and the driver already reported completion,
+        * but the worker hasn't run yet or is stuck on the mutex - mark it as
+        * cancelled.
+        */
+       if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+           test_bit(SCAN_COMPLETED, &local->scanning)) {
+               set_bit(SCAN_HW_CANCELLED, &local->scanning);
+               goto out;
+       }
+
        if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
+               /*
+                * Make sure that __ieee80211_scan_completed doesn't trigger a
+                * scan on another band.
+                */
+               set_bit(SCAN_HW_CANCELLED, &local->scanning);
                if (local->ops->cancel_hw_scan)
                        drv_cancel_hw_scan(local,
                                rcu_dereference_protected(local->scan_sdata,
index 368837f..78dc2e9 100644
@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
 
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               sta->last_rx = jiffies;
+
        if (ieee80211_is_data_qos(mgmt->frame_control)) {
                struct ieee80211_hdr *hdr = (void *) skb->data;
                u8 *qc = ieee80211_get_qos_ctl(hdr);
index 3456c04..70b5a05 100644
@@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                tx->sta = rcu_dereference(sdata->u.vlan.sta);
                if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
                        return TX_DROP;
-       } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+       } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
+                                 IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
                   tx->sdata->control_port_protocol == tx->skb->protocol) {
                tx->sta = sta_info_get_bss(sdata, hdr->addr1);
        }
index e1b34a1..69e4ef5 100644
@@ -2103,7 +2103,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
-       int rate, skip, shift;
+       int rate, shift;
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
        u32 rate_flags;
@@ -2131,14 +2131,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                pos = skb_put(skb, exrates + 2);
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                *pos++ = exrates;
-               skip = 0;
                for (i = 8; i < sband->n_bitrates; i++) {
                        u8 basic = 0;
                        if ((rate_flags & sband->bitrates[i].flags)
                            != rate_flags)
                                continue;
-                       if (skip++ < 8)
-                               continue;
                        if (need_basic && basic_rates & BIT(i))
                                basic = 0x80;
                        rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -2241,6 +2238,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
        }
 
        rate = cfg80211_calculate_bitrate(&ri);
+       if (WARN_ONCE(!rate,
+                     "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
+                     status->flag, status->rate_idx, status->vht_nss))
+               return 0;
 
        /* rewind from end of MPDU */
        if (status->flag & RX_FLAG_MACTIME_END)
index bdebd03..70866d1 100644
@@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                                   flowi6_to_flowi(&fl1), false)) {
                        if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
                                           flowi6_to_flowi(&fl2), false)) {
-                               if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
-                                           sizeof(rt1->rt6i_gateway)) &&
+                               if (ipv6_addr_equal(rt6_nexthop(rt1),
+                                                   rt6_nexthop(rt2)) &&
                                    rt1->dst.dev == rt2->dst.dev)
                                        ret = 1;
                                dst_release(&rt2->dst);
index a2fef8b..a9dfdda 100644
@@ -472,20 +472,16 @@ begin:
        if (f->credit > 0 || !q->rate_enable)
                goto out;
 
-       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-               rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+       rate = q->flow_max_rate;
+       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+               rate = min(skb->sk->sk_pacing_rate, rate);
 
-               rate = min(rate, q->flow_max_rate);
-       } else {
-               rate = q->flow_max_rate;
-               if (rate == ~0U)
-                       goto out;
-       }
-       if (rate) {
+       if (rate != ~0U) {
                u32 plen = max(qdisc_pkt_len(skb), q->quantum);
                u64 len = (u64)plen * NSEC_PER_SEC;
 
-               do_div(len, rate);
+               if (likely(rate))
+                       do_div(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 125 ms.
                 * TODO: maybe segment the too big skb, as in commit
@@ -656,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
                q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
-               q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+               q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
 
        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
 
+       /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+        * do not bother giving its value
+        */
        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
-           nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;
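
The dequeue path above turns a packet length and a pacing rate into a hold time, guarding the divide and clamping the result to 125 ms as the in-tree comment notes. A hedged userspace sketch of that arithmetic:

/* Sketch of the hold-time arithmetic: nanoseconds needed so that the
 * flow does not exceed 'rate_bytes' bytes per second, clamped to 125 ms.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC    1000000000ULL
#define MAX_DELAY_NS    125000000ULL

static uint64_t pacing_delay_ns(uint32_t plen, uint32_t rate_bytes)
{
        uint64_t ns;

        if (!rate_bytes)                /* no usable rate: send immediately */
                return 0;
        ns = (uint64_t)plen * NSEC_PER_SEC / rate_bytes;
        return ns < MAX_DELAY_NS ? ns : MAX_DELAY_NS;
}

int main(void)
{
        /* a 1500-byte packet at 1 MB/s is held for 1.5 ms */
        printf("%llu ns\n",
               (unsigned long long)pacing_delay_ns(1500, 1000000));
        return 0;
}
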
index a6d788d..b87e83d 100644
@@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
        return PSCHED_NS2TICKS(ticks);
 }
 
+static void tfifo_reset(struct Qdisc *sch)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       struct rb_node *p;
+
+       while ((p = rb_first(&q->t_root))) {
+               struct sk_buff *skb = netem_rb_to_skb(p);
+
+               rb_erase(p, &q->t_root);
+               skb->next = NULL;
+               skb->prev = NULL;
+               kfree_skb(skb);
+       }
+}
+
 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -520,6 +535,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
                        skb->next = NULL;
                        skb->prev = NULL;
                        len = qdisc_pkt_len(skb);
+                       sch->qstats.backlog -= len;
                        kfree_skb(skb);
                }
        }
@@ -609,6 +625,7 @@ static void netem_reset(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset_queue(sch);
+       tfifo_reset(sch);
        if (q->qdisc)
                qdisc_reset(q->qdisc);
        qdisc_watchdog_cancel(&q->watchdog);
index 0ac3a65..3191373 100644
@@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
        if (!sctp_checksum_disable) {
-               if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+               if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+                   (dst_xfrm(dst) != NULL) || packet->ipfragok) {
                        __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
                        /* 3) Put the resultant value into the checksum field in the
index ebed4b6..c226ace 100644
@@ -1964,6 +1964,16 @@ struct used_address {
        unsigned int name_len;
 };
 
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+                                struct msghdr __user *umsg)
+{
+       if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+               return -EFAULT;
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               return -EINVAL;
+       return 0;
+}
+
 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags,
                         struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
        if (MSG_CMSG_COMPAT & flags) {
                if (get_compat_msghdr(msg_sys, msg_compat))
                        return -EFAULT;
-       } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-               return -EFAULT;
+       } else {
+               err = copy_msghdr_from_user(msg_sys, msg);
+               if (err)
+                       return err;
+       }
 
        if (msg_sys->msg_iovlen > UIO_FASTIOV) {
                err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
        if (MSG_CMSG_COMPAT & flags) {
                if (get_compat_msghdr(msg_sys, msg_compat))
                        return -EFAULT;
-       } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-               return -EFAULT;
+       } else {
+               err = copy_msghdr_from_user(msg_sys, msg);
+               if (err)
+                       return err;
+       }
 
        if (msg_sys->msg_iovlen > UIO_FASTIOV) {
                err = -EMSGSIZE;
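
Both ___sys_sendmsg() and ___sys_recvmsg() now funnel through one helper that rejects a msg_namelen larger than sockaddr_storage before anything else looks at the header. The invariant it enforces, as a small userspace sketch (sizes are illustrative; sockaddr_storage is 128 bytes on Linux):

/* Sketch of the bound the new helper enforces: a msghdr name must fit
 * in a sockaddr_storage.
 */
#include <stdio.h>
#include <sys/socket.h>

static int msg_namelen_ok(socklen_t namelen)
{
        return namelen <= (socklen_t)sizeof(struct sockaddr_storage);
}

int main(void)
{
        printf("%d\n", msg_namelen_ok(128));    /* accepted: prints 1 */
        printf("%d\n", msg_namelen_ok(4096));   /* rejected: prints 0 */
        return 0;
}
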
index 86de99a..c1f403b 100644
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
        return 0;
 }
 
+static void unix_sock_inherit_flags(const struct socket *old,
+                                   struct socket *new)
+{
+       if (test_bit(SOCK_PASSCRED, &old->flags))
+               set_bit(SOCK_PASSCRED, &new->flags);
+       if (test_bit(SOCK_PASSSEC, &old->flags))
+               set_bit(SOCK_PASSSEC, &new->flags);
+}
+
 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
 {
        struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
        /* attach accepted sock to socket */
        unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
+       unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;
index d591091..86fa0f3 100644
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
        rep->udiag_state = sk->sk_state;
+       rep->pad = 0;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);
 
index 6715396..aff959e 100644
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
        /* check and set up bitrates */
        ieee80211_set_bitrate_flags(wiphy);
 
-
+       rtnl_lock();
        res = device_add(&rdev->wiphy.dev);
-       if (res)
-               return res;
-
-       res = rfkill_register(rdev->rfkill);
        if (res) {
-               device_del(&rdev->wiphy.dev);
+               rtnl_unlock();
                return res;
        }
 
-       rtnl_lock();
        /* set up regulatory info */
        wiphy_regulatory_register(wiphy);
 
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
 
        rdev->wiphy.registered = true;
        rtnl_unlock();
+
+       res = rfkill_register(rdev->rfkill);
+       if (res) {
+               rfkill_destroy(rdev->rfkill);
+               rdev->rfkill = NULL;
+               wiphy_unregister(&rdev->wiphy);
+               return res;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
                rtnl_unlock();
                __count == 0; }));
 
-       rfkill_unregister(rdev->rfkill);
+       if (rdev->rfkill)
+               rfkill_unregister(rdev->rfkill);
 
        rtnl_lock();
        rdev->wiphy.registered = false;
@@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
                        return notifier_from_errno(-EOPNOTSUPP);
-               if (rfkill_blocked(rdev->rfkill))
-                       return notifier_from_errno(-ERFKILL);
                ret = cfg80211_can_add_interface(rdev, wdev->iftype);
                if (ret)
                        return notifier_from_errno(ret);
index 9ad43c6..3159e9c 100644
@@ -411,6 +411,9 @@ static inline int
 cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
                           enum nl80211_iftype iftype)
 {
+       if (rfkill_blocked(rdev->rfkill))
+               return -ERFKILL;
+
        return cfg80211_can_change_interface(rdev, NULL, iftype);
 }
 
index 39bff7d..403fe29 100644
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
                                if (chan->flags & IEEE80211_CHAN_DISABLED)
                                        continue;
                                wdev->wext.ibss.chandef.chan = chan;
+                               wdev->wext.ibss.chandef.center_freq1 =
+                                       chan->center_freq;
                                break;
                        }
 
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
        if (chan) {
                wdev->wext.ibss.chandef.chan = chan;
                wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+               wdev->wext.ibss.chandef.center_freq1 = freq;
                wdev->wext.ibss.channel_fixed = true;
        } else {
                /* cfg80211_ibss_wext_join will pick one if needed */
index af8d84a..626dc3b 100644
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
                change = true;
        }
 
-       if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+       if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                                  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
                                  &flags);
 
-       if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+       if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
index 7d604c0..a271c27 100644
@@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
        struct ieee80211_radiotap_header *radiotap_header,
        int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
 {
+       /* check the radiotap header can actually be present */
+       if (max_length < sizeof(struct ieee80211_radiotap_header))
+               return -EINVAL;
+
        /* Linux only supports version 0 radiotap format */
        if (radiotap_header->it_version)
                return -EINVAL;
@@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
                         */
 
                        if ((unsigned long)iterator->_arg -
-                           (unsigned long)iterator->_rtheader >
+                           (unsigned long)iterator->_rtheader +
+                           sizeof(uint32_t) >
                            (unsigned long)iterator->_max_length)
                                return -EINVAL;
                }
index ed38d5d..76e1873 100644
@@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 
        atomic_inc(&policy->genid);
 
-       del_timer(&policy->polq.hold_timer);
+       if (del_timer(&policy->polq.hold_timer))
+               xfrm_pol_put(policy);
        xfrm_queue_purge(&policy->polq.hold_queue);
 
        if (del_timer(&policy->timer))
@@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
 
        spin_lock_bh(&pq->hold_queue.lock);
        skb_queue_splice_init(&pq->hold_queue, &list);
-       del_timer(&pq->hold_timer);
+       if (del_timer(&pq->hold_timer))
+               xfrm_pol_put(old);
        spin_unlock_bh(&pq->hold_queue.lock);
 
        if (skb_queue_empty(&list))
@@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
        spin_lock_bh(&pq->hold_queue.lock);
        skb_queue_splice(&list, &pq->hold_queue);
        pq->timeout = XFRM_QUEUE_TMO_MIN;
-       mod_timer(&pq->hold_timer, jiffies);
+       if (!mod_timer(&pq->hold_timer, jiffies))
+               xfrm_pol_hold(new);
        spin_unlock_bh(&pq->hold_queue.lock);
 }
 
@@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg)
 
        spin_lock(&pq->hold_queue.lock);
        skb = skb_peek(&pq->hold_queue);
+       if (!skb) {
+               spin_unlock(&pq->hold_queue.lock);
+               goto out;
+       }
        dst = skb_dst(skb);
        sk = skb->sk;
        xfrm_decode_session(skb, &fl, dst->ops->family);
@@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg)
                        goto purge_queue;
 
                pq->timeout = pq->timeout << 1;
-               mod_timer(&pq->hold_timer, jiffies + pq->timeout);
-               return;
+               if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
+                       xfrm_pol_hold(pol);
+       goto out;
        }
 
        dst_release(dst);
@@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg)
                err = dst_output(skb);
        }
 
+out:
+       xfrm_pol_put(pol);
        return;
 
 purge_queue:
        pq->timeout = 0;
        xfrm_queue_purge(&pq->hold_queue);
+       xfrm_pol_put(pol);
 }
 
 static int xdst_queue_output(struct sk_buff *skb)
@@ -1831,7 +1842,8 @@ static int xdst_queue_output(struct sk_buff *skb)
        unsigned long sched_next;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
-       struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
+       struct xfrm_policy *pol = xdst->pols[0];
+       struct xfrm_policy_queue *pq = &pol->polq;
 
        if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
                kfree_skb(skb);
@@ -1850,10 +1862,12 @@ static int xdst_queue_output(struct sk_buff *skb)
        if (del_timer(&pq->hold_timer)) {
                if (time_before(pq->hold_timer.expires, sched_next))
                        sched_next = pq->hold_timer.expires;
+               xfrm_pol_put(pol);
        }
 
        __skb_queue_tail(&pq->hold_queue, skb);
-       mod_timer(&pq->hold_timer, sched_next);
+       if (!mod_timer(&pq->hold_timer, sched_next))
+               xfrm_pol_hold(pol);
 
        spin_unlock_bh(&pq->hold_queue.lock);
 
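
The pattern running through these hunks: a pending hold_timer owns one reference on its policy. mod_timer() returning 0 means an idle timer was armed, so a reference is taken; del_timer() returning nonzero means a pending timer was cancelled, so its reference is dropped; and the expiry handler drops its own reference when it finishes. A generic kernel-style sketch of that pairing with a hypothetical object (timer setup omitted):

/* Hypothetical object; sketch of the timer-owns-a-reference pairing. */
#include <linux/timer.h>
#include <linux/kref.h>

struct pinned_obj {
        struct kref ref;
        struct timer_list timer;        /* setup/teardown omitted */
};

static void pinned_obj_release(struct kref *k)
{
        /* free the containing object here */
}

static void pinned_obj_arm(struct pinned_obj *o, unsigned long expires)
{
        /* mod_timer() returns 0 when the timer was not already pending:
         * the timer now holds a reference it did not hold before.
         */
        if (!mod_timer(&o->timer, expires))
                kref_get(&o->ref);
}

static void pinned_obj_cancel(struct pinned_obj *o)
{
        /* del_timer() returns nonzero when a pending timer was removed:
         * drop the reference that timer was holding.
         */
        if (del_timer(&o->timer))
                kref_put(&o->ref, pinned_obj_release);
}
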
index 8dafe6d..dab57da 100644
@@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
-                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
+               if (!x->replay_maxdiff ||
+                   ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
+                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
                return 0;
 
        diff = x->replay.seq - seq;
-       if (diff >= min_t(unsigned int, x->props.replay_window,
-                         sizeof(x->replay.bitmap) * 8)) {
+       if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                goto err;
        }
@@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
-                   (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) {
+               if (!x->replay_maxdiff ||
+                   ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
+                   (replay_esn->oseq - preplay_esn->oseq
+                    < x->replay_maxdiff))) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
@@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (!x->replay_maxdiff)
-                       break;
-
-               if (replay_esn->seq_hi == preplay_esn->seq_hi)
-                       seq_diff = replay_esn->seq - preplay_esn->seq;
-               else
-                       seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
-
-               if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
-                       oseq_diff = replay_esn->oseq - preplay_esn->oseq;
-               else
-                       oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
-
-               if (seq_diff < x->replay_maxdiff &&
-                   oseq_diff < x->replay_maxdiff) {
+               if (x->replay_maxdiff) {
+                       if (replay_esn->seq_hi == preplay_esn->seq_hi)
+                               seq_diff = replay_esn->seq - preplay_esn->seq;
+                       else
+                               seq_diff = ~preplay_esn->seq + replay_esn->seq
+                                          + 1;
 
-                       if (x->xflags & XFRM_TIME_DEFER)
-                               event = XFRM_REPLAY_TIMEOUT;
+                       if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+                               oseq_diff = replay_esn->oseq
+                                           - preplay_esn->oseq;
                        else
-                               return;
+                               oseq_diff = ~preplay_esn->oseq
+                                           + replay_esn->oseq + 1;
+
+                       if (seq_diff >= x->replay_maxdiff ||
+                           oseq_diff >= x->replay_maxdiff)
+                               break;
                }
 
+               if (x->xflags & XFRM_TIME_DEFER)
+                       event = XFRM_REPLAY_TIMEOUT;
+               else
+                       return;
+
                break;
 
        case XFRM_REPLAY_TIMEOUT:
index 3f565e4..f964d4c 100644
@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
        memcpy(&x->sel, &p->sel, sizeof(x->sel));
        memcpy(&x->lft, &p->lft, sizeof(x->lft));
        x->props.mode = p->mode;
-       x->props.replay_window = p->replay_window;
+       x->props.replay_window = min_t(unsigned int, p->replay_window,
+                                       sizeof(x->replay.bitmap) * 8);
        x->props.reqid = p->reqid;
        x->props.family = p->family;
        memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
@@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (x->km.state != XFRM_STATE_VALID)
                goto out;
 
-       err = xfrm_replay_verify_len(x->replay_esn, rp);
+       err = xfrm_replay_verify_len(x->replay_esn, re);
        if (err)
                goto out;
 
index 95c2b26..7db9954 100644
@@ -580,15 +580,13 @@ static struct aa_namespace *__next_namespace(struct aa_namespace *root,
 
        /* check if the next ns is a sibling, parent, gp, .. */
        parent = ns->parent;
-       while (parent) {
+       while (ns != root) {
                mutex_unlock(&ns->lock);
                next = list_entry_next(ns, base.list);
                if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
                        mutex_lock(&next->lock);
                        return next;
                }
-               if (parent == root)
-                       return NULL;
                ns = parent;
                parent = parent->parent;
        }
index 345bec0..705c287 100644
@@ -610,6 +610,7 @@ void aa_free_profile(struct aa_profile *profile)
        aa_put_dfa(profile->policy.dfa);
        aa_put_replacedby(profile->replacedby);
 
+       kzfree(profile->hash);
        kzfree(profile);
 }
 
index ac41e9c..26ad4f0 100644
@@ -3531,7 +3531,7 @@ static int create_capture_mixers(struct hda_codec *codec)
                if (!multi)
                        err = create_single_cap_vol_ctl(codec, n, vol, sw,
                                                        inv_dmic);
-               else if (!multi_cap_vol)
+               else if (!multi_cap_vol && !inv_dmic)
                        err = create_bind_cap_vol_ctl(codec, n, vol, sw);
                else
                        err = create_multi_cap_vol_ctl(codec);
index 4f255df..f59a321 100644
@@ -4845,6 +4845,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                        if ((err = hdsp_get_iobox_version(hdsp)) < 0)
                                return err;
                }
+               memset(&hdsp_version, 0, sizeof(hdsp_version));
                hdsp_version.io_type = hdsp->io_type;
                hdsp_version.firmware_rev = hdsp->firmware_rev;
                if ((err = copy_to_user(argp, &hdsp_version, sizeof(hdsp_version))))
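
The memset() above makes sure padding and any field the driver leaves untouched reach userspace as zeros rather than stale kernel stack. A kernel-style sketch of the general pattern, with a hypothetical ioctl reply struct:

/* Sketch with a hypothetical ioctl reply: zero the whole struct first so
 * padding and unset fields never carry stale kernel stack to userspace.
 */
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_version {
        int io_type;
        int firmware_rev;
};

static long demo_fill_version(void __user *argp)
{
        struct demo_version v;

        memset(&v, 0, sizeof(v));       /* covers padding and skipped fields */
        v.io_type = 1;
        v.firmware_rev = 42;
        return copy_to_user(argp, &v, sizeof(v)) ? -EFAULT : 0;
}
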
index 651ce09..c91eba5 100644
@@ -270,7 +270,7 @@ MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
 static const struct regmap_config pcm1681_regmap = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .max_register           = ARRAY_SIZE(pcm1681_reg_defaults) + 1,
+       .max_register           = 0x13,
        .reg_defaults           = pcm1681_reg_defaults,
        .num_reg_defaults       = ARRAY_SIZE(pcm1681_reg_defaults),
        .writeable_reg          = pcm1681_writeable_reg,
index 2a8eccf..7613181 100644
@@ -188,7 +188,7 @@ MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
 static const struct regmap_config pcm1792a_regmap = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .max_register           = 24,
+       .max_register           = 23,
        .reg_defaults           = pcm1792a_reg_defaults,
        .num_reg_defaults       = ARRAY_SIZE(pcm1792a_reg_defaults),
        .writeable_reg          = pcm1792a_writeable_reg,
index 6e3f269..64ad84d 100644
@@ -674,6 +674,8 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* Left Input */
        {"Left Line1L Mux", "single-ended", "LINE1L"},
        {"Left Line1L Mux", "differential", "LINE1L"},
+       {"Left Line1R Mux", "single-ended", "LINE1R"},
+       {"Left Line1R Mux", "differential", "LINE1R"},
 
        {"Left Line2L Mux", "single-ended", "LINE2L"},
        {"Left Line2L Mux", "differential", "LINE2L"},
@@ -690,6 +692,8 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* Right Input */
        {"Right Line1R Mux", "single-ended", "LINE1R"},
        {"Right Line1R Mux", "differential", "LINE1R"},
+       {"Right Line1L Mux", "single-ended", "LINE1L"},
+       {"Right Line1L Mux", "differential", "LINE1L"},
 
        {"Right Line2R Mux", "single-ended", "LINE2R"},
        {"Right Line2R Mux", "differential", "LINE2R"},
index c6b7439..6b81d0c 100644
@@ -936,7 +936,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        ssi_private->ssi_phys = res.start;
 
        ssi_private->irq = irq_of_parse_and_map(np, 0);
-       if (ssi_private->irq == NO_IRQ) {
+       if (ssi_private->irq == 0) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
                return -ENXIO;
        }
index a3d60d4..a2fd732 100644
@@ -112,7 +112,7 @@ static int imx_mc13783_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (machine_is_mx31_3ds()) {
+       if (machine_is_mx31_3ds() || machine_is_mx31moboard()) {
                imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
                        IMX_AUDMUX_V2_PTCR_SYN,
                        IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
index f58bcd8..57d6941 100644
@@ -600,19 +600,17 @@ static int imx_ssi_probe(struct platform_device *pdev)
        ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
        ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
-       ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
-       if (ret)
-               goto failed_pcm_fiq;
+       ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+       ssi->dma_init = imx_pcm_dma_init(pdev);
 
-       ret = imx_pcm_dma_init(pdev);
-       if (ret)
-               goto failed_pcm_dma;
+       if (ssi->fiq_init && ssi->dma_init) {
+               ret = ssi->fiq_init;
+               goto failed_pcm;
+       }
 
        return 0;
 
-failed_pcm_dma:
-       imx_pcm_fiq_exit(pdev);
-failed_pcm_fiq:
+failed_pcm:
        snd_soc_unregister_component(&pdev->dev);
 failed_register:
        release_mem_region(res->start, resource_size(res));
@@ -628,8 +626,11 @@ static int imx_ssi_remove(struct platform_device *pdev)
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct imx_ssi *ssi = platform_get_drvdata(pdev);
 
-       imx_pcm_dma_exit(pdev);
-       imx_pcm_fiq_exit(pdev);
+       if (!ssi->dma_init)
+               imx_pcm_dma_exit(pdev);
+
+       if (!ssi->fiq_init)
+               imx_pcm_fiq_exit(pdev);
 
        snd_soc_unregister_component(&pdev->dev);
 
index fb1616b..560c40f 100644
@@ -211,6 +211,8 @@ struct imx_ssi {
        struct imx_dma_data filter_data_rx;
        struct imx_pcm_fiq_params fiq_params;
 
+       int fiq_init;
+       int dma_init;
        int enabled;
 };
 
index daa78a0..4a07f71 100644
@@ -1,6 +1,6 @@
 config SND_OMAP_SOC
        tristate "SoC Audio for the Texas Instruments OMAP chips"
-       depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST)
+       depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
        select SND_DMAENGINE_PCM
 
 config SND_OMAP_SOC_DMIC
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
 
 config SND_OMAP_SOC_RX51
        tristate "SoC Audio support for Nokia RX-51"
-       depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
+       depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
        select SND_OMAP_SOC_MCBSP
        select SND_SOC_TLV320AIC3X
        select SND_SOC_TPA6130A2
index 9cc6986..5dd87f4 100644
@@ -220,8 +220,8 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
 void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
                               struct rsnd_mod *mod,
                               enum rsnd_reg reg);
-#define rsnd_is_gen1(s)                ((s)->info->flags & RSND_GEN1)
-#define rsnd_is_gen2(s)                ((s)->info->flags & RSND_GEN2)
+#define rsnd_is_gen1(s)                (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
+#define rsnd_is_gen2(s)                (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
 
 /*
  *     R-Car ADG
index d0323a6..999550b 100644
@@ -262,7 +262,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
        }
 
        area->vm_ops = &usb_stream_hwdep_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       area->vm_flags |= VM_DONTDUMP;
+       if (!read)
+               area->vm_flags |= VM_DONTEXPAND;
        area->vm_private_data = us122l;
        atomic_inc(&us122l->mmap_count);
 out:
index 9b393e7..63df031 100644
@@ -187,7 +187,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                return -1;
        }
 
-       event->header.type = PERF_RECORD_MMAP2;
+       event->header.type = PERF_RECORD_MMAP;
        /*
         * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
         */
@@ -198,7 +198,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                char prot[5];
                char execname[PATH_MAX];
                char anonstr[] = "//anon";
-               unsigned int ino;
                size_t size;
                ssize_t n;
 
@@ -209,13 +208,10 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                strcpy(execname, "");
 
                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
-               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
-                      &event->mmap2.start, &event->mmap2.len, prot,
-                      &event->mmap2.pgoff, &event->mmap2.maj,
-                      &event->mmap2.min,
-                      &ino, execname);
-
-               event->mmap2.ino = (u64)ino;
+               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
+                      &event->mmap.start, &event->mmap.len, prot,
+                      &event->mmap.pgoff,
+                      execname);
 
                if (n != 8)
                        continue;
@@ -227,15 +223,15 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                        strcpy(execname, anonstr);
 
                size = strlen(execname) + 1;
-               memcpy(event->mmap2.filename, execname, size);
+               memcpy(event->mmap.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
-               event->mmap2.len -= event->mmap.start;
-               event->mmap2.header.size = (sizeof(event->mmap2) -
-                                       (sizeof(event->mmap2.filename) - size));
-               memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
-               event->mmap2.header.size += machine->id_hdr_size;
-               event->mmap2.pid = tgid;
-               event->mmap2.tid = pid;
+               event->mmap.len -= event->mmap.start;
+               event->mmap.header.size = (sizeof(event->mmap) -
+                                       (sizeof(event->mmap.filename) - size));
+               memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+               event->mmap.header.size += machine->id_hdr_size;
+               event->mmap.pid = tgid;
+               event->mmap.tid = pid;
 
                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;
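
The revert goes back to scanning /proc/<pid>/maps with the device, inode and extra fields suppressed (%*x:%*x %*u), so only five conversions are assigned and sscanf() reports 5 on a full match; the second hex field is the mapping's end address, which the caller turns into a length by subtracting the start. A standalone userspace sketch of the same parse (illustrative, using the SCNx64 scan macros):

/* Userspace sketch: parse one /proc/<pid>/maps line of the same shape as
 * above; five assigned conversions, so a full match returns 5.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        const char *line =
                "00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat";
        uint64_t start, end, pgoff;
        char prot[5], path[256];
        int n;

        n = sscanf(line, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64
                   " %*x:%*x %*u %255s", &start, &end, prot, &pgoff, path);
        printf("n=%d start=%#" PRIx64 " len=%" PRIu64 " file=%s\n",
               n, start, end - start, path);
        return 0;
}
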
index 0ce9feb..9f1ef9b 100644
@@ -678,7 +678,6 @@ void perf_evsel__config(struct perf_evsel *evsel,
                attr->sample_type       |= PERF_SAMPLE_WEIGHT;
 
        attr->mmap  = track;
-       attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
 
        /*
index c09e0a9..f069273 100644
@@ -1357,10 +1357,10 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
                        goto post;
                }
 
+               fname = dwarf_decl_file(&spdie);
                if (addr == (unsigned long)baseaddr) {
                        /* Function entry - Relative line number is 0 */
                        lineno = baseline;
-                       fname = dwarf_decl_file(&spdie);
                        goto post;
                }
 
index a85e4ae..c0c9795 100644
@@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
 
        event = find_cache_event(evsel);
        if (!event)
-               die("ug! no event found for type %" PRIu64, evsel->attr.config);
+               die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
 
        pid = raw_field_value(event, "common_pid", data);
 
index 4fa655d..41bd855 100644
@@ -151,7 +151,7 @@ static int check_timer_create(int which)
        fflush(stdout);
 
        done = 0;
-       timer_create(which, NULL, &id);
+       err = timer_create(which, NULL, &id);
        if (err < 0) {
                perror("Can't create timer\n");
                return -1;