Merge branches 'pm-cpuidle', 'pm-devfreq' and 'pm-clk'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 1 Sep 2015 13:53:37 +0000 (15:53 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 1 Sep 2015 13:53:37 +0000 (15:53 +0200)
* pm-cpuidle:
  cpuidle/coupled: Remove redundant 'dev' argument of cpuidle_state_is_coupled()
  cpuidle/coupled: Remove cpuidle_device::safe_state_index
  intel_idle: Skylake Client Support
  intel_idle: allow idle states to be freeze-mode specific

* pm-devfreq:
  PM / devfreq: exynos-ppmu: Update documentation to support PPMUv2
  PM / devfreq: exynos-ppmu: Add the support of PPMUv2 for Exynos5433
  PM / devfreq: event: Remove incorrect property in exynos-ppmu DT binding

* pm-clk:
  PM / clk: don't return int on __pm_clk_enable()

228 files changed:
Documentation/acpi/method-tracing.txt
Documentation/cpu-freq/core.txt
Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt [new file with mode: 0644]
Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
Documentation/devicetree/bindings/opp/opp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/opp.txt [deleted file]
MAINTAINERS
Makefile
arch/arm64/kvm/inject_fault.c
arch/powerpc/include/asm/opal-api.h
arch/powerpc/kernel/pci_of_scan.c
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_ipmi.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_memhotplug.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acdispat.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acparser.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsdebug.c [new file with mode: 0644]
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exresnte.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psobject.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/utfileio.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utnonansi.c [new file with mode: 0644]
drivers/acpi/acpica/utstring.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/einj.c
drivers/acpi/apei/erst-dbg.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/apei/hest.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/cm_sbs.c
drivers/acpi/container.c
drivers/acpi/debugfs.c
drivers/acpi/device_pm.c
drivers/acpi/device_sysfs.c [new file with mode: 0644]
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/fan.c
drivers/acpi/hed.c
drivers/acpi/internal.h
drivers/acpi/nfit.c
drivers/acpi/numa.c
drivers/acpi/osl.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/acpi/pci_root.c
drivers/acpi/pci_slot.c
drivers/acpi/power.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/acpi/processor_throttling.c
drivers/acpi/property.c
drivers/acpi/resource.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/acpi/tables.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/ata/libata-core.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/power/clock_ops.c
drivers/base/power/opp.c
drivers/base/power/power.h
drivers/base/power/qos.c
drivers/base/power/sysfs.c
drivers/base/property.c
drivers/block/mtip32xx/mtip32xx.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_opp.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/integrator-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/mt8173-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
drivers/cpufreq/sfi-cpufreq.c
drivers/cpufreq/speedstep-lib.c
drivers/devfreq/event/exynos-ppmu.c
drivers/devfreq/event/exynos-ppmu.h
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/idma64.c [new file with mode: 0644]
drivers/dma/idma64.h [new file with mode: 0644]
drivers/mailbox/Kconfig
drivers/mailbox/pcc.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/intel-lpss-acpi.c [new file with mode: 0644]
drivers/mfd/intel-lpss-pci.c [new file with mode: 0644]
drivers/mfd/intel-lpss.c [new file with mode: 0644]
drivers/mfd/intel-lpss.h [new file with mode: 0644]
drivers/mfd/mfd-core.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_core.h
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/Makefile
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/usb/usbnet.c
drivers/net/vxlan.c
drivers/pci/probe.c
drivers/thermal/power_allocator.c
drivers/video/fbdev/pxafb.c
drivers/video/fbdev/sa1100fb.c
drivers/xen/xen-acpi-processor.c
fs/fs-writeback.c
include/acpi/acbuffer.h
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acoutput.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/actbl2.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acenvex.h
include/acpi/platform/acmsvcex.h [new file with mode: 0644]
include/acpi/platform/acwinex.h [new file with mode: 0644]
include/acpi/processor.h
include/linux/acpi.h
include/linux/cpufreq.h
include/linux/device.h
include/linux/klist.h
include/linux/of.h
include/linux/pci.h
include/linux/pm_opp.h
include/linux/pm_qos.h
lib/klist.c
net/ipv6/ip6_gre.c
net/key/af_key.c
net/netlink/af_netlink.c
net/sched/cls_u32.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
security/security.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/patch_conexant.c
sound/usb/quirks.c
tools/power/acpi/Makefile
tools/power/acpi/Makefile.config [new file with mode: 0644]
tools/power/acpi/Makefile.rules [new file with mode: 0644]
tools/power/acpi/tools/acpidump/Makefile [new file with mode: 0644]
tools/power/acpi/tools/ec/Makefile

index f6efb1e..c2505ee 100644 (file)
-/sys/module/acpi/parameters/:
+ACPICA Trace Facility
 
-trace_method_name
-       The AML method name that the user wants to trace
+Copyright (C) 2015, Intel Corporation
+Author: Lv Zheng <lv.zheng@intel.com>
 
-trace_debug_layer
-       The temporary debug_layer used when tracing the method.
-       Using 0xffffffff by default if it is 0.
 
-trace_debug_level
-       The temporary debug_level used when tracing the method.
-       Using 0x00ffffff by default if it is 0.
+Abstract:
 
-trace_state
-       The status of the tracing feature.
+This document describes the functions and the interfaces of the method
+tracing facility.
+
+1. Functionalities and usage examples:
+
+   ACPICA provides a method tracing capability, and two functions are
+   currently implemented on top of it.
+
+   A. Log reducer
+   ACPICA subsystem provides debugging outputs when CONFIG_ACPI_DEBUG is
+   enabled. The debugging messages which are deployed via
+   ACPI_DEBUG_PRINT() macro can be reduced at 2 levels - per-component
+   level (known as debug layer, configured via
+   /sys/module/acpi/parameters/debug_layer) and per-type level (known as
+   debug level, configured via /sys/module/acpi/parameters/debug_level).
+
+   But even when a particular layer/level is applied to control method
+   evaluations, the quantity of debugging output may still be too large
+   to fit into the kernel log buffer. The idea is therefore to enable the
+   particular (normally more detailed) debug layer/level only while a
+   control method evaluation is in progress, and to disable the detailed
+   logging when the control method evaluation stops.
+
+   The following command examples illustrate the usage of the "log reducer"
+   functionality:
+   a. Filter out the debug layer/level matched logs when control methods
+      are being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0xXXXXXXXX" > trace_debug_layer
+      # echo "0xYYYYYYYY" > trace_debug_level
+      # echo "enable" > trace_state
+   b. Filter out the debug layer/level matched logs when the specified
+      control method is being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0xXXXXXXXX" > trace_debug_layer
+      # echo "0xYYYYYYYY" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method" > /sys/module/acpi/parameters/trace_state
+   c. Filter out the debug layer/level matched logs when the specified
+      control method is being evaluated for the first time:
+      # cd /sys/module/acpi/parameters
+      # echo "0xXXXXXXXX" > trace_debug_layer
+      # echo "0xYYYYYYYY" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method-once" > /sys/module/acpi/parameters/trace_state
+   Where:
+      0xXXXXXXXX/0xYYYYYYYY: Refer to Documentation/acpi/debug.txt for
+                            possible debug layer/level masking values.
+      \PPPP.AAAA.TTTT.HHHH: Full path of a control method that can be found
+                           in the ACPI namespace. It needn't be an entry
+                           of a control method evaluation.
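+
+   For example, to capture all debugging output produced while
+   \_SB.LID0._LID is evaluated for the first time (the mask values below
+   simply select all layers/levels and can be narrowed as described in
+   Documentation/acpi/debug.txt):
+      # cd /sys/module/acpi/parameters
+      # echo "0xffffffff" > trace_debug_layer
+      # echo "0x00ffffff" > trace_debug_level
+      # echo "\_SB.LID0._LID" > trace_method_name
+      # echo "method-once" > trace_state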
+
+   B. AML tracer
+
+   The method tracing facility adds special log entries at the "trace
+   points" where the AML interpreter starts/stops executing a control
+   method or an AML opcode. Note that the format of the log entries is
+   subject to change:
+     [    0.186427]   exdebug-0398 ex_trace_point        : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+     [    0.186630]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905c88:If] execution.
+     [    0.186820]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905cc0:LEqual] execution.
+     [    0.187010]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905a20:-NamePath-] execution.
+     [    0.187214]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905a20:-NamePath-] execution.
+     [    0.187407]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905f60:One] execution.
+     [    0.187594]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905f60:One] execution.
+     [    0.187789]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905cc0:LEqual] execution.
+     [    0.187980]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905cc0:Return] execution.
+     [    0.188146]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905f60:One] execution.
+     [    0.188334]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905f60:One] execution.
+     [    0.188524]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905cc0:Return] execution.
+     [    0.188712]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905c88:If] execution.
+     [    0.188903]   exdebug-0398 ex_trace_point        : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
 
-       "enabled" means this feature is enabled
-       and the AML method is traced every time it's executed.
+   Developers can use these special log entries to track the AML
+   interpretation, which aids issue debugging and performance tuning. Note
+   that, as the "AML tracer" logs are implemented via the ACPI_DEBUG_PRINT()
+   macro, CONFIG_ACPI_DEBUG must also be enabled in order to produce
+   "AML tracer" logs.
 
-       "1" means this feature is enabled and the AML method
-       will only be traced during the next execution.
+   The following command examples illustrate the usage of the "AML tracer"
+   functionality:
+   a. Filter out the method start/stop "AML tracer" logs when control
+      methods are being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "enable" > trace_state
+   b. Filter out the method start/stop "AML tracer" logs when the specified
+      control method is being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method" > trace_state
+   c. Filter out the method start/stop "AML tracer" logs when the specified
+      control method is being evaluated for the first time:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method-once" > trace_state
+   d. Filter out the method/opcode start/stop "AML tracer" logs when the
+      specified control method is being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "opcode" > trace_state
+   e. Filter out the method/opcode start/stop "AML tracer" logs when the
+      specified control method is being evaluated for the first time:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "opcode-once" > trace_state
 
-       "disabled" means this feature is disabled.
-       Users can enable/disable this debug tracing feature by
-       "echo string > /sys/module/acpi/parameters/trace_state".
-       "string" should be one of "enable", "disable" and "1".
+  Note that all of the above method tracing module parameters can also be
+  used as boot parameters, for example:
+      acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
+      acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
+
+2. Interface descriptions:
+
+   All method tracing functions can be configured via ACPI module
+   parameters that are accessible at /sys/module/acpi/parameters/:
+
+   trace_method_name
+       The full path of the AML method that the user wants to trace.
+       Note that the full path shouldn't contain the trailing "_"s in its
+       name segments but may contain "\" to form an absolute path.
+
+   trace_debug_layer
+       The temporary debug_layer used when the tracing feature is enabled.
+       Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
+       used to match all "AML tracer" logs.
+
+   trace_debug_level
+       The temporary debug_level used when the tracing feature is enabled.
+       Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
+       debug_level used to match all "AML tracer" logs.
+
+   trace_state
+       The status of the tracing feature.
+       Users can enable/disable this debug tracing feature by executing
+       the following command:
+           # echo string > /sys/module/acpi/parameters/trace_state
+       Where "string" should be one of the following:
+       "disable"
+           Disable the method tracing feature.
+       "enable"
+           Enable the method tracing feature.
+           ACPICA debugging messages matching
+           "trace_debug_layer/trace_debug_level" during any method
+           execution will be logged.
+       "method"
+           Enable the method tracing feature.
+           ACPICA debugging messages matching
+           "trace_debug_layer/trace_debug_level" during method execution
+           of "trace_method_name" will be logged.
+       "method-once"
+           Enable the method tracing feature.
+           ACPICA debugging messages matching
+           "trace_debug_layer/trace_debug_level" during method execution
+           of "trace_method_name" will be logged only once.
+       "opcode"
+           Enable the method tracing feature.
+           ACPICA debugging messages matching
+           "trace_debug_layer/trace_debug_level" during method/opcode
+           execution of "trace_method_name" will be logged.
+       "opcode-once"
+           Enable the method tracing feature.
+           ACPICA debugging messages matching
+           "trace_debug_layer/trace_debug_level" during method/opcode
+           execution of "trace_method_name" will be logged only once.
+       Note that the differences between "enable" and the other enabling
+       options are:
+       1. When "enable" is specified, since
+          "trace_debug_layer/trace_debug_level" shall apply to all control
+          method evaluations, after configuring "trace_state" to "enable",
+          "trace_method_name" will be reset to NULL.
+       2. When "method/opcode" is specified, if
+          "trace_method_name" is NULL when "trace_state" is configured to
+          these options, the "trace_debug_layer/trace_debug_level" will
+          apply to all control method evaluations.
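+       All of these module parameters can also be read back to confirm the
+       current configuration, for example (the output shown is only
+       illustrative):
+           # cat /sys/module/acpi/parameters/trace_state
+           method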
index 70933ea..ba78e7c 100644 (file)
@@ -55,16 +55,13 @@ transition notifiers.
 ----------------------------
 
 These are notified when a new policy is intended to be set. Each
-CPUFreq policy notifier is called three times for a policy transition:
+CPUFreq policy notifier is called twice for a policy transition:
 
 1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
     they see a need for this - may it be thermal considerations or
     hardware limitations.
 
-2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid
-    hardware failure.
-
-3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
+2.) During CPUFREQ_NOTIFY all notifiers are informed of the new policy
    - if two hardware drivers failed to agree on a new policy before this
    stage, the incompatible hardware shall be shut down, and the user
    informed of this.
diff --git a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
new file mode 100644 (file)
index 0000000..52b457c
--- /dev/null
@@ -0,0 +1,83 @@
+Device Tree Clock bindings for CPU DVFS of Mediatek MT8173 SoC
+
+Required properties:
+- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
+- clock-names: Should contain the following:
+       "cpu"           - The multiplexer for clock input of CPU cluster.
+       "intermediate"  - A parent of "cpu" clock which is used as "intermediate" clock
+                         source (usually MAINPLL) when the original CPU PLL is under
+                         transition and not stable yet.
+       Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
+       generic clock consumer properties.
+- proc-supply: Regulator for Vproc of CPU cluster.
+
+Optional properties:
+- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
+              needs to do "voltage tracking" to step by step scale up/down Vproc and
+              Vsram to fit SoC specific needs. When absent, the voltage scaling
+              flow is handled by hardware, hence no software "voltage tracking" is
+              needed.
+
+Example:
+--------
+       cpu0: cpu@0 {
+               device_type = "cpu";
+               compatible = "arm,cortex-a53";
+               reg = <0x000>;
+               enable-method = "psci";
+               cpu-idle-states = <&CPU_SLEEP_0>;
+               clocks = <&infracfg CLK_INFRA_CA53SEL>,
+                        <&apmixedsys CLK_APMIXED_MAINPLL>;
+               clock-names = "cpu", "intermediate";
+       };
+
+       cpu1: cpu@1 {
+               device_type = "cpu";
+               compatible = "arm,cortex-a53";
+               reg = <0x001>;
+               enable-method = "psci";
+               cpu-idle-states = <&CPU_SLEEP_0>;
+               clocks = <&infracfg CLK_INFRA_CA53SEL>,
+                        <&apmixedsys CLK_APMIXED_MAINPLL>;
+               clock-names = "cpu", "intermediate";
+       };
+
+       cpu2: cpu@100 {
+               device_type = "cpu";
+               compatible = "arm,cortex-a57";
+               reg = <0x100>;
+               enable-method = "psci";
+               cpu-idle-states = <&CPU_SLEEP_0>;
+               clocks = <&infracfg CLK_INFRA_CA57SEL>,
+                        <&apmixedsys CLK_APMIXED_MAINPLL>;
+               clock-names = "cpu", "intermediate";
+       };
+
+       cpu3: cpu@101 {
+               device_type = "cpu";
+               compatible = "arm,cortex-a57";
+               reg = <0x101>;
+               enable-method = "psci";
+               cpu-idle-states = <&CPU_SLEEP_0>;
+               clocks = <&infracfg CLK_INFRA_CA57SEL>,
+                        <&apmixedsys CLK_APMIXED_MAINPLL>;
+               clock-names = "cpu", "intermediate";
+       };
+
+       &cpu0 {
+               proc-supply = <&mt6397_vpca15_reg>;
+       };
+
+       &cpu1 {
+               proc-supply = <&mt6397_vpca15_reg>;
+       };
+
+       &cpu2 {
+               proc-supply = <&da9211_vcpu_reg>;
+               sram-supply = <&mt6397_vsramca7_reg>;
+       };
+
+       &cpu3 {
+               proc-supply = <&da9211_vcpu_reg>;
+               sram-supply = <&mt6397_vsramca7_reg>;
+       };
index b54bf3a..3e36c1d 100644 (file)
@@ -11,15 +11,14 @@ to various devfreq devices. The devfreq devices would use the event data when
 determining the current state of each IP.
 
 Required properties:
-- compatible: Should be "samsung,exynos-ppmu".
+- compatible: Should be "samsung,exynos-ppmu" or "samsung,exynos-ppmu-v2".
 - reg: physical base address of each PPMU and length of memory mapped region.
 
 Optional properties:
 - clock-names : the name of clock used by the PPMU, "ppmu"
 - clocks : phandles for clock specified in "clock-names" property
-- #clock-cells: should be 1.
 
-Example1 : PPMU nodes in exynos3250.dtsi are listed below.
+Example1 : PPMUv1 nodes in exynos3250.dtsi are listed below.
 
                ppmu_dmc0: ppmu_dmc0@106a0000 {
                        compatible = "samsung,exynos-ppmu";
@@ -108,3 +107,41 @@ Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
                        };
                };
        };
+
+Example3 : PPMUv2 nodes in exynos5433.dtsi are listed below.
+
+               ppmu_d0_cpu: ppmu_d0_cpu@10480000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x10480000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_d0_general: ppmu_d0_general@10490000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x10490000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_d0_rt: ppmu_d0_rt@104a0000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x104a0000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_d1_cpu: ppmu_d1_cpu@104b0000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x104b0000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_d1_general: ppmu_d1_general@104c0000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x104c0000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_d1_rt: ppmu_d1_rt@104d0000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x104d0000 0x2000>;
+                       status = "disabled";
+               };
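+
+Example4 : A PPMUv2 node using the optional clock properties. The clock
+          controller phandle and specifier below are only placeholders; use
+          the ones appropriate for the SoC:
+
+               ppmu_d0_general: ppmu_d0_general@10490000 {
+                       compatible = "samsung,exynos-ppmu-v2";
+                       reg = <0x10490000 0x2000>;
+                       clock-names = "ppmu";
+                       clocks = <&clk_controller 0>;
+                       status = "disabled";
+               };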
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
new file mode 100644 (file)
index 0000000..0cb44dc
--- /dev/null
@@ -0,0 +1,465 @@
+Generic OPP (Operating Performance Points) Bindings
+----------------------------------------------------
+
+Devices work at voltage-current-frequency combinations and some implementations
+have the liberty of choosing these. These combinations are called Operating
+Performance Points aka OPPs. This document defines bindings for these OPPs
+applicable across a wide range of devices. For illustration purposes, this
+document uses a CPU as the example device.
+
+This document contains multiple versions of the OPP binding; only one of them
+should be used per device.
+
+Binding 1: operating-points
+============================
+
+This binding only supports voltage-frequency pairs.
+
+Properties:
+- operating-points: An array of 2-tuple items, each consisting of a
+  frequency and a voltage in the form <freq-kHz vol-uV>.
+       freq: clock frequency in kHz
+       vol: voltage in microvolt
+
+Examples:
+
+cpu@0 {
+       compatible = "arm,cortex-a9";
+       reg = <0>;
+       next-level-cache = <&L2>;
+       operating-points = <
+               /* kHz    uV */
+               792000  1100000
+               396000  950000
+               198000  850000
+       >;
+};
+
+
+Binding 2: operating-points-v2
+============================
+
+* Property: operating-points-v2
+
+Devices supporting OPPs must set their "operating-points-v2" property with a
+phandle to an OPP table in their DT node. The OPP core will use this phandle to
+find the operating points for the device.
+
+Devices may want to choose OPP tables at runtime and so can provide a list of
+phandles here. But only *one* of them should be chosen at runtime. This must be
+accompanied by a corresponding "operating-points-names" property, to uniquely
+identify the OPP tables.
+
+If required, this can be extended for SoC vendor-specific bindings. Such bindings
+should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
+and should have a compatible description like: "operating-points-v2-<vendor>".
+
+Optional properties:
+- operating-points-names: Names of OPP tables (required if multiple OPP
+  tables are present), to uniquely identify them. The same list must be present
+  for all the CPUs which are sharing clock/voltage rails and hence the OPP
+  tables.
+
+* OPP Table Node
+
+This describes the OPPs belonging to a device. This node can have following
+properties:
+
+Required properties:
+- compatible: Allow OPPs to express their compatibility. It should be:
+  "operating-points-v2".
+
+- OPP nodes: One or more OPP nodes describing voltage-current-frequency
+  combinations. Their name isn't significant but their phandle can be used to
+  reference an OPP.
+
+Optional properties:
+- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
+  switch their DVFS state together, i.e. they share clock/voltage/current lines.
+  Missing property means devices have independent clock/voltage/current lines,
+  but they share OPP tables.
+
+- status: Marks the OPP table enabled/disabled.
+
+
+* OPP Node
+
+This defines voltage-current-frequency combinations along with other related
+properties.
+
+Required properties:
+- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer.
+
+Optional properties:
+- opp-microvolt: voltage in micro Volts.
+
+  A single regulator's voltage is specified with an array of size one or three.
+  Single entry is for target voltage and three entries are for <target min max>
+  voltages.
+
+  Entries for multiple regulators must be present in the same order as
+  regulators are specified in device's DT node.
+
+- opp-microamp: The maximum current drawn by the device in microamperes
+  considering system specific parameters (such as transients, process, aging,
+  maximum operating temperature range etc.) as necessary. This may be used to
+  set the most efficient regulator operating mode.
+
+  Should only be set if opp-microvolt is set for the OPP.
+
+  Entries for multiple regulators must be present in the same order as
+  regulators are specified in device's DT node. If this property isn't required
+  for few regulators, then this should be marked as zero for them. If it isn't
+  required for any regulator, then this property need not be present.
+
+- clock-latency-ns: Specifies the maximum possible transition latency (in
+  nanoseconds) for switching to this OPP from any other OPP.
+
+- turbo-mode: Marks the OPP to be used only for turbo modes. Turbo mode is
+  available on some platforms, where the device can run over its operating
+  frequency for a short duration of time limited by the device's power, current
+  and thermal limits.
+
+- opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
+  the table should have this.
+
+- status: Marks the node enabled/disabled.
+
+Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
+
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a9";
+                       reg = <0>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 0>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply0>;
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+
+               cpu@1 {
+                       compatible = "arm,cortex-a9";
+                       reg = <1>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 0>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply0>;
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+       };
+
+       cpu0_opp_table: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+
+               opp00 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <970000 975000 985000>;
+                       opp-microamp = <70000>;
+                       clock-latency-ns = <300000>;
+                       opp-suspend;
+               };
+               opp01 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <980000 1000000 1010000>;
+                       opp-microamp = <80000>;
+                       clock-latency-ns = <310000>;
+               };
+               opp02 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1025000>;
+                       clock-latency-ns = <290000>;
+                       turbo-mode;
+               };
+       };
+};
+
+Example 2: Single cluster, Quad-core Qualcomm Krait, switches DVFS states
+independently.
+
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "qcom,krait";
+                       reg = <0>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 0>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply0>;
+                       operating-points-v2 = <&cpu_opp_table>;
+               };
+
+               cpu@1 {
+                       compatible = "qcom,krait";
+                       reg = <1>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 1>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply1>;
+                       operating-points-v2 = <&cpu_opp_table>;
+               };
+
+               cpu@2 {
+                       compatible = "qcom,krait";
+                       reg = <2>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 2>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply2>;
+                       operating-points-v2 = <&cpu_opp_table>;
+               };
+
+               cpu@3 {
+                       compatible = "qcom,krait";
+                       reg = <3>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 3>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply3>;
+                       operating-points-v2 = <&cpu_opp_table>;
+               };
+       };
+
+       cpu_opp_table: opp_table {
+               compatible = "operating-points-v2";
+
+               /*
+                * Missing opp-shared property means CPUs switch DVFS states
+                * independently.
+                */
+
+               opp00 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <970000 975000 985000>;
+                       opp-microamp = <70000>;
+                       clock-latency-ns = <300000>;
+                       opp-suspend;
+               };
+               opp01 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <980000 1000000 1010000>;
+                       opp-microamp = <80000>;
+                       clock-latency-ns = <310000>;
+               };
+               opp02 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1025000>;
+                       opp-microamp = <90000>;
+                       clock-latency-ns = <290000>;
+                       turbo-mode;
+               };
+       };
+};
+
+Example 3: Dual-cluster, Dual-core per cluster. CPUs within a cluster switch
+DVFS state together.
+
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a7";
+                       reg = <0>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 0>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply0>;
+                       operating-points-v2 = <&cluster0_opp>;
+               };
+
+               cpu@1 {
+                       compatible = "arm,cortex-a7";
+                       reg = <1>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 0>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply0>;
+                       operating-points-v2 = <&cluster0_opp>;
+               };
+
+               cpu@100 {
+                       compatible = "arm,cortex-a15";
+                       reg = <100>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 1>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply1>;
+                       operating-points-v2 = <&cluster1_opp>;
+               };
+
+               cpu@101 {
+                       compatible = "arm,cortex-a15";
+                       reg = <101>;
+                       next-level-cache = <&L2>;
+                       clocks = <&clk_controller 1>;
+                       clock-names = "cpu";
+                       cpu-supply = <&cpu_supply1>;
+                       operating-points-v2 = <&cluster1_opp>;
+               };
+       };
+
+       cluster0_opp: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+
+               opp00 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <970000 975000 985000>;
+                       opp-microamp = <70000>;
+                       clock-latency-ns = <300000>;
+                       opp-suspend;
+               };
+               opp01 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <980000 1000000 1010000>;
+                       opp-microamp = <80000>;
+                       clock-latency-ns = <310000>;
+               };
+               opp02 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1025000>;
+                       opp-microamp = <90000>;
+                       clock-latency-ns = <290000>;
+                       turbo-mode;
+               };
+       };
+
+       cluster1_opp: opp_table1 {
+               compatible = "operating-points-v2";
+               opp-shared;
+
+               opp10 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1045000 1050000 1055000>;
+                       opp-microamp = <95000>;
+                       clock-latency-ns = <400000>;
+                       opp-suspend;
+               };
+               opp11 {
+                       opp-hz = /bits/ 64 <1400000000>;
+                       opp-microvolt = <1075000>;
+                       opp-microamp = <100000>;
+                       clock-latency-ns = <400000>;
+               };
+               opp12 {
+                       opp-hz = /bits/ 64 <1500000000>;
+                       opp-microvolt = <1010000 1100000 1110000>;
+                       opp-microamp = <95000>;
+                       clock-latency-ns = <400000>;
+                       turbo-mode;
+               };
+       };
+};
+
+Example 4: Handling multiple regulators
+
+/ {
+       cpus {
+               cpu@0 {
+                       compatible = "arm,cortex-a7";
+                       ...
+
+                       cpu-supply = <&cpu_supply0>, <&cpu_supply1>, <&cpu_supply2>;
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+       };
+
+       cpu0_opp_table: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+
+               opp00 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <970000>, /* Supply 0 */
+                                       <960000>, /* Supply 1 */
+                                       <960000>; /* Supply 2 */
+                       opp-microamp =  <70000>,  /* Supply 0 */
+                                       <70000>,  /* Supply 1 */
+                                       <70000>;  /* Supply 2 */
+                       clock-latency-ns = <300000>;
+               };
+
+               /* OR */
+
+               opp00 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <970000 975000 985000>, /* Supply 0 */
+                                       <960000 965000 975000>, /* Supply 1 */
+                                       <960000 965000 975000>; /* Supply 2 */
+                       opp-microamp =  <70000>,                /* Supply 0 */
+                                       <70000>,                /* Supply 1 */
+                                       <70000>;                /* Supply 2 */
+                       clock-latency-ns = <300000>;
+               };
+
+               /* OR */
+
+               opp00 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <970000 975000 985000>, /* Supply 0 */
+                                       <960000 965000 975000>, /* Supply 1 */
+                                       <960000 965000 975000>; /* Supply 2 */
+                       opp-microamp =  <70000>,                /* Supply 0 */
+                                       <0>,                    /* Supply 1 doesn't need this */
+                                       <70000>;                /* Supply 2 */
+                       clock-latency-ns = <300000>;
+               };
+       };
+};
+
+Example 5: Multiple OPP tables
+
+/ {
+       cpus {
+               cpu@0 {
+                       compatible = "arm,cortex-a7";
+                       ...
+
+                       cpu-supply = <&cpu_supply>;
+                       operating-points-v2 = <&cpu0_opp_table_slow>, <&cpu0_opp_table_fast>;
+                       operating-points-names = "slow", "fast";
+               };
+       };
+
+       cpu0_opp_table_slow: opp_table_slow {
+               compatible = "operating-points-v2";
+               status = "okay";
+               opp-shared;
+
+               opp00 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       ...
+               };
+
+               opp01 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       ...
+               };
+       };
+
+       cpu0_opp_table_fast: opp_table_fast {
+               compatible = "operating-points-v2";
+               status = "okay";
+               opp-shared;
+
+               opp10 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       ...
+               };
+
+               opp11 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       ...
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
deleted file mode 100644 (file)
index 0d5e7c9..0000000
+++ /dev/null
@@ -1,465 +0,0 @@
-Generic OPP (Operating Performance Points) Bindings
-----------------------------------------------------
-
-Devices work at voltage-current-frequency combinations and some implementations
-have the liberty of choosing these. These combinations are called Operating
-Performance Points aka OPPs. This document defines bindings for these OPPs
-applicable across wide range of devices. For illustration purpose, this document
-uses CPU as a device.
-
-This document contain multiple versions of OPP binding and only one of them
-should be used per device.
-
-Binding 1: operating-points
-============================
-
-This binding only supports voltage-frequency pairs.
-
-Properties:
-- operating-points: An array of 2-tuples items, and each item consists
-  of frequency and voltage like <freq-kHz vol-uV>.
-       freq: clock frequency in kHz
-       vol: voltage in microvolt
-
-Examples:
-
-cpu@0 {
-       compatible = "arm,cortex-a9";
-       reg = <0>;
-       next-level-cache = <&L2>;
-       operating-points = <
-               /* kHz    uV */
-               792000  1100000
-               396000  950000
-               198000  850000
-       >;
-};
-
-
-Binding 2: operating-points-v2
-============================
-
-* Property: operating-points-v2
-
-Devices supporting OPPs must set their "operating-points-v2" property with
-phandle to a OPP table in their DT node. The OPP core will use this phandle to
-find the operating points for the device.
-
-Devices may want to choose OPP tables at runtime and so can provide a list of
-phandles here. But only *one* of them should be chosen at runtime. This must be
-accompanied by a corresponding "operating-points-names" property, to uniquely
-identify the OPP tables.
-
-If required, this can be extended for SoC vendor specfic bindings. Such bindings
-should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
-and should have a compatible description like: "operating-points-v2-<vendor>".
-
-Optional properties:
-- operating-points-names: Names of OPP tables (required if multiple OPP
-  tables are present), to uniquely identify them. The same list must be present
-  for all the CPUs which are sharing clock/voltage rails and hence the OPP
-  tables.
-
-* OPP Table Node
-
-This describes the OPPs belonging to a device. This node can have following
-properties:
-
-Required properties:
-- compatible: Allow OPPs to express their compatibility. It should be:
-  "operating-points-v2".
-
-- OPP nodes: One or more OPP nodes describing voltage-current-frequency
-  combinations. Their name isn't significant but their phandle can be used to
-  reference an OPP.
-
-Optional properties:
-- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
-  switch their DVFS state together, i.e. they share clock/voltage/current lines.
-  Missing property means devices have independent clock/voltage/current lines,
-  but they share OPP tables.
-
-- status: Marks the OPP table enabled/disabled.
-
-
-* OPP Node
-
-This defines voltage-current-frequency combinations along with other related
-properties.
-
-Required properties:
-- opp-hz: Frequency in Hz
-
-Optional properties:
-- opp-microvolt: voltage in micro Volts.
-
-  A single regulator's voltage is specified with an array of size one or three.
-  Single entry is for target voltage and three entries are for <target min max>
-  voltages.
-
-  Entries for multiple regulators must be present in the same order as
-  regulators are specified in device's DT node.
-
-- opp-microamp: The maximum current drawn by the device in microamperes
-  considering system specific parameters (such as transients, process, aging,
-  maximum operating temperature range etc.) as necessary. This may be used to
-  set the most efficient regulator operating mode.
-
-  Should only be set if opp-microvolt is set for the OPP.
-
-  Entries for multiple regulators must be present in the same order as
-  regulators are specified in device's DT node. If this property isn't required
-  for few regulators, then this should be marked as zero for them. If it isn't
-  required for any regulator, then this property need not be present.
-
-- clock-latency-ns: Specifies the maximum possible transition latency (in
-  nanoseconds) for switching to this OPP from any other OPP.
-
-- turbo-mode: Marks the OPP to be used only for turbo modes. Turbo mode is
-  available on some platforms, where the device can run over its operating
-  frequency for a short duration of time limited by the device's power, current
-  and thermal limits.
-
-- opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
-  the table should have this.
-
-- status: Marks the node enabled/disabled.
-
-Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
-
-/ {
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               cpu@0 {
-                       compatible = "arm,cortex-a9";
-                       reg = <0>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 0>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply0>;
-                       operating-points-v2 = <&cpu0_opp_table>;
-               };
-
-               cpu@1 {
-                       compatible = "arm,cortex-a9";
-                       reg = <1>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 0>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply0>;
-                       operating-points-v2 = <&cpu0_opp_table>;
-               };
-       };
-
-       cpu0_opp_table: opp_table0 {
-               compatible = "operating-points-v2";
-               opp-shared;
-
-               opp00 {
-                       opp-hz = <1000000000>;
-                       opp-microvolt = <970000 975000 985000>;
-                       opp-microamp = <70000>;
-                       clock-latency-ns = <300000>;
-                       opp-suspend;
-               };
-               opp01 {
-                       opp-hz = <1100000000>;
-                       opp-microvolt = <980000 1000000 1010000>;
-                       opp-microamp = <80000>;
-                       clock-latency-ns = <310000>;
-               };
-               opp02 {
-                       opp-hz = <1200000000>;
-                       opp-microvolt = <1025000>;
-                       clock-latency-ns = <290000>;
-                       turbo-mode;
-               };
-       };
-};
-
-Example 2: Single cluster, Quad-core Qualcom-krait, switches DVFS states
-independently.
-
-/ {
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               cpu@0 {
-                       compatible = "qcom,krait";
-                       reg = <0>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 0>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply0>;
-                       operating-points-v2 = <&cpu_opp_table>;
-               };
-
-               cpu@1 {
-                       compatible = "qcom,krait";
-                       reg = <1>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 1>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply1>;
-                       operating-points-v2 = <&cpu_opp_table>;
-               };
-
-               cpu@2 {
-                       compatible = "qcom,krait";
-                       reg = <2>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 2>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply2>;
-                       operating-points-v2 = <&cpu_opp_table>;
-               };
-
-               cpu@3 {
-                       compatible = "qcom,krait";
-                       reg = <3>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 3>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply3>;
-                       operating-points-v2 = <&cpu_opp_table>;
-               };
-       };
-
-       cpu_opp_table: opp_table {
-               compatible = "operating-points-v2";
-
-               /*
-                * Missing opp-shared property means CPUs switch DVFS states
-                * independently.
-                */
-
-               opp00 {
-                       opp-hz = <1000000000>;
-                       opp-microvolt = <970000 975000 985000>;
-                       opp-microamp = <70000>;
-                       clock-latency-ns = <300000>;
-                       opp-suspend;
-               };
-               opp01 {
-                       opp-hz = <1100000000>;
-                       opp-microvolt = <980000 1000000 1010000>;
-                       opp-microamp = <80000>;
-                       clock-latency-ns = <310000>;
-               };
-               opp02 {
-                       opp-hz = <1200000000>;
-                       opp-microvolt = <1025000>;
-                       opp-microamp = <90000;
-                       lock-latency-ns = <290000>;
-                       turbo-mode;
-               };
-       };
-};
-
-Example 3: Dual-cluster, Dual-core per cluster. CPUs within a cluster switch
-DVFS state together.
-
-/ {
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               cpu@0 {
-                       compatible = "arm,cortex-a7";
-                       reg = <0>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 0>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply0>;
-                       operating-points-v2 = <&cluster0_opp>;
-               };
-
-               cpu@1 {
-                       compatible = "arm,cortex-a7";
-                       reg = <1>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 0>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply0>;
-                       operating-points-v2 = <&cluster0_opp>;
-               };
-
-               cpu@100 {
-                       compatible = "arm,cortex-a15";
-                       reg = <100>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 1>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply1>;
-                       operating-points-v2 = <&cluster1_opp>;
-               };
-
-               cpu@101 {
-                       compatible = "arm,cortex-a15";
-                       reg = <101>;
-                       next-level-cache = <&L2>;
-                       clocks = <&clk_controller 1>;
-                       clock-names = "cpu";
-                       cpu-supply = <&cpu_supply1>;
-                       operating-points-v2 = <&cluster1_opp>;
-               };
-       };
-
-       cluster0_opp: opp_table0 {
-               compatible = "operating-points-v2";
-               opp-shared;
-
-               opp00 {
-                       opp-hz = <1000000000>;
-                       opp-microvolt = <970000 975000 985000>;
-                       opp-microamp = <70000>;
-                       clock-latency-ns = <300000>;
-                       opp-suspend;
-               };
-               opp01 {
-                       opp-hz = <1100000000>;
-                       opp-microvolt = <980000 1000000 1010000>;
-                       opp-microamp = <80000>;
-                       clock-latency-ns = <310000>;
-               };
-               opp02 {
-                       opp-hz = <1200000000>;
-                       opp-microvolt = <1025000>;
-                       opp-microamp = <90000>;
-                       clock-latency-ns = <290000>;
-                       turbo-mode;
-               };
-       };
-
-       cluster1_opp: opp_table1 {
-               compatible = "operating-points-v2";
-               opp-shared;
-
-               opp10 {
-                       opp-hz = <1300000000>;
-                       opp-microvolt = <1045000 1050000 1055000>;
-                       opp-microamp = <95000>;
-                       clock-latency-ns = <400000>;
-                       opp-suspend;
-               };
-               opp11 {
-                       opp-hz = <1400000000>;
-                       opp-microvolt = <1075000>;
-                       opp-microamp = <100000>;
-                       clock-latency-ns = <400000>;
-               };
-               opp12 {
-                       opp-hz = <1500000000>;
-                       opp-microvolt = <1010000 1100000 1110000>;
-                       opp-microamp = <95000>;
-                       clock-latency-ns = <400000>;
-                       turbo-mode;
-               };
-       };
-};
-
-Example 4: Handling multiple regulators
-
-/ {
-       cpus {
-               cpu@0 {
-                       compatible = "arm,cortex-a7";
-                       ...
-
-                       cpu-supply = <&cpu_supply0>, <&cpu_supply1>, <&cpu_supply2>;
-                       operating-points-v2 = <&cpu0_opp_table>;
-               };
-       };
-
-       cpu0_opp_table: opp_table0 {
-               compatible = "operating-points-v2";
-               opp-shared;
-
-               opp00 {
-                       opp-hz = <1000000000>;
-                       opp-microvolt = <970000>, /* Supply 0 */
-                                       <960000>, /* Supply 1 */
-                                       <960000>; /* Supply 2 */
-                       opp-microamp =  <70000>,  /* Supply 0 */
-                                       <70000>,  /* Supply 1 */
-                                       <70000>;  /* Supply 2 */
-                       clock-latency-ns = <300000>;
-               };
-
-               /* OR */
-
-               opp00 {
-                       opp-hz = <1000000000>;
-                       opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-                                       <960000 965000 975000>, /* Supply 1 */
-                                       <960000 965000 975000>; /* Supply 2 */
-                       opp-microamp =  <70000>,                /* Supply 0 */
-                                       <70000>,                /* Supply 1 */
-                                       <70000>;                /* Supply 2 */
-                       clock-latency-ns = <300000>;
-               };
-
-               /* OR */
-
-               opp00 {
-                       opp-hz = <1000000000>;
-                       opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-                                       <960000 965000 975000>, /* Supply 1 */
-                                       <960000 965000 975000>; /* Supply 2 */
-                       opp-microamp =  <70000>,                /* Supply 0 */
-                                       <0>,                    /* Supply 1 doesn't need this */
-                                       <70000>;                /* Supply 2 */
-                       clock-latency-ns = <300000>;
-               };
-       };
-};
-
-Example 5: Multiple OPP tables
-
-/ {
-       cpus {
-               cpu@0 {
-                       compatible = "arm,cortex-a7";
-                       ...
-
-                       cpu-supply = <&cpu_supply>
-                       operating-points-v2 = <&cpu0_opp_table_slow>, <&cpu0_opp_table_fast>;
-                       operating-points-names = "slow", "fast";
-               };
-       };
-
-       cpu0_opp_table_slow: opp_table_slow {
-               compatible = "operating-points-v2";
-               status = "okay";
-               opp-shared;
-
-               opp00 {
-                       opp-hz = <600000000>;
-                       ...
-               };
-
-               opp01 {
-                       opp-hz = <800000000>;
-                       ...
-               };
-       };
-
-       cpu0_opp_table_fast: opp_table_fast {
-               compatible = "operating-points-v2";
-               status = "okay";
-               opp-shared;
-
-               opp10 {
-                       opp-hz = <1000000000>;
-                       ...
-               };
-
-               opp11 {
-                       opp-hz = <1100000000>;
-                       ...
-               };
-       };
-};
index 569568f..b60e2b2 100644 (file)
@@ -5849,6 +5849,7 @@ S:        Odd Fixes
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:     "J. Bruce Fields" <bfields@fieldses.org>
+M:     Jeff Layton <jlayton@poochiereds.net>
 L:     linux-nfs@vger.kernel.org
 W:     http://nfs.sourceforge.net/
 S:     Supported
index 246053f..c361593 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index f02530e..85c5715 100644 (file)
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_abt32(vcpu, false, addr);
-
-       inject_abt64(vcpu, false, addr);
+       else
+               inject_abt64(vcpu, false, addr);
 }
 
 /**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_abt32(vcpu, true, addr);
-
-       inject_abt64(vcpu, true, addr);
+       else
+               inject_abt64(vcpu, true, addr);
 }
 
 /**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_undef32(vcpu);
-
-       inject_undef64(vcpu);
+       else
+               inject_undef64(vcpu);
 }
index e9e4c52..64dc9f5 100644 (file)
@@ -361,6 +361,7 @@ enum opal_msg_type {
        OPAL_MSG_HMI_EVT,
        OPAL_MSG_DPO,
        OPAL_MSG_PRD,
+       OPAL_MSG_OCC,
        OPAL_MSG_TYPE_MAX,
 };
 
@@ -700,6 +701,17 @@ struct opal_prd_msg_header {
 
 struct opal_prd_msg;
 
+#define OCC_RESET                       0
+#define OCC_LOAD                        1
+#define OCC_THROTTLE                    2
+#define OCC_MAX_THROTTLE_STATUS         5
+
+struct opal_occ_msg {
+       __be64 type;
+       __be64 chip;
+       __be64 throttle_status;
+};
+
 /*
  * SG entries
  *
index 42e02a2..efc3fa5 100644 (file)
@@ -191,6 +191,9 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 
        pci_device_add(dev, bus);
 
+       /* Setup MSI caps & disable MSI/MSI-X interrupts */
+       pci_msi_setup_pci_dev(dev);
+
        return dev;
 }
 EXPORT_SYMBOL(of_create_pci_dev);
index e49ee24..9393896 100644 (file)
@@ -445,6 +445,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
        mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+       acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
        /*
         * stash over-ride to indicate we've been here
index dcb5285..cde732c 100644 (file)
@@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void)
 {
        u64 msr;
 
-       if (cpu_has_apic)
+       if (!cpu_has_apic)
                return;
 
        rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1483,13 @@ void x2apic_setup(void)
 
 static __init void x2apic_disable(void)
 {
-       u32 x2apic_id;
+       u32 x2apic_id, state = x2apic_state;
 
-       if (x2apic_state != X2APIC_ON)
-               goto out;
+       x2apic_mode = 0;
+       x2apic_state = X2APIC_DISABLED;
+
+       if (state != X2APIC_ON)
+               return;
 
        x2apic_id = read_apic_id();
        if (x2apic_id >= 255)
@@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void)
 
        __x2apic_disable();
        register_lapic_address(mp_lapic_addr);
-out:
-       x2apic_state = X2APIC_DISABLED;
-       x2apic_mode = 0;
 }
 
 static __init void x2apic_enable(void)
index 114cf48..54e9729 100644 (file)
@@ -189,17 +189,24 @@ config ACPI_DOCK
          This driver supports ACPI-controlled docking stations and removable
          drive bays such as the IBM Ultrabay and the Dell Module Bay.
 
-config ACPI_PROCESSOR
-       tristate "Processor"
+config ACPI_CPU_FREQ_PSS
+       bool
        select THERMAL
+
+config ACPI_PROCESSOR_IDLE
+       bool
        select CPU_IDLE
+
+config ACPI_PROCESSOR
+       tristate "Processor"
        depends on X86 || IA64
+       select ACPI_PROCESSOR_IDLE
+       select ACPI_CPU_FREQ_PSS
        default y
        help
-         This driver installs ACPI as the idle handler for Linux and uses
-         ACPI C2 and C3 processor states to save power on systems that
-         support it.  It is required by several flavors of cpufreq
-         performance-state drivers.
+         This driver adds support for the ACPI Processor package. It is required
+         by several flavors of cpufreq performance-state, thermal, throttling and
+         idle drivers.
 
          To compile this driver as a module, choose M here:
          the module will be called processor.
index 8321430..b5e7cd8 100644 (file)
@@ -24,7 +24,7 @@ acpi-y                                += nvs.o
 # Power management related files
 acpi-y                         += wakeup.o
 acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
-acpi-y                         += device_pm.o
+acpi-y                         += device_sysfs.o device_pm.o
 acpi-$(CONFIG_ACPI_SLEEP)      += proc.o
 
 
@@ -80,8 +80,10 @@ obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
 obj-$(CONFIG_ACPI_BGRT)                += bgrt.o
 
 # processor has its own "processor." module_param namespace
-processor-y                    := processor_driver.o processor_throttling.o
-processor-y                    += processor_idle.o processor_thermal.o
+processor-y                    := processor_driver.o
+processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o
+processor-$(CONFIG_ACPI_CPU_FREQ_PSS)  += processor_throttling.o       \
+       processor_thermal.o
 processor-$(CONFIG_CPU_FREQ)   += processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
index 9b5354a..f71b756 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index ac0f52f..f77956c 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 46b58ab..10020e0 100644 (file)
@@ -60,6 +60,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER               BIT(2)
 #define LPSS_LTR                       BIT(3)
 #define LPSS_SAVE_CTX                  BIT(4)
+#define LPSS_NO_D3_DELAY               BIT(5)
 
 struct lpss_private_data;
 
@@ -156,6 +157,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = {
        .flags = LPSS_SAVE_CTX,
 };
 
+static const struct lpss_device_desc bsw_pwm_dev_desc = {
+       .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+};
+
 static const struct lpss_device_desc byt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .clk_con_id = "baudclk",
@@ -163,6 +168,14 @@ static const struct lpss_device_desc byt_uart_dev_desc = {
        .setup = lpss_uart_setup,
 };
 
+static const struct lpss_device_desc bsw_uart_dev_desc = {
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+                       | LPSS_NO_D3_DELAY,
+       .clk_con_id = "baudclk",
+       .prv_offset = 0x800,
+       .setup = lpss_uart_setup,
+};
+
 static const struct lpss_device_desc byt_spi_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x400,
@@ -178,8 +191,15 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
        .setup = byt_i2c_setup,
 };
 
+static const struct lpss_device_desc bsw_i2c_dev_desc = {
+       .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+       .prv_offset = 0x800,
+       .setup = byt_i2c_setup,
+};
+
 static struct lpss_device_desc bsw_spi_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+                       | LPSS_NO_D3_DELAY,
        .prv_offset = 0x400,
        .setup = lpss_deassert_reset,
 };
@@ -214,11 +234,12 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        { "INT33FC", },
 
        /* Braswell LPSS devices */
-       { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
-       { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+       { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+       { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
        { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
-       { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+       { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
 
+       /* Broadwell LPSS devices */
        { "INT3430", LPSS_ADDR(lpt_dev_desc) },
        { "INT3431", LPSS_ADDR(lpt_dev_desc) },
        { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
@@ -558,9 +579,14 @@ static void acpi_lpss_restore_ctx(struct device *dev,
         * The following delay is needed or the subsequent write operations may
         * fail. The LPSS devices are actually PCI devices and the PCI spec
         * expects 10ms delay before the device can be accessed after D3 to D0
-        * transition.
+        * transition. However, some platforms like BSW do not need this delay.
         */
-       msleep(10);
+       unsigned int delay = 10;        /* default 10ms delay */
+
+       if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
+               delay = 0;
+
+       msleep(delay);
 
        for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
                unsigned long offset = i * sizeof(u32);
index ee28f4d..6b0d3ef 100644 (file)
  * NON INFRINGEMENT.  See the GNU General Public License for more
  * details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
  * ACPI based HotPlug driver that supports Memory Hotplug
  * This driver fields notifications from firmware for memory add
  * and remove operations and alerts the VM of the affected memory
index 00b3980..ae307ff 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #include <linux/kernel.h>
index 92a5f73..985b8a8 100644 (file)
@@ -485,7 +485,7 @@ static const struct acpi_device_id processor_device_ids[] = {
        { }
 };
 
-static struct acpi_scan_handler __refdata processor_handler = {
+static struct acpi_scan_handler processor_handler = {
        .ids = processor_device_ids,
        .attach = acpi_processor_add,
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
index 8c2fe2f..5778e8e 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index c1a9635..fedcc16 100644 (file)
@@ -11,6 +11,7 @@ obj-y += acpi.o
 acpi-y :=              \
        dsargs.o        \
        dscontrol.o     \
+       dsdebug.o       \
        dsfield.o       \
        dsinit.o        \
        dsmethod.o      \
@@ -164,6 +165,7 @@ acpi-y +=           \
        utmath.o        \
        utmisc.o        \
        utmutex.o       \
+       utnonansi.o     \
        utobject.o      \
        utosi.o         \
        utownerid.o     \
index 43685dd..eb2e926 100644 (file)
@@ -67,9 +67,6 @@ struct acpi_db_execute_walk {
 };
 
 #define PARAM_LIST(pl)                  pl
-#define DBTEST_OUTPUT_LEVEL(lvl)        if (acpi_gbl_db_opt_verbose)
-#define VERBOSE_PRINT(fp)               DBTEST_OUTPUT_LEVEL(lvl) {\
-                         acpi_os_printf PARAM_LIST(fp);}
 
 #define EX_NO_SINGLE_STEP               1
 #define EX_SINGLE_STEP                  2
@@ -77,10 +74,6 @@ struct acpi_db_execute_walk {
 /*
  * dbxface - external debugger interfaces
  */
-acpi_status acpi_db_initialize(void);
-
-void acpi_db_terminate(void);
-
 acpi_status
 acpi_db_single_step(struct acpi_walk_state *walk_state,
                    union acpi_parse_object *op, u32 op_type);
@@ -102,6 +95,8 @@ void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
 
 acpi_status acpi_db_sleep(char *object_arg);
 
+void acpi_db_trace(char *enable_arg, char *method_arg, char *once_arg);
+
 void acpi_db_display_locks(void);
 
 void acpi_db_display_resources(char *object_arg);
@@ -261,6 +256,23 @@ acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op);
 char *acpi_db_get_next_token(char *string,
                             char **next, acpi_object_type * return_type);
 
+/*
+ * dbobject
+ */
+void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc);
+
+void
+acpi_db_display_internal_object(union acpi_operand_object *obj_desc,
+                               struct acpi_walk_state *walk_state);
+
+void acpi_db_decode_arguments(struct acpi_walk_state *walk_state);
+
+void acpi_db_decode_locals(struct acpi_walk_state *walk_state);
+
+void
+acpi_db_dump_method_info(acpi_status status,
+                        struct acpi_walk_state *walk_state);
+
 /*
  * dbstats - Generation and display of ACPI table statistics
  */
index 408f04b..7094dc8 100644 (file)
@@ -354,4 +354,12 @@ acpi_status
 acpi_ds_result_push(union acpi_operand_object *object,
                    struct acpi_walk_state *walk_state);
 
+/*
+ * dsdebug - parser debugging routines
+ */
+void
+acpi_ds_dump_method_stack(acpi_status status,
+                         struct acpi_walk_state *walk_state,
+                         union acpi_parse_object *op);
+
 #endif                         /* _ACDISPAT_H_ */
index 53f96a3..09f37b5 100644 (file)
@@ -58,11 +58,12 @@ ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
 
 ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
 ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs32);
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64);
 
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
@@ -235,6 +236,10 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
 
 ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
 
+/* Maximum number of While() loop iterations before forced abort */
+
+ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
+
 /* Control method single step flag */
 
 ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -290,8 +295,6 @@ ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
 
 ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
 ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
-ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
-ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
 
 /*****************************************************************************
  *
@@ -309,9 +312,10 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
 
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
 ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
 ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
 ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
@@ -346,8 +350,8 @@ ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
 /*
  * Statistic globals
  */
-ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
-ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
 ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
 ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
 ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
index 7ac9800..e820ed8 100644 (file)
@@ -131,6 +131,28 @@ void
 acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                        u32 level, u32 index);
 
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+                          union acpi_operand_object *obj_desc,
+                          struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
+                         union acpi_operand_object *obj_desc,
+                         struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_start_trace_opcode(union acpi_parse_object *op,
+                          struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
+                         struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+                   u8 begin, u8 *aml, char *pathname);
+
 /*
  * exfield - ACPI AML (p-code) execution - field manipulation
  */
index bc60096..6f70826 100644 (file)
@@ -174,8 +174,12 @@ struct acpi_namespace_node {
         */
 #ifdef ACPI_LARGE_NAMESPACE_NODE
        union acpi_parse_object *op;
+       void *method_locals;
+       void *method_args;
        u32 value;
        u32 length;
+       u8 arg_count;
+
 #endif
 };
 
@@ -209,11 +213,9 @@ struct acpi_table_list {
 #define ACPI_ROOT_ORIGIN_ALLOCATED      (1)
 #define ACPI_ROOT_ALLOW_RESIZE          (2)
 
-/* Predefined (fixed) table indexes */
+/* Predefined table indexes */
 
-#define ACPI_TABLE_INDEX_DSDT           (0)
-#define ACPI_TABLE_INDEX_FACS           (1)
-#define ACPI_TABLE_INDEX_X_FACS         (2)
+#define ACPI_INVALID_TABLE_INDEX        (0xFFFFFFFF)
 
 struct acpi_find_context {
        char *search_for;
@@ -404,6 +406,13 @@ struct acpi_simple_repair_info {
 
 #define ACPI_NUM_RTYPES                 5      /* Number of actual object types */
 
+/* Info for running the _REG methods */
+
+struct acpi_reg_walk_info {
+       acpi_adr_space_type space_id;
+       u32 reg_run_count;
+};
+
 /*****************************************************************************
  *
  * Event typedefs and structs
@@ -715,7 +724,7 @@ union acpi_parse_value {
        union acpi_parse_object *arg;   /* arguments and contained ops */
 };
 
-#ifdef ACPI_DISASSEMBLER
+#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
 #define ACPI_DISASM_ONLY_MEMBERS(a)     a;
 #else
 #define ACPI_DISASM_ONLY_MEMBERS(a)
@@ -726,7 +735,7 @@ union acpi_parse_value {
        u8                              descriptor_type; /* To differentiate various internal objs */\
        u8                              flags;          /* Type of Op */\
        u16                             aml_opcode;     /* AML opcode */\
-       u32                             aml_offset;     /* Offset of declaration in AML */\
+       u8                              *aml;           /* Address of declaration in AML */\
        union acpi_parse_object         *next;          /* Next op */\
        struct acpi_namespace_node      *node;          /* For use by interpreter */\
        union acpi_parse_value          value;          /* Value or args associated with the opcode */\
@@ -1103,6 +1112,9 @@ struct acpi_db_method_info {
         *   Index of current thread inside all them created.
         */
        char init_args;
+#ifdef ACPI_DEBUGGER
+       acpi_object_type arg_types[4];
+#endif
        char *arguments[4];
        char num_threads_str[11];
        char id_of_thread_str[11];
@@ -1119,6 +1131,10 @@ struct acpi_integrity_info {
 #define ACPI_DB_CONSOLE_OUTPUT          0x02
 #define ACPI_DB_DUPLICATE_OUTPUT        0x03
 
+struct acpi_object_info {
+       u32 types[ACPI_TOTAL_TYPES];
+};
+
 /*****************************************************************************
  *
  * Debug
index c240bdf..e85366c 100644 (file)
 #define ACPI_MUL_32(a)                  _ACPI_MUL(a, 5)
 #define ACPI_MOD_32(a)                  _ACPI_MOD(a, 32)
 
+/* Test for ASCII character */
+
+#define ACPI_IS_ASCII(c)                ((c) < 0x80)
+
+/* Signed integers */
+
+#define ACPI_SIGN_POSITIVE              0
+#define ACPI_SIGN_NEGATIVE              1
+
 /*
  * Rounding macros (Power of two boundaries only)
  */
index 0dd0882..ea0d907 100644 (file)
@@ -272,17 +272,20 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
  */
 u32 acpi_ns_opens_scope(acpi_object_type type);
 
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
-                           acpi_size size, char *name_buffer);
-
 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
 
+u32
+acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
+                             char *full_path, u32 path_size, u8 no_trailing);
+
+char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
+                                     u8 no_trailing);
+
 char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
 
 acpi_status
 acpi_ns_handle_to_pathname(acpi_handle target_handle,
-                          struct acpi_buffer *buffer);
+                          struct acpi_buffer *buffer, u8 no_trailing);
 
 u8
 acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
index c81d98d..0bd02c4 100644 (file)
@@ -176,6 +176,7 @@ struct acpi_object_method {
        u8 param_count;
        u8 sync_level;
        union acpi_operand_object *mutex;
+       union acpi_operand_object *node;
        u8 *aml_start;
        union {
                acpi_internal_method implementation;
index 0cdd2fc..6021ccf 100644 (file)
@@ -225,11 +225,11 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *root);
 /*
  * psutils - parser utilities
  */
-union acpi_parse_object *acpi_ps_create_scope_op(void);
+union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml);
 
 void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode);
 
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode);
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml);
 
 void acpi_ps_free_op(union acpi_parse_object *op);
 
index 44997ca..f9992dc 100644 (file)
@@ -85,7 +85,7 @@ struct acpi_walk_state {
        u8 namespace_override;  /* Override existing objects */
        u8 result_size;         /* Total elements for the result stack */
        u8 result_count;        /* Current number of occupied elements of result stack */
-       u32 aml_offset;
+       u8 *aml;
        u32 arg_types;
        u32 method_breakpoint;  /* For single stepping */
        u32 user_breakpoint;    /* User AML breakpoint */
index 7e0b6f1..f7731f2 100644 (file)
@@ -154,14 +154,20 @@ void acpi_tb_check_dsdt_header(void);
 struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
 
 void
-acpi_tb_install_table_with_override(u32 table_index,
-                                   struct acpi_table_desc *new_table_desc,
-                                   u8 override);
+acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
+                                   u8 override, u32 *table_index);
 
 acpi_status
 acpi_tb_install_fixed_table(acpi_physical_address address,
-                           char *signature, u32 table_index);
+                           char *signature, u32 *table_index);
 
 acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
 
+u8 acpi_is_valid_signature(char *signature);
+
+/*
+ * tbxfload
+ */
+acpi_status acpi_tb_load_namespace(void);
+
 #endif                         /* __ACTABLES_H__ */
index 6de0d35..fb2aa50 100644 (file)
@@ -166,6 +166,17 @@ struct acpi_pkg_info {
 #define DB_DWORD_DISPLAY    4
 #define DB_QWORD_DISPLAY    8
 
+/*
+ * utnonansi - Non-ANSI C library functions
+ */
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+
 /*
  * utglobal - Global data structures and procedures
  */
@@ -205,8 +216,6 @@ acpi_status acpi_ut_hardware_initialize(void);
 
 void acpi_ut_subsystem_shutdown(void);
 
-#define ACPI_IS_ASCII(c)  ((c) < 0x80)
-
 /*
  * utcopy - Object construction and conversion interfaces
  */
@@ -508,7 +517,7 @@ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
 
 u8 acpi_ut_is_pci_root_bridge(char *id);
 
-#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
 u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
 #endif
 
@@ -567,16 +576,6 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
 /*
  * utstring - String and character utilities
  */
-void acpi_ut_strupr(char *src_string);
-
-#ifdef ACPI_ASL_COMPILER
-void acpi_ut_strlwr(char *src_string);
-
-int acpi_ut_stricmp(char *string1, char *string2);
-#endif
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
-
 void acpi_ut_print_string(char *string, u16 max_length);
 
 #if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
index 3e69897..e2ab59e 100644 (file)
@@ -86,7 +86,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
 
        /* Allocate a new parser op to be the root of the parsed tree */
 
-       op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+       op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
        if (!op) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }
@@ -129,7 +129,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
 
        /* Evaluate the deferred arguments */
 
-       op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+       op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
        if (!op) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }
index 39da9da..435fc16 100644 (file)
@@ -212,7 +212,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
                         */
                        control_state->control.loop_count++;
                        if (control_state->control.loop_count >
-                           ACPI_MAX_LOOP_ITERATIONS) {
+                           acpi_gbl_max_loop_iterations) {
                                status = AE_AML_INFINITE_LOOP;
                                break;
                        }
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
new file mode 100644 (file)
index 0000000..309556e
--- /dev/null
@@ -0,0 +1,231 @@
+/******************************************************************************
+ *
+ * Module Name: dsdebug - Parser/Interpreter interface - debugging
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+#ifdef ACPI_DISASSEMBLER
+#include "acdisasm.h"
+#endif
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsdebug")
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void
+acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
+                           const char *message);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_print_node_pathname
+ *
+ * PARAMETERS:  node            - Object
+ *              message         - Prefix message
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ *              Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
+                           const char *message)
+{
+       struct acpi_buffer buffer;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(ds_print_node_pathname);
+
+       if (!node) {
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[NULL NAME]"));
+               return_VOID;
+       }
+
+       /* Convert handle to full pathname and print it (with supplied message) */
+
+       buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+       status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
+       if (ACPI_SUCCESS(status)) {
+               if (message) {
+                       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "%s ",
+                                             message));
+               }
+
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[%s] (Node %p)",
+                                     (char *)buffer.pointer, node));
+               ACPI_FREE(buffer.pointer);
+       }
+
+       return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_dump_method_stack
+ *
+ * PARAMETERS:  status          - Method execution status
+ *              walk_state      - Current state of the parse tree walk
+ *              op              - Executing parse op
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Called when a method has been aborted because of an error.
+ *              Dumps the method execution stack.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_dump_method_stack(acpi_status status,
+                         struct acpi_walk_state *walk_state,
+                         union acpi_parse_object *op)
+{
+       union acpi_parse_object *next;
+       struct acpi_thread_state *thread;
+       struct acpi_walk_state *next_walk_state;
+       struct acpi_namespace_node *previous_method = NULL;
+       union acpi_operand_object *method_desc;
+
+       ACPI_FUNCTION_TRACE(ds_dump_method_stack);
+
+       /* Ignore control codes, they are not errors */
+
+       if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+               return_VOID;
+       }
+
+       /* We may be executing a deferred opcode */
+
+       if (walk_state->deferred_node) {
+               ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                                 "Executing subtree for Buffer/Package/Region\n"));
+               return_VOID;
+       }
+
+       /*
+        * If there is no Thread, we are not actually executing a method.
+        * This can happen when the iASL compiler calls the interpreter
+        * to perform constant folding.
+        */
+       thread = walk_state->thread;
+       if (!thread) {
+               return_VOID;
+       }
+
+       /* Display exception and method name */
+
+       ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                         "\n**** Exception %s during execution of method ",
+                         acpi_format_exception(status)));
+       acpi_ds_print_node_pathname(walk_state->method_node, NULL);
+
+       /* Display stack of executing methods */
+
+       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
+                             "\n\nMethod Execution Stack:\n"));
+       next_walk_state = thread->walk_state_list;
+
+       /* Walk list of linked walk states */
+
+       while (next_walk_state) {
+               method_desc = next_walk_state->method_desc;
+               if (method_desc) {
+                       acpi_ex_stop_trace_method((struct acpi_namespace_node *)
+                                                 method_desc->method.node,
+                                                 method_desc, walk_state);
+               }
+
+               ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                                 "    Method [%4.4s] executing: ",
+                                 acpi_ut_get_node_name(next_walk_state->
+                                                       method_node)));
+
+               /* First method is the currently executing method */
+
+               if (next_walk_state == walk_state) {
+                       if (op) {
+
+                               /* Display currently executing ASL statement */
+
+                               next = op->common.next;
+                               op->common.next = NULL;
+
+#ifdef ACPI_DISASSEMBLER
+                               acpi_dm_disassemble(next_walk_state, op,
+                                                   ACPI_UINT32_MAX);
+#endif
+                               op->common.next = next;
+                       }
+               } else {
+                       /*
+                        * This method has called another method
+                        * NOTE: the method call parse subtree is already deleted at this
+                        * point, so we cannot disassemble the method invocation.
+                        */
+                       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
+                                             "Call to method "));
+                       acpi_ds_print_node_pathname(previous_method, NULL);
+               }
+
+               previous_method = next_walk_state->method_node;
+               next_walk_state = next_walk_state->next;
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "\n"));
+       }
+
+       return_VOID;
+}
+
+#else
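+/* Stub used when neither ACPI_DEBUG_OUTPUT nor ACPI_DEBUGGER is compiled in */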
+void
+acpi_ds_dump_method_stack(acpi_status status,
+                         struct acpi_walk_state *walk_state,
+                         union acpi_parse_object *op)
+{
+       return;
+}
+
+#endif
index 95779e8..920f1b1 100644 (file)
@@ -237,12 +237,22 @@ acpi_ds_initialize_objects(u32 table_index,
                return_ACPI_STATUS(status);
        }
 
+       /* DSDT is always the first AML table */
+
+       if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+                                     "\nInitializing Namespace objects:\n"));
+       }
+
+       /* Summary of objects initialized */
+
        ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-                             "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, "
-                             "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n",
-                             table->signature, owner_id, info.object_count,
-                             info.device_count, info.op_region_count,
-                             info.method_count, info.serial_method_count,
+                             "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, "
+                             "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n",
+                             table->signature, table->oem_table_id, owner_id,
+                             info.object_count, info.device_count,
+                             info.op_region_count, info.method_count,
+                             info.serial_method_count,
                              info.non_serial_method_count,
                              info.serialized_method_count));
 
index 85bb951..bc32f31 100644 (file)
 #include "acdispat.h"
 #include "acinterp.h"
 #include "acnamesp.h"
-#ifdef ACPI_DISASSEMBLER
-#include "acdisasm.h"
-#endif
 #include "acparser.h"
 #include "amlcode.h"
+#include "acdebug.h"
 
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsmethod")
@@ -103,7 +101,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
 
        /* Create/Init a root op for the method parse tree */
 
-       op = acpi_ps_alloc_op(AML_METHOD_OP);
+       op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
        if (!op) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }
@@ -205,7 +203,7 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
  * RETURN:      Status
  *
  * DESCRIPTION: Called on method error. Invoke the global exception handler if
- *              present, dump the method data if the disassembler is configured
+ *              present, dump the method data if the debugger is configured
  *
  *              Note: Allows the exception handler to change the status code
  *
@@ -214,6 +212,8 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
 acpi_status
 acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
 {
+       u32 aml_offset;
+
        ACPI_FUNCTION_ENTRY();
 
        /* Ignore AE_OK and control exception codes */
@@ -234,26 +234,30 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
                 * Handler can map the exception code to anything it wants, including
                 * AE_OK, in which case the executing method will not be aborted.
                 */
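+               /* Compute the AML offset of the error for the exception handler */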
+               aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
+                                               walk_state->parser_state.
+                                               aml_start);
+
                status = acpi_gbl_exception_handler(status,
                                                    walk_state->method_node ?
                                                    walk_state->method_node->
                                                    name.integer : 0,
                                                    walk_state->opcode,
-                                                   walk_state->aml_offset,
-                                                   NULL);
+                                                   aml_offset, NULL);
                acpi_ex_enter_interpreter();
        }
 
        acpi_ds_clear_implicit_return(walk_state);
 
-#ifdef ACPI_DISASSEMBLER
        if (ACPI_FAILURE(status)) {
+               acpi_ds_dump_method_stack(status, walk_state, walk_state->op);
 
-               /* Display method locals/args if disassembler is present */
+               /* Display method locals/args if debugger is present */
 
-               acpi_dm_dump_method_info(status, walk_state, walk_state->op);
-       }
+#ifdef ACPI_DEBUGGER
+               acpi_db_dump_method_info(status, walk_state);
 #endif
+       }
 
        return (status);
 }
@@ -328,6 +332,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
                return_ACPI_STATUS(AE_NULL_ENTRY);
        }
 
+       acpi_ex_start_trace_method(method_node, obj_desc, walk_state);
+
        /* Prevent wraparound of thread count */
 
        if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
@@ -574,9 +580,7 @@ cleanup:
        /* On error, we must terminate the method properly */
 
        acpi_ds_terminate_control_method(obj_desc, next_walk_state);
-       if (next_walk_state) {
-               acpi_ds_delete_walk_state(next_walk_state);
-       }
+       acpi_ds_delete_walk_state(next_walk_state);
 
        return_ACPI_STATUS(status);
 }
@@ -826,5 +830,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
                }
        }
 
+       acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
+                                 method.node, method_desc, walk_state);
+
        return_VOID;
 }
index ea0cc4e..81d7b98 100644 (file)
@@ -480,8 +480,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
        union acpi_operand_object **operand;
        struct acpi_namespace_node *node;
        union acpi_parse_object *next_op;
-       u32 table_index;
        struct acpi_table_header *table;
+       u32 table_index;
 
        ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
 
@@ -504,6 +504,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
                return_ACPI_STATUS(status);
        }
 
+       operand = &walk_state->operands[0];
+
        /*
         * Resolve the Signature string, oem_id string,
         * and oem_table_id string operands
@@ -511,32 +513,34 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
        status = acpi_ex_resolve_operands(op->common.aml_opcode,
                                          ACPI_WALK_OPERANDS, walk_state);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               goto cleanup;
        }
 
-       operand = &walk_state->operands[0];
-
        /* Find the ACPI table */
 
        status = acpi_tb_find_table(operand[0]->string.pointer,
                                    operand[1]->string.pointer,
                                    operand[2]->string.pointer, &table_index);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               if (status == AE_NOT_FOUND) {
+                       ACPI_ERROR((AE_INFO,
+                                   "ACPI Table [%4.4s] OEM:(%s, %s) not found in RSDT/XSDT",
+                                   operand[0]->string.pointer,
+                                   operand[1]->string.pointer,
+                                   operand[2]->string.pointer));
+               }
+               goto cleanup;
        }
 
-       acpi_ut_remove_reference(operand[0]);
-       acpi_ut_remove_reference(operand[1]);
-       acpi_ut_remove_reference(operand[2]);
-
        status = acpi_get_table_by_index(table_index, &table);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               goto cleanup;
        }
 
        obj_desc = acpi_ns_get_attached_object(node);
        if (!obj_desc) {
-               return_ACPI_STATUS(AE_NOT_EXIST);
+               status = AE_NOT_EXIST;
+               goto cleanup;
        }
 
        obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
@@ -551,6 +555,11 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
 
        obj_desc->region.flags |= AOPOBJ_DATA_VALID;
 
+cleanup:
+       acpi_ut_remove_reference(operand[0]);
+       acpi_ut_remove_reference(operand[1]);
+       acpi_ut_remove_reference(operand[2]);
+
        return_ACPI_STATUS(status);
 }
 
index 845ff44..097188a 100644 (file)
@@ -388,7 +388,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
 
                /* Create a new op */
 
-               op = acpi_ps_alloc_op(walk_state->opcode);
+               op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
                if (!op) {
                        return_ACPI_STATUS(AE_NO_MEMORY);
                }
index fcaa30c..e2c08cd 100644 (file)
@@ -335,7 +335,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 
                /* Create a new op */
 
-               op = acpi_ps_alloc_op(walk_state->opcode);
+               op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
                if (!op) {
                        return_ACPI_STATUS(AE_NO_MEMORY);
                }
index 2ba28a6..5ee79a1 100644 (file)
@@ -626,9 +626,17 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
                            acpi_adr_space_type space_id)
 {
        acpi_status status;
+       struct acpi_reg_walk_info info;
 
        ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
 
+       info.space_id = space_id;
+       info.reg_run_count = 0;
+
+       ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
+                             "    Running _REG methods for SpaceId %s\n",
+                             acpi_ut_get_region_name(info.space_id)));
+
        /*
         * Run all _REG methods for all Operation Regions for this space ID. This
         * is a separate walk in order to handle any interdependencies between
@@ -637,7 +645,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
         */
        status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
                                        ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
-                                       NULL, &space_id, NULL);
+                                       NULL, &info, NULL);
 
        /* Special case for EC: handle "orphan" _REG methods with no region */
 
@@ -645,6 +653,11 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
                acpi_ev_orphan_ec_reg_method(node);
        }
 
+       ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
+                             "    Executed %u _REG methods for SpaceId %s\n",
+                             info.reg_run_count,
+                             acpi_ut_get_region_name(info.space_id)));
+
        return_ACPI_STATUS(status);
 }
 
@@ -664,10 +677,10 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 {
        union acpi_operand_object *obj_desc;
        struct acpi_namespace_node *node;
-       acpi_adr_space_type space_id;
        acpi_status status;
+       struct acpi_reg_walk_info *info;
 
-       space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
+       info = ACPI_CAST_PTR(struct acpi_reg_walk_info, context);
 
        /* Convert and validate the device handle */
 
@@ -696,13 +709,14 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 
        /* Object is a Region */
 
-       if (obj_desc->region.space_id != space_id) {
+       if (obj_desc->region.space_id != info->space_id) {
 
                /* This region is for a different address space, just ignore it */
 
                return (AE_OK);
        }
 
+       info->reg_run_count++;
        status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
        return (status);
 }
index 24a4c5c..b540913 100644 (file)
@@ -162,14 +162,6 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
 
        ACPI_FUNCTION_TRACE(ex_load_table_op);
 
-       /* Validate lengths for the Signature, oem_id, and oem_table_id strings */
-
-       if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
-           (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
-           (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
-               return_ACPI_STATUS(AE_AML_STRING_LIMIT);
-       }
-
        /* Find the ACPI table in the RSDT/XSDT */
 
        status = acpi_tb_find_table(operand[0]->string.pointer,
index aaeea48..ccb7219 100644 (file)
@@ -486,6 +486,7 @@ acpi_ex_create_method(u8 * aml_start,
 
        obj_desc->method.aml_start = aml_start;
        obj_desc->method.aml_length = aml_length;
+       obj_desc->method.node = operand[0];
 
        /*
         * Disassemble the method flags. Split off the arg_count, Serialized
index 815442b..de92458 100644 (file)
 
 #include <acpi/acpi.h>
 #include "accommon.h"
+#include "acnamesp.h"
 #include "acinterp.h"
+#include "acparser.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exdebug")
 
+static union acpi_operand_object *acpi_gbl_trace_method_object = NULL;
+
+/* Local prototypes */
+
+#ifdef ACPI_DEBUG_OUTPUT
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
+#endif
+
 #ifndef ACPI_NO_ERROR_MESSAGES
 /*******************************************************************************
  *
@@ -70,6 +80,7 @@ ACPI_MODULE_NAME("exdebug")
  * enabled if necessary.
  *
  ******************************************************************************/
+
 void
 acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                        u32 level, u32 index)
@@ -308,3 +319,316 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
        return_VOID;
 }
 #endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_interpreter_trace_enabled
+ *
+ * PARAMETERS:  name                - Whether method name should be matched;
+ *                                    this should be checked before starting
+ *                                    the tracer
+ *
+ * RETURN:      TRUE if interpreter trace is enabled.
+ *
+ * DESCRIPTION: Check whether interpreter trace is enabled
+ *
+ ******************************************************************************/
+
+static u8 acpi_ex_interpreter_trace_enabled(char *name)
+{
+
+       /* Check if tracing is enabled */
+
+       if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) {
+               return (FALSE);
+       }
+
+       /*
+        * Check if tracing is filtered:
+        *
+        * 1. If the tracer is started, acpi_gbl_trace_method_object should have
+        *    been filled by the trace starter
+        * 2. If the tracer is not started, acpi_gbl_trace_method_name should be
+        *    matched if it is specified
+        * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should
+        *    not be cleared by the trace stopper during the first match
+        */
+       if (acpi_gbl_trace_method_object) {
+               return (TRUE);
+       }
+       if (name &&
+           (acpi_gbl_trace_method_name &&
+            strcmp(acpi_gbl_trace_method_name, name))) {
+               return (FALSE);
+       }
+       if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) &&
+           !acpi_gbl_trace_method_name) {
+               return (FALSE);
+       }
+
+       return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_get_trace_event_name
+ *
+ * PARAMETERS:  type            - Trace event type
+ *
+ * RETURN:      Trace event name.
+ *
+ * DESCRIPTION: Used to obtain the full trace event name.
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_DEBUG_OUTPUT
+
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type)
+{
+       switch (type) {
+       case ACPI_TRACE_AML_METHOD:
+
+               return "Method";
+
+       case ACPI_TRACE_AML_OPCODE:
+
+               return "Opcode";
+
+       case ACPI_TRACE_AML_REGION:
+
+               return "Region";
+
+       default:
+
+               return "";
+       }
+}
+
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_trace_point
+ *
+ * PARAMETERS:  type                - Trace event type
+ *              begin               - TRUE if before execution
+ *              aml                 - Executed AML address
+ *              pathname            - Object path
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Internal interpreter execution trace.
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+                   u8 begin, u8 *aml, char *pathname)
+{
+
+       ACPI_FUNCTION_NAME(ex_trace_point);
+
+       if (pathname) {
+               ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+                                 "%s %s [0x%p:%s] execution.\n",
+                                 acpi_ex_get_trace_event_name(type),
+                                 begin ? "Begin" : "End", aml, pathname));
+       } else {
+               ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+                                 "%s %s [0x%p] execution.\n",
+                                 acpi_ex_get_trace_event_name(type),
+                                 begin ? "Begin" : "End", aml));
+       }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_start_trace_method
+ *
+ * PARAMETERS:  method_node         - Node of the method
+ *              obj_desc            - The method object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Start control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+                          union acpi_operand_object *obj_desc,
+                          struct acpi_walk_state *walk_state)
+{
+       acpi_status status;
+       char *pathname = NULL;
+       u8 enabled = FALSE;
+
+       ACPI_FUNCTION_NAME(ex_start_trace_method);
+
+       if (method_node) {
+               pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               goto exit;
+       }
+
+       enabled = acpi_ex_interpreter_trace_enabled(pathname);
+       if (enabled && !acpi_gbl_trace_method_object) {
+               acpi_gbl_trace_method_object = obj_desc;
+               acpi_gbl_original_dbg_level = acpi_dbg_level;
+               acpi_gbl_original_dbg_layer = acpi_dbg_layer;
+               acpi_dbg_level = ACPI_TRACE_LEVEL_ALL;
+               acpi_dbg_layer = ACPI_TRACE_LAYER_ALL;
+
+               if (acpi_gbl_trace_dbg_level) {
+                       acpi_dbg_level = acpi_gbl_trace_dbg_level;
+               }
+               if (acpi_gbl_trace_dbg_layer) {
+                       acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
+               }
+       }
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit:
+       if (enabled) {
+               ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE,
+                                obj_desc ? obj_desc->method.aml_start : NULL,
+                                pathname);
+       }
+       if (pathname) {
+               ACPI_FREE(pathname);
+       }
+}
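The start/stop pair follows a save-override-restore pattern for the global debug masks. A standalone sketch of that pattern (names are stand-ins for acpi_dbg_level/acpi_dbg_layer and their saved copies; the "trace everything" default values are assumptions):

static unsigned int dbg_level, dbg_layer;       /* live debug masks            */
static unsigned int saved_level, saved_layer;   /* copies taken at trace start */

static void trace_masks_override(unsigned int level, unsigned int layer)
{
	saved_level = dbg_level;
	saved_layer = dbg_layer;
	dbg_level = level ? level : 0x00FFFFFFu;    /* assumed ACPI_TRACE_LEVEL_ALL */
	dbg_layer = layer ? layer : 0xFFFFFFFFu;    /* assumed ACPI_TRACE_LAYER_ALL */
}

static void trace_masks_restore(void)
{
	dbg_level = saved_level;
	dbg_layer = saved_layer;
}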
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_stop_trace_method
+ *
+ * PARAMETERS:  method_node         - Node of the method
+ *              obj_desc            - The method object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Stop control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
+                         union acpi_operand_object *obj_desc,
+                         struct acpi_walk_state *walk_state)
+{
+       acpi_status status;
+       char *pathname = NULL;
+       u8 enabled;
+
+       ACPI_FUNCTION_NAME(ex_stop_trace_method);
+
+       if (method_node) {
+               pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               goto exit_path;
+       }
+
+       enabled = acpi_ex_interpreter_trace_enabled(NULL);
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+       if (enabled) {
+               ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE,
+                                obj_desc ? obj_desc->method.aml_start : NULL,
+                                pathname);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               goto exit_path;
+       }
+
+       /* Check whether the tracer should be stopped */
+
+       if (acpi_gbl_trace_method_object == obj_desc) {
+
+               /* Disable further tracing if type is one-shot */
+
+               if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) {
+                       acpi_gbl_trace_method_name = NULL;
+               }
+
+               acpi_dbg_level = acpi_gbl_original_dbg_level;
+               acpi_dbg_layer = acpi_gbl_original_dbg_layer;
+               acpi_gbl_trace_method_object = NULL;
+       }
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit_path:
+       if (pathname) {
+               ACPI_FREE(pathname);
+       }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_start_trace_opcode
+ *
+ * PARAMETERS:  op                  - The parser opcode object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Start opcode execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_opcode(union acpi_parse_object *op,
+                          struct acpi_walk_state *walk_state)
+{
+
+       ACPI_FUNCTION_NAME(ex_start_trace_opcode);
+
+       if (acpi_ex_interpreter_trace_enabled(NULL) &&
+           (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
+               ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE,
+                                op->common.aml, op->common.aml_op_name);
+       }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_stop_trace_opcode
+ *
+ * PARAMETERS:  op                  - The parser opcode object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Stop opcode execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
+                         struct acpi_walk_state *walk_state)
+{
+
+       ACPI_FUNCTION_NAME(ex_stop_trace_opcode);
+
+       if (acpi_ex_interpreter_trace_enabled(NULL) &&
+           (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
+               ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE,
+                                op->common.aml, op->common.aml_op_name);
+       }
+}
index 401e7ed..d836f88 100644 (file)
@@ -995,9 +995,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
        if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
                acpi_os_printf(" %p ", obj_desc->reference.node);
 
-               status =
-                   acpi_ns_handle_to_pathname(obj_desc->reference.node,
-                                              &ret_buf);
+               status = acpi_ns_handle_to_pathname(obj_desc->reference.node,
+                                                   &ret_buf, TRUE);
                if (ACPI_FAILURE(status)) {
                        acpi_os_printf(" Could not convert name to pathname\n");
                } else {
index c7e3b92..1b372ef 100644 (file)
@@ -126,7 +126,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
        if (!source_desc) {
                ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p",
                            node->name.ascii, node));
-               return_ACPI_STATUS(AE_AML_NO_OPERAND);
+               return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
        }
 
        /*
index b6b7f3a..7b10912 100644 (file)
@@ -337,8 +337,9 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
                         acpi_object_type * return_type,
                         union acpi_operand_object **return_desc)
 {
-       union acpi_operand_object *obj_desc = (void *)operand;
-       struct acpi_namespace_node *node;
+       union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
+       struct acpi_namespace_node *node =
+           ACPI_CAST_PTR(struct acpi_namespace_node, operand);
        acpi_object_type type;
        acpi_status status;
 
@@ -355,9 +356,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
        case ACPI_DESC_TYPE_NAMED:
 
                type = ((struct acpi_namespace_node *)obj_desc)->type;
-               obj_desc =
-                   acpi_ns_get_attached_object((struct acpi_namespace_node *)
-                                               obj_desc);
+               obj_desc = acpi_ns_get_attached_object(node);
 
                /* If we had an Alias node, use the attached object for type info */
 
@@ -368,6 +367,13 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
                                                         acpi_namespace_node *)
                                                        obj_desc);
                }
+
+               if (!obj_desc) {
+                       ACPI_ERROR((AE_INFO,
+                                   "[%4.4s] Node is unresolved or uninitialized",
+                                   acpi_ut_get_node_name(node)));
+                       return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
+               }
                break;
 
        default:
index 52dfd0d..d62a616 100644 (file)
@@ -160,19 +160,8 @@ acpi_set_firmware_waking_vectors(acpi_physical_address physical_address,
 
        ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors);
 
-       /* If Hardware Reduced flag is set, there is no FACS */
-
-       if (acpi_gbl_reduced_hardware) {
-               return_ACPI_STATUS (AE_OK);
-       }
-
-       if (acpi_gbl_facs32) {
-               (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32,
-                                                         physical_address,
-                                                         physical_address64);
-       }
-       if (acpi_gbl_facs64) {
-               (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64,
+       if (acpi_gbl_FACS) {
+               (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_FACS,
                                                          physical_address,
                                                          physical_address64);
        }
index 80670cb..7eba578 100644 (file)
@@ -274,6 +274,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
                acpi_ex_exit_interpreter();
 
                if (ACPI_FAILURE(status)) {
+                       info->return_object = NULL;
                        goto cleanup;
                }
 
@@ -464,7 +465,8 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
 
        status = acpi_ns_evaluate(info);
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n",
+       ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES,
+                         "Executed module-level code at %p\n",
                          method_obj->method.aml_start));
 
        /* Delete a possible implicit return value (in slack mode) */
index bd6cd4a..14ab836 100644 (file)
@@ -111,7 +111,21 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
        if (ACPI_SUCCESS(status)) {
                acpi_tb_set_table_loaded_flag(table_index, TRUE);
        } else {
-               (void)acpi_tb_release_owner_id(table_index);
+               /*
+                * On error, delete any namespace objects created by this table.
+                * We cannot initialize these objects, so delete them. There are
+                * a couple of especially bad cases:
+                * AE_ALREADY_EXISTS - namespace collision.
+                * AE_NOT_FOUND - the target of a Scope operator does not
+                * exist. This target of Scope must already exist in the
+                * namespace, as per the ACPI specification.
+                */
+               (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+               acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
+                                                 tables[table_index].owner_id);
+               acpi_tb_release_owner_id(table_index);
+
+               return_ACPI_STATUS(status);
        }
 
 unlock:
index d293d97..8934b4e 100644 (file)
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsnames")
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_build_external_path
- *
- * PARAMETERS:  node            - NS node whose pathname is needed
- *              size            - Size of the pathname
- *              *name_buffer    - Where to return the pathname
- *
- * RETURN:      Status
- *              Places the pathname into the name_buffer, in external format
- *              (name segments separated by path separators)
- *
- * DESCRIPTION: Generate a full pathaname
- *
- ******************************************************************************/
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
-                           acpi_size size, char *name_buffer)
-{
-       acpi_size index;
-       struct acpi_namespace_node *parent_node;
-
-       ACPI_FUNCTION_ENTRY();
-
-       /* Special case for root */
-
-       index = size - 1;
-       if (index < ACPI_NAME_SIZE) {
-               name_buffer[0] = AML_ROOT_PREFIX;
-               name_buffer[1] = 0;
-               return (AE_OK);
-       }
-
-       /* Store terminator byte, then build name backwards */
-
-       parent_node = node;
-       name_buffer[index] = 0;
-
-       while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
-               index -= ACPI_NAME_SIZE;
-
-               /* Put the name into the buffer */
-
-               ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
-               parent_node = parent_node->parent;
-
-               /* Prefix name with the path separator */
-
-               index--;
-               name_buffer[index] = ACPI_PATH_SEPARATOR;
-       }
-
-       /* Overwrite final separator with the root prefix character */
-
-       name_buffer[index] = AML_ROOT_PREFIX;
-
-       if (index != 0) {
-               ACPI_ERROR((AE_INFO,
-                           "Could not construct external pathname; index=%u, size=%u, Path=%s",
-                           (u32) index, (u32) size, &name_buffer[size]));
-
-               return (AE_BAD_PARAMETER);
-       }
-
-       return (AE_OK);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_get_external_pathname
@@ -130,37 +63,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
  *              for error and debug statements.
  *
  ******************************************************************************/
-
 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 {
-       acpi_status status;
        char *name_buffer;
-       acpi_size size;
 
        ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
 
-       /* Calculate required buffer size based on depth below root */
-
-       size = acpi_ns_get_pathname_length(node);
-       if (!size) {
-               return_PTR(NULL);
-       }
-
-       /* Allocate a buffer to be returned to caller */
-
-       name_buffer = ACPI_ALLOCATE_ZEROED(size);
-       if (!name_buffer) {
-               ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
-               return_PTR(NULL);
-       }
-
-       /* Build the path in the allocated buffer */
-
-       status = acpi_ns_build_external_path(node, size, name_buffer);
-       if (ACPI_FAILURE(status)) {
-               ACPI_FREE(name_buffer);
-               return_PTR(NULL);
-       }
+       name_buffer = acpi_ns_get_normalized_pathname(node, FALSE);
 
        return_PTR(name_buffer);
 }
@@ -180,33 +89,12 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
 {
        acpi_size size;
-       struct acpi_namespace_node *next_node;
 
        ACPI_FUNCTION_ENTRY();
 
-       /*
-        * Compute length of pathname as 5 * number of name segments.
-        * Go back up the parent tree to the root
-        */
-       size = 0;
-       next_node = node;
+       size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
 
-       while (next_node && (next_node != acpi_gbl_root_node)) {
-               if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
-                       ACPI_ERROR((AE_INFO,
-                                   "Invalid Namespace Node (%p) while traversing namespace",
-                                   next_node));
-                       return (0);
-               }
-               size += ACPI_PATH_SEGMENT_LENGTH;
-               next_node = next_node->parent;
-       }
-
-       if (!size) {
-               size = 1;       /* Root node case */
-       }
-
-       return (size + 1);      /* +1 for null string terminator */
+       return (size);
 }
 
 /*******************************************************************************
@@ -216,6 +104,8 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
  * PARAMETERS:  target_handle           - Handle of named object whose name is
  *                                        to be found
  *              buffer                  - Where the pathname is returned
+ *              no_trailing             - Remove trailing '_' from each name
+ *                                        segment
  *
  * RETURN:      Status, Buffer is filled with pathname if status is AE_OK
  *
@@ -225,7 +115,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
 
 acpi_status
 acpi_ns_handle_to_pathname(acpi_handle target_handle,
-                          struct acpi_buffer * buffer)
+                          struct acpi_buffer * buffer, u8 no_trailing)
 {
        acpi_status status;
        struct acpi_namespace_node *node;
@@ -240,7 +130,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
 
        /* Determine size required for the caller buffer */
 
-       required_size = acpi_ns_get_pathname_length(node);
+       required_size =
+           acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
        if (!required_size) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
@@ -254,8 +145,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
 
        /* Build the path in the caller buffer */
 
-       status =
-           acpi_ns_build_external_path(node, required_size, buffer->pointer);
+       (void)acpi_ns_build_normalized_path(node, buffer->pointer,
+                                           required_size, no_trailing);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
@@ -264,3 +155,149 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
                          (char *)buffer->pointer, (u32) required_size));
        return_ACPI_STATUS(AE_OK);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_build_normalized_path
+ *
+ * PARAMETERS:  node        - Namespace node
+ *              full_path   - Where the path name is returned
+ *              path_size   - Size of returned path name buffer
+ *              no_trailing - Remove trailing '_' from each name segment
+ *
+ * RETURN:      Return 1 if the AML path is empty; otherwise return the length
+ *              of the pathname + 1, which means 'FullPath' contains a
+ *              trailing null.
+ *
+ * DESCRIPTION: Build and return a full namespace pathname.
+ *              Note that if 'FullPath' is not large enough to contain the
+ *              namespace node's pathname, the actual required buffer length
+ *              is returned, and it will be greater than 'PathSize'. Callers
+ *              can therefore check the return value to determine the buffer
+ *              size required for 'FullPath'.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
+                             char *full_path, u32 path_size, u8 no_trailing)
+{
+       u32 length = 0, i;
+       char name[ACPI_NAME_SIZE];
+       u8 do_no_trailing;
+       char c, *left, *right;
+       struct acpi_namespace_node *next_node;
+
+       ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node);
+
+#define ACPI_PATH_PUT8(path, size, byte, length)    \
+       do {                                            \
+               if ((length) < (size))                      \
+               {                                           \
+                       (path)[(length)] = (byte);              \
+               }                                           \
+               (length)++;                                 \
+       } while (0)
+
+       /*
+        * Make sure the path_size is correct, so that we don't need to
+        * validate both full_path and path_size.
+        */
+       if (!full_path) {
+               path_size = 0;
+       }
+
+       if (!node) {
+               goto build_trailing_null;
+       }
+
+       next_node = node;
+       while (next_node && next_node != acpi_gbl_root_node) {
+               if (next_node != node) {
+                       ACPI_PATH_PUT8(full_path, path_size,
+                                      AML_DUAL_NAME_PREFIX, length);
+               }
+               ACPI_MOVE_32_TO_32(name, &next_node->name);
+               do_no_trailing = no_trailing;
+               for (i = 0; i < 4; i++) {
+                       c = name[4 - i - 1];
+                       if (do_no_trailing && c != '_') {
+                               do_no_trailing = FALSE;
+                       }
+                       if (!do_no_trailing) {
+                               ACPI_PATH_PUT8(full_path, path_size, c, length);
+                       }
+               }
+               next_node = next_node->parent;
+       }
+       ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length);
+
+       /* Reverse the path string */
+
+       if (length <= path_size) {
+               left = full_path;
+               right = full_path + length - 1;
+               while (left < right) {
+                       c = *left;
+                       *left++ = *right;
+                       *right-- = c;
+               }
+       }
+
+       /* Append the trailing null */
+
+build_trailing_null:
+       ACPI_PATH_PUT8(full_path, path_size, '\0', length);
+
+#undef ACPI_PATH_PUT8
+
+       return_UINT32(length);
+}
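A self-contained demonstration of the build-backwards-then-reverse technique used above, including the no_trailing trimming of '_' characters (plain C; the segment list, buffer size and lack of overflow bookkeeping are simplifications for illustration):

#include <stdio.h>

/* Leaf-to-root list of 4-character name segments, e.g. for \_SB_.PCI0.LPCB */
static const char *segments[] = { "LPCB", "PCI0", "_SB_" };

int main(void)
{
	char path[64];
	size_t len = 0, n = sizeof(segments) / sizeof(segments[0]);

	/* Build the path backwards, leaf first, trimming trailing '_' */
	for (size_t s = 0; s < n; s++) {
		if (s)
			path[len++] = '.';      /* separator between segments */
		const char *name = segments[s];
		int trim = 1;
		for (int i = 3; i >= 0; i--) {  /* last character first */
			if (trim && name[i] == '_')
				continue;       /* drop trailing underscores */
			trim = 0;
			path[len++] = name[i];
		}
	}
	path[len++] = '\\';                     /* root prefix */

	/* Reverse in place, then terminate */
	for (size_t l = 0, r = len - 1; l < r; l++, r--) {
		char c = path[l]; path[l] = path[r]; path[r] = c;
	}
	path[len] = '\0';

	printf("%s\n", path);                   /* prints \_SB.PCI0.LPCB */
	return 0;
}

The trailing underscore of _SB_ is dropped while its leading underscore is kept, matching the do_no_trailing handling above.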
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_normalized_pathname
+ *
+ * PARAMETERS:  node            - Namespace node whose pathname is needed
+ *              no_trailing     - Remove trailing '_' from each name segment
+ *
+ * RETURN:      Pointer to storage containing the fully qualified name of
+ *              the node, in external format (name segments separated by path
+ *              separators).
+ *
+ * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
+ *              for error and debug statements. All trailing '_' are removed
+ *              from the full pathname if 'NoTrailing' is specified.
+ *
+ ******************************************************************************/
+
+char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
+                                     u8 no_trailing)
+{
+       char *name_buffer;
+       acpi_size size;
+
+       ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node);
+
+       /* Calculate required buffer size based on depth below root */
+
+       size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
+       if (!size) {
+               return_PTR(NULL);
+       }
+
+       /* Allocate a buffer to be returned to caller */
+
+       name_buffer = ACPI_ALLOCATE_ZEROED(size);
+       if (!name_buffer) {
+               ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
+               return_PTR(NULL);
+       }
+
+       /* Build the path in the allocated buffer */
+
+       (void)acpi_ns_build_normalized_path(node, name_buffer, size,
+                                           no_trailing);
+
+       return_PTR(name_buffer);
+}
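As the description of acpi_ns_build_normalized_path notes, the return value doubles as a size probe. A hypothetical caller-side check (sketch only; 'buf' and 'size' are an existing caller buffer, not ACPICA names):

	u32 used = acpi_ns_build_normalized_path(node, buf, size, no_trailing);
	if (used > size) {
		/*
		 * Buffer was too small: the contents of 'buf' are not usable
		 * (the reversal step is skipped on overflow), but 'used' is
		 * the size actually required, so reallocate to 'used' bytes
		 * and build the path again.
		 */
	}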
index 57a4cfe..3736d43 100644 (file)
@@ -70,7 +70,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
 {
        union acpi_parse_object *parse_root;
        acpi_status status;
-       u32 aml_length;
+       u32 aml_length;
        u8 *aml_start;
        struct acpi_walk_state *walk_state;
        struct acpi_table_header *table;
@@ -78,6 +78,20 @@ acpi_ns_one_complete_parse(u32 pass_number,
 
        ACPI_FUNCTION_TRACE(ns_one_complete_parse);
 
+       status = acpi_get_table_by_index(table_index, &table);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Table must consist of at least a complete header */
+
+       if (table->length < sizeof(struct acpi_table_header)) {
+               return_ACPI_STATUS(AE_BAD_HEADER);
+       }
+
+       aml_start = (u8 *)table + sizeof(struct acpi_table_header);
+       aml_length = table->length - sizeof(struct acpi_table_header);
+
        status = acpi_tb_get_owner_id(table_index, &owner_id);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
@@ -85,7 +99,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
 
        /* Create and init a Root Node */
 
-       parse_root = acpi_ps_create_scope_op();
+       parse_root = acpi_ps_create_scope_op(aml_start);
        if (!parse_root) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }
@@ -98,23 +112,12 @@ acpi_ns_one_complete_parse(u32 pass_number,
                return_ACPI_STATUS(AE_NO_MEMORY);
        }
 
-       status = acpi_get_table_by_index(table_index, &table);
+       status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
+                                      aml_start, aml_length, NULL,
+                                      (u8)pass_number);
        if (ACPI_FAILURE(status)) {
                acpi_ds_delete_walk_state(walk_state);
-               acpi_ps_free_op(parse_root);
-               return_ACPI_STATUS(status);
-       }
-
-       /* Table must consist of at least a complete header */
-
-       if (table->length < sizeof(struct acpi_table_header)) {
-               status = AE_BAD_HEADER;
-       } else {
-               aml_start = (u8 *) table + sizeof(struct acpi_table_header);
-               aml_length = table->length - sizeof(struct acpi_table_header);
-               status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
-                                              aml_start, aml_length, NULL,
-                                              (u8) pass_number);
+               goto cleanup;
        }
 
        /* Found OSDT table, enable the namespace override feature */
@@ -124,11 +127,6 @@ acpi_ns_one_complete_parse(u32 pass_number,
                walk_state->namespace_override = TRUE;
        }
 
-       if (ACPI_FAILURE(status)) {
-               acpi_ds_delete_walk_state(walk_state);
-               goto cleanup;
-       }
-
        /* start_node is the default location to load the table */
 
        if (start_node && start_node != acpi_gbl_root_node) {
index 8d8104b..de325ae 100644 (file)
@@ -83,7 +83,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
 
        buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
 
-       status = acpi_ns_handle_to_pathname(node, &buffer);
+       status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
        if (ACPI_SUCCESS(status)) {
                if (message) {
                        acpi_os_printf("%s ", message);
@@ -596,6 +596,23 @@ void acpi_ns_terminate(void)
 
        ACPI_FUNCTION_TRACE(ns_terminate);
 
+#ifdef ACPI_EXEC_APP
+       {
+               union acpi_operand_object *prev;
+               union acpi_operand_object *next;
+
+               /* Delete any module-level code blocks */
+
+               next = acpi_gbl_module_code_list;
+               while (next) {
+                       prev = next;
+                       next = next->method.mutex;
+                       prev->method.mutex = NULL;      /* Clear the Mutex (cheated) field */
+                       acpi_ut_remove_reference(prev);
+               }
+       }
+#endif
+
        /*
         * Free the entire namespace -- all nodes and all objects
         * attached to the nodes
index 9ff643b..4b4d2f4 100644 (file)
@@ -172,11 +172,15 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
                return (status);
        }
 
-       if (name_type == ACPI_FULL_PATHNAME) {
+       if (name_type == ACPI_FULL_PATHNAME ||
+           name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
 
                /* Get the full pathname (From the namespace root) */
 
-               status = acpi_ns_handle_to_pathname(handle, buffer);
+               status = acpi_ns_handle_to_pathname(handle, buffer,
+                                                   name_type ==
+                                                   ACPI_FULL_PATHNAME ? FALSE :
+                                                   TRUE);
                return (status);
        }
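With the new name type, a caller can ask for the trailing-underscore-free form directly. An in-kernel usage sketch (assumes 'handle' is a valid acpi_handle; freeing the ACPI_ALLOCATE_LOCAL_BUFFER result with kfree() follows the usual kernel convention):

	struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
	acpi_status status;

	status = acpi_get_name(handle, ACPI_FULL_PATHNAME_NO_TRAILING, &buffer);
	if (ACPI_SUCCESS(status)) {
		pr_info("object path: %s\n", (char *)buffer.pointer);
		kfree(buffer.pointer);
	}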
 
index 6d03877..29d8b7b 100644 (file)
@@ -287,7 +287,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
                                  "Control Method - %p Desc %p Path=%p\n", node,
                                  method_desc, path));
 
-               name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+               name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start);
                if (!name_op) {
                        return_ACPI_STATUS(AE_NO_MEMORY);
                }
@@ -484,7 +484,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
 static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
                                                       *parser_state)
 {
-       u32 aml_offset;
+       u8 *aml;
        union acpi_parse_object *field;
        union acpi_parse_object *arg = NULL;
        u16 opcode;
@@ -498,8 +498,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 
        ACPI_FUNCTION_TRACE(ps_get_next_field);
 
-       aml_offset =
-           (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+       aml = parser_state->aml;
 
        /* Determine field type */
 
@@ -536,13 +535,11 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 
        /* Allocate a new field op */
 
-       field = acpi_ps_alloc_op(opcode);
+       field = acpi_ps_alloc_op(opcode, aml);
        if (!field) {
                return_PTR(NULL);
        }
 
-       field->common.aml_offset = aml_offset;
-
        /* Decode the field type */
 
        switch (opcode) {
@@ -604,6 +601,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
                 * Argument for Connection operator can be either a Buffer
                 * (resource descriptor), or a name_string.
                 */
+               aml = parser_state->aml;
                if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
                        parser_state->aml++;
 
@@ -616,7 +614,8 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 
                                /* Non-empty list */
 
-                               arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+                               arg =
+                                   acpi_ps_alloc_op(AML_INT_BYTELIST_OP, aml);
                                if (!arg) {
                                        acpi_ps_free_op(field);
                                        return_PTR(NULL);
@@ -665,7 +664,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 
                        parser_state->aml = pkg_end;
                } else {
-                       arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+                       arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, aml);
                        if (!arg) {
                                acpi_ps_free_op(field);
                                return_PTR(NULL);
@@ -730,7 +729,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
 
                /* Constants, strings, and namestrings are all the same size */
 
-               arg = acpi_ps_alloc_op(AML_BYTE_OP);
+               arg = acpi_ps_alloc_op(AML_BYTE_OP, parser_state->aml);
                if (!arg) {
                        return_ACPI_STATUS(AE_NO_MEMORY);
                }
@@ -777,7 +776,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
 
                        /* Non-empty list */
 
-                       arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+                       arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP,
+                                              parser_state->aml);
                        if (!arg) {
                                return_ACPI_STATUS(AE_NO_MEMORY);
                        }
@@ -807,7 +807,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
 
                        /* null_name or name_string */
 
-                       arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+                       arg =
+                           acpi_ps_alloc_op(AML_INT_NAMEPATH_OP,
+                                            parser_state->aml);
                        if (!arg) {
                                return_ACPI_STATUS(AE_NO_MEMORY);
                        }
index 9043722..03ac8c9 100644 (file)
@@ -51,6 +51,7 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
+#include "acinterp.h"
 #include "acparser.h"
 #include "acdispat.h"
 #include "amlcode.h"
@@ -125,10 +126,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
                 */
                while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
                       && !walk_state->arg_count) {
-                       walk_state->aml_offset =
-                           (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
-                                               walk_state->parser_state.
-                                               aml_start);
+                       walk_state->aml = walk_state->parser_state.aml;
 
                        status =
                            acpi_ps_get_next_arg(walk_state,
@@ -140,7 +138,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
                        }
 
                        if (arg) {
-                               arg->common.aml_offset = walk_state->aml_offset;
                                acpi_ps_append_arg(op, arg);
                        }
 
@@ -324,6 +321,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
        union acpi_operand_object *method_obj;
        struct acpi_namespace_node *parent_node;
 
+       ACPI_FUNCTION_TRACE(ps_link_module_code);
+
        /* Get the tail of the list */
 
        prev = next = acpi_gbl_module_code_list;
@@ -343,9 +342,13 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
 
                method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
                if (!method_obj) {
-                       return;
+                       return_VOID;
                }
 
+               ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+                                 "Create/Link new code block: %p\n",
+                                 method_obj));
+
                if (parent_op->common.node) {
                        parent_node = parent_op->common.node;
                } else {
@@ -370,8 +373,14 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
                        prev->method.mutex = method_obj;
                }
        } else {
+               ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+                                 "Appending to existing code block: %p\n",
+                                 prev));
+
                prev->method.aml_length += aml_length;
        }
+
+       return_VOID;
 }
 
 /*******************************************************************************
@@ -494,16 +503,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                                continue;
                        }
 
-                       op->common.aml_offset = walk_state->aml_offset;
-
-                       if (walk_state->op_info) {
-                               ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-                                                 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
-                                                 (u32) op->common.aml_opcode,
-                                                 walk_state->op_info->name, op,
-                                                 parser_state->aml,
-                                                 op->common.aml_offset));
-                       }
+                       acpi_ex_start_trace_opcode(op, walk_state);
                }
 
                /*
index 2f5ddd8..e54bc2a 100644 (file)
@@ -66,12 +66,11 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
 
 static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
 {
+       u32 aml_offset;
 
        ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
 
-       walk_state->aml_offset =
-           (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
-                              walk_state->parser_state.aml_start);
+       walk_state->aml = walk_state->parser_state.aml;
        walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
 
        /*
@@ -98,10 +97,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
                /* The opcode is unrecognized. Complain and skip unknown opcodes */
 
                if (walk_state->pass_number == 2) {
+                       aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
+                                                       walk_state->
+                                                       parser_state.aml_start);
+
                        ACPI_ERROR((AE_INFO,
                                    "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
                                    walk_state->opcode,
-                                   (u32)(walk_state->aml_offset +
+                                   (u32)(aml_offset +
                                          sizeof(struct acpi_table_header))));
 
                        ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
@@ -115,14 +118,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
                        acpi_os_printf
                            ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
                             walk_state->opcode,
-                            (u32)(walk_state->aml_offset +
+                            (u32)(aml_offset +
                                   sizeof(struct acpi_table_header)));
 
                        /* Dump the context surrounding the invalid opcode */
 
                        acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
                                             aml - 16), 48, DB_BYTE_DISPLAY,
-                                           (walk_state->aml_offset +
+                                           (aml_offset +
                                             sizeof(struct acpi_table_header) -
                                             16));
                        acpi_os_printf(" */\n");
@@ -294,7 +297,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
        /* Create Op structure and append to parent's argument list */
 
        walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
-       op = acpi_ps_alloc_op(walk_state->opcode);
+       op = acpi_ps_alloc_op(walk_state->opcode, aml_op_start);
        if (!op) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }
index a555f7f..98001d7 100644 (file)
@@ -147,6 +147,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                return_ACPI_STATUS(AE_OK);      /* OK for now */
        }
 
+       acpi_ex_stop_trace_opcode(op, walk_state);
+
        /* Delete this op and the subtree below it if asked to */
 
        if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
@@ -185,7 +187,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                         * op must be replaced by a placeholder return op
                         */
                        replacement_op =
-                           acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+                           acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+                                            op->common.aml);
                        if (!replacement_op) {
                                status = AE_NO_MEMORY;
                        }
@@ -209,7 +212,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                            || (op->common.parent->common.aml_opcode ==
                                AML_VAR_PACKAGE_OP)) {
                                replacement_op =
-                                   acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+                                   acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+                                                    op->common.aml);
                                if (!replacement_op) {
                                        status = AE_NO_MEMORY;
                                }
@@ -224,7 +228,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                                        AML_VAR_PACKAGE_OP)) {
                                        replacement_op =
                                            acpi_ps_alloc_op(op->common.
-                                                            aml_opcode);
+                                                            aml_opcode,
+                                                            op->common.aml);
                                        if (!replacement_op) {
                                                status = AE_NO_MEMORY;
                                        } else {
@@ -240,7 +245,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                default:
 
                        replacement_op =
-                           acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+                           acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+                                            op->common.aml);
                        if (!replacement_op) {
                                status = AE_NO_MEMORY;
                        }
index 3244091..183cc1e 100644 (file)
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("psutils")
  * DESCRIPTION: Create a Scope and associated namepath op with the root name
  *
  ******************************************************************************/
-union acpi_parse_object *acpi_ps_create_scope_op(void)
+union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml)
 {
        union acpi_parse_object *scope_op;
 
-       scope_op = acpi_ps_alloc_op(AML_SCOPE_OP);
+       scope_op = acpi_ps_alloc_op(AML_SCOPE_OP, aml);
        if (!scope_op) {
                return (NULL);
        }
@@ -103,6 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
  * FUNCTION:    acpi_ps_alloc_op
  *
  * PARAMETERS:  opcode          - Opcode that will be stored in the new Op
+ *              aml             - Address of the opcode
  *
  * RETURN:      Pointer to the new Op, null on failure
  *
@@ -112,7 +113,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
  *
  ******************************************************************************/
 
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
 {
        union acpi_parse_object *op;
        const struct acpi_opcode_info *op_info;
@@ -149,6 +150,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
 
        if (op) {
                acpi_ps_init_op(op, opcode);
+               op->common.aml = aml;
                op->common.flags = flags;
        }
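Since each parse op now records the raw AML address instead of an offset, a table-relative offset can still be recovered where needed. A sketch of the equivalent computation (the same ACPI_PTR_DIFF pattern the unknown-opcode path earlier in this patch uses with walk_state->aml):

	u32 aml_offset = (u32)ACPI_PTR_DIFF(op->common.aml,
					    walk_state->parser_state.aml_start);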
 
index 841a5ea..4254805 100644 (file)
 #include "acdispat.h"
 #include "acinterp.h"
 #include "actables.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psxface")
 
 /* Local Prototypes */
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
-
 static void
 acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
 
@@ -76,7 +73,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
  ******************************************************************************/
 
 acpi_status
-acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
+acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags)
 {
        acpi_status status;
 
@@ -85,108 +82,14 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
                return (status);
        }
 
-       /* TBDs: Validate name, allow full path or just nameseg */
-
-       acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
+       acpi_gbl_trace_method_name = name;
        acpi_gbl_trace_flags = flags;
-
-       if (debug_level) {
-               acpi_gbl_trace_dbg_level = debug_level;
-       }
-       if (debug_layer) {
-               acpi_gbl_trace_dbg_layer = debug_layer;
-       }
+       acpi_gbl_trace_dbg_level = debug_level;
+       acpi_gbl_trace_dbg_layer = debug_layer;
+       status = AE_OK;
 
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-       return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_start_trace
- *
- * PARAMETERS:  info        - Method info struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Start control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_ENTRY();
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return;
-       }
-
-       if ((!acpi_gbl_trace_method_name) ||
-           (acpi_gbl_trace_method_name != info->node->name.integer)) {
-               goto exit;
-       }
-
-       acpi_gbl_original_dbg_level = acpi_dbg_level;
-       acpi_gbl_original_dbg_layer = acpi_dbg_layer;
-
-       acpi_dbg_level = 0x00FFFFFF;
-       acpi_dbg_layer = ACPI_UINT32_MAX;
-
-       if (acpi_gbl_trace_dbg_level) {
-               acpi_dbg_level = acpi_gbl_trace_dbg_level;
-       }
-       if (acpi_gbl_trace_dbg_layer) {
-               acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
-       }
-
-exit:
-       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_stop_trace
- *
- * PARAMETERS:  info        - Method info struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Stop control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_ENTRY();
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return;
-       }
-
-       if ((!acpi_gbl_trace_method_name) ||
-           (acpi_gbl_trace_method_name != info->node->name.integer)) {
-               goto exit;
-       }
-
-       /* Disable further tracing if type is one-shot */
-
-       if (acpi_gbl_trace_flags & 1) {
-               acpi_gbl_trace_method_name = 0;
-               acpi_gbl_trace_dbg_level = 0;
-               acpi_gbl_trace_dbg_layer = 0;
-       }
-
-       acpi_dbg_level = acpi_gbl_original_dbg_level;
-       acpi_dbg_layer = acpi_gbl_original_dbg_layer;
-
-exit:
-       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+       return (status);
 }
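With acpi_debug_trace() now taking the method pathname as a plain string and storing it directly, arming a one-shot method trace might look like the following (sketch; the ACPI_TRACE_ENABLED and ACPI_TRACE_ONESHOT values are assumed here, and the name is matched by strcmp against the normalized, trailing-underscore-free pathname):

	status = acpi_debug_trace("\\_SB.PCI0.LPCB._INI",
				  ACPI_TRACE_LEVEL_ALL, ACPI_TRACE_LAYER_ALL,
				  ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);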
 
 /*******************************************************************************
@@ -212,7 +115,7 @@ exit:
  *
  ******************************************************************************/
 
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
 {
        acpi_status status;
        union acpi_parse_object *op;
@@ -243,10 +146,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
         */
        acpi_ps_update_parameter_list(info, REF_INCREMENT);
 
-       /* Begin tracing if requested */
-
-       acpi_ps_start_trace(info);
-
        /*
         * Execute the method. Performs parse simultaneously
         */
@@ -256,7 +155,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
 
        /* Create and init a Root Node */
 
-       op = acpi_ps_create_scope_op();
+       op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start);
        if (!op) {
                status = AE_NO_MEMORY;
                goto cleanup;
@@ -326,10 +225,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
 cleanup:
        acpi_ps_delete_parse_tree(op);
 
-       /* End optional tracing */
-
-       acpi_ps_stop_trace(info);
-
        /* Take away the extra reference that we gave the parameters above */
 
        acpi_ps_update_parameter_list(info, REF_DECREMENT);
index 3fa829e..a534442 100644 (file)
@@ -348,7 +348,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                                status =
                                    acpi_ns_handle_to_pathname((acpi_handle)
                                                               node,
-                                                              &path_buffer);
+                                                              &path_buffer,
+                                                              FALSE);
 
                                /* +1 to include null terminator */
 
index 6253001..455a070 100644 (file)
@@ -345,7 +345,7 @@ void acpi_tb_parse_fadt(u32 table_index)
        /* Obtain the DSDT and FACS tables via their addresses within the FADT */
 
        acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
-                                   ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
+                                   ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
 
        /* If Hardware Reduced flag is set, there is no FACS */
 
@@ -354,13 +354,13 @@ void acpi_tb_parse_fadt(u32 table_index)
                        acpi_tb_install_fixed_table((acpi_physical_address)
                                                    acpi_gbl_FADT.facs,
                                                    ACPI_SIG_FACS,
-                                                   ACPI_TABLE_INDEX_FACS);
+                                                   &acpi_gbl_facs_index);
                }
                if (acpi_gbl_FADT.Xfacs) {
                        acpi_tb_install_fixed_table((acpi_physical_address)
                                                    acpi_gbl_FADT.Xfacs,
                                                    ACPI_SIG_FACS,
-                                                   ACPI_TABLE_INDEX_X_FACS);
+                                                   &acpi_gbl_xfacs_index);
                }
        }
 }
index 119c84a..405529d 100644 (file)
@@ -68,12 +68,25 @@ acpi_status
 acpi_tb_find_table(char *signature,
                   char *oem_id, char *oem_table_id, u32 *table_index)
 {
-       u32 i;
        acpi_status status;
        struct acpi_table_header header;
+       u32 i;
 
        ACPI_FUNCTION_TRACE(tb_find_table);
 
+       /* Validate the input table signature */
+
+       if (!acpi_is_valid_signature(signature)) {
+               return_ACPI_STATUS(AE_BAD_SIGNATURE);
+       }
+
+       /* Don't allow the OEM strings to be too long */
+
+       if ((strlen(oem_id) > ACPI_OEM_ID_SIZE) ||
+           (strlen(oem_table_id) > ACPI_OEM_TABLE_ID_SIZE)) {
+               return_ACPI_STATUS(AE_AML_STRING_LIMIT);
+       }
+
        /* Normalize the input strings */
 
        memset(&header, 0, sizeof(struct acpi_table_header));
index 15ea98e..6319b42 100644 (file)
@@ -100,9 +100,9 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
  *
  * FUNCTION:    acpi_tb_install_table_with_override
  *
- * PARAMETERS:  table_index             - Index into root table array
- *              new_table_desc          - New table descriptor to install
+ * PARAMETERS:  new_table_desc          - New table descriptor to install
  *              override                - Whether override should be performed
+ *              table_index             - Where the table index is returned
  *
  * RETURN:      None
  *
@@ -114,12 +114,14 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
  ******************************************************************************/
 
 void
-acpi_tb_install_table_with_override(u32 table_index,
-                                   struct acpi_table_desc *new_table_desc,
-                                   u8 override)
+acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
+                                   u8 override, u32 *table_index)
 {
+       u32 i;
+       acpi_status status;
 
-       if (table_index >= acpi_gbl_root_table_list.current_table_count) {
+       status = acpi_tb_get_next_table_descriptor(&i, NULL);
+       if (ACPI_FAILURE(status)) {
                return;
        }
 
@@ -134,8 +136,7 @@ acpi_tb_install_table_with_override(u32 table_index,
                acpi_tb_override_table(new_table_desc);
        }
 
-       acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
-                                     tables[table_index],
+       acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.tables[i],
                                      new_table_desc->address,
                                      new_table_desc->flags,
                                      new_table_desc->pointer);
@@ -143,9 +144,13 @@ acpi_tb_install_table_with_override(u32 table_index,
        acpi_tb_print_table_header(new_table_desc->address,
                                   new_table_desc->pointer);
 
+       /* This synchronizes acpi_gbl_dsdt_index */
+
+       *table_index = i;
+
        /* Set the global integer width (based upon revision of the DSDT) */
 
-       if (table_index == ACPI_TABLE_INDEX_DSDT) {
+       if (i == acpi_gbl_dsdt_index) {
                acpi_ut_set_integer_width(new_table_desc->pointer->revision);
        }
 }
@@ -157,7 +162,7 @@ acpi_tb_install_table_with_override(u32 table_index,
  * PARAMETERS:  address                 - Physical address of DSDT or FACS
  *              signature               - Table signature, NULL if no need to
  *                                        match
- *              table_index             - Index into root table array
+ *              table_index             - Where the table index is returned
  *
  * RETURN:      Status
  *
@@ -168,7 +173,7 @@ acpi_tb_install_table_with_override(u32 table_index,
 
 acpi_status
 acpi_tb_install_fixed_table(acpi_physical_address address,
-                           char *signature, u32 table_index)
+                           char *signature, u32 *table_index)
 {
        struct acpi_table_desc new_table_desc;
        acpi_status status;
@@ -200,7 +205,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
                goto release_and_exit;
        }
 
-       acpi_tb_install_table_with_override(table_index, &new_table_desc, TRUE);
+       /* Add the table to the global root table list */
+
+       acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index);
 
 release_and_exit:
 
@@ -355,13 +362,8 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
        /* Add the table to the global root table list */
 
-       status = acpi_tb_get_next_table_descriptor(&i, NULL);
-       if (ACPI_FAILURE(status)) {
-               goto release_and_exit;
-       }
-
-       *table_index = i;
-       acpi_tb_install_table_with_override(i, &new_table_desc, override);
+       acpi_tb_install_table_with_override(&new_table_desc, override,
+                                           table_index);
 
 release_and_exit:
 
index 568ac0e..4337990 100644 (file)
@@ -68,28 +68,27 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
 
 acpi_status acpi_tb_initialize_facs(void)
 {
+       struct acpi_table_facs *facs;
 
        /* If Hardware Reduced flag is set, there is no FACS */
 
        if (acpi_gbl_reduced_hardware) {
                acpi_gbl_FACS = NULL;
                return (AE_OK);
-       }
-
-       (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-                                     ACPI_CAST_INDIRECT_PTR(struct
-                                                            acpi_table_header,
-                                                            &acpi_gbl_facs32));
-       (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
-                                     ACPI_CAST_INDIRECT_PTR(struct
-                                                            acpi_table_header,
-                                                            &acpi_gbl_facs64));
-
-       if (acpi_gbl_facs64
-           && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) {
-               acpi_gbl_FACS = acpi_gbl_facs64;
-       } else if (acpi_gbl_facs32) {
-               acpi_gbl_FACS = acpi_gbl_facs32;
+       } else if (acpi_gbl_FADT.Xfacs &&
+                  (!acpi_gbl_FADT.facs
+                   || !acpi_gbl_use32_bit_facs_addresses)) {
+               (void)acpi_get_table_by_index(acpi_gbl_xfacs_index,
+                                             ACPI_CAST_INDIRECT_PTR(struct
+                                                                    acpi_table_header,
+                                                                    &facs));
+               acpi_gbl_FACS = facs;
+       } else if (acpi_gbl_FADT.facs) {
+               (void)acpi_get_table_by_index(acpi_gbl_facs_index,
+                                             ACPI_CAST_INDIRECT_PTR(struct
+                                                                    acpi_table_header,
+                                                                    &facs));
+               acpi_gbl_FACS = facs;
        }
 
        /* If there is no FACS, just continue. There was already an error msg */
@@ -192,7 +191,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
        acpi_tb_uninstall_table(table_desc);
 
        acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
-                                     tables[ACPI_TABLE_INDEX_DSDT],
+                                     tables[acpi_gbl_dsdt_index],
                                      ACPI_PTR_TO_PHYSADDR(new_table),
                                      ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
                                      new_table);
@@ -369,13 +368,6 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                            table_entry_size);
        table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
 
-       /*
-        * First three entries in the table array are reserved for the DSDT
-        * and 32bit/64bit FACS, which are not actually present in the
-        * RSDT/XSDT - they come from the FADT
-        */
-       acpi_gbl_root_table_list.current_table_count = 3;
-
        /* Initialize the root table array from the RSDT/XSDT */
 
        for (i = 0; i < table_count; i++) {
@@ -412,3 +404,36 @@ next_table:
 
        return_ACPI_STATUS(AE_OK);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_is_valid_signature
+ *
+ * PARAMETERS:  signature           - Sig string to be validated
+ *
+ * RETURN:      TRUE if signature is correct length and has valid characters
+ *
+ * DESCRIPTION: Validate an ACPI table signature.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_signature(char *signature)
+{
+       u32 i;
+
+       /* Validate the signature length */
+
+       if (strlen(signature) != ACPI_NAME_SIZE) {
+               return (FALSE);
+       }
+
+       /* Validate each character in the signature */
+
+       for (i = 0; i < ACPI_NAME_SIZE; i++) {
+               if (!acpi_ut_valid_acpi_char(signature[i], i)) {
+                       return (FALSE);
+               }
+       }
+
+       return (TRUE);
+}
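
The new acpi_is_valid_signature() helper above accepts only a 4-character signature whose characters all pass acpi_ut_valid_acpi_char(). A minimal standalone sketch of the same check follows; the character rule (uppercase letters, digits and underscore) is an assumption here, since acpi_ut_valid_acpi_char() itself is not part of this diff.

#include <ctype.h>
#include <string.h>

#define NAME_SIZE 4     /* ACPI_NAME_SIZE: table signatures are 4 chars */

/* Stand-in for acpi_ut_valid_acpi_char(); assumed rule: 'A'-'Z', '0'-'9', '_' */
static int valid_acpi_char(char c)
{
        return c == '_' || isupper((unsigned char)c) || isdigit((unsigned char)c);
}

static int is_valid_signature(const char *signature)
{
        size_t i;

        /* Validate the signature length */
        if (strlen(signature) != NAME_SIZE)
                return 0;

        /* Validate each character in the signature */
        for (i = 0; i < NAME_SIZE; i++) {
                if (!valid_acpi_char(signature[i]))
                        return 0;
        }
        return 1;
}

With these assumptions, is_valid_signature("SSDT") returns 1 while is_valid_signature("TooLong") returns 0.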
index 9682d40..55ee14c 100644 (file)
@@ -51,9 +51,6 @@
 #define _COMPONENT          ACPI_TABLES
 ACPI_MODULE_NAME("tbxfload")
 
-/* Local prototypes */
-static acpi_status acpi_tb_load_namespace(void);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_load_tables
@@ -65,7 +62,6 @@ static acpi_status acpi_tb_load_namespace(void);
  * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
  *
  ******************************************************************************/
-
 acpi_status __init acpi_load_tables(void)
 {
        acpi_status status;
@@ -75,6 +71,13 @@ acpi_status __init acpi_load_tables(void)
        /* Load the namespace from the tables */
 
        status = acpi_tb_load_namespace();
+
+       /* Don't let single failures abort the load */
+
+       if (status == AE_CTRL_TERMINATE) {
+               status = AE_OK;
+       }
+
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "While loading namespace from ACPI tables"));
@@ -97,11 +100,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
  *              the RSDT/XSDT.
  *
  ******************************************************************************/
-static acpi_status acpi_tb_load_namespace(void)
+acpi_status acpi_tb_load_namespace(void)
 {
        acpi_status status;
        u32 i;
        struct acpi_table_header *new_dsdt;
+       struct acpi_table_desc *table;
+       u32 tables_loaded = 0;
+       u32 tables_failed = 0;
 
        ACPI_FUNCTION_TRACE(tb_load_namespace);
 
@@ -111,15 +117,11 @@ static acpi_status acpi_tb_load_namespace(void)
         * Load the namespace. The DSDT is required, but any SSDT and
         * PSDT tables are optional. Verify the DSDT.
         */
+       table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
+
        if (!acpi_gbl_root_table_list.current_table_count ||
-           !ACPI_COMPARE_NAME(&
-                              (acpi_gbl_root_table_list.
-                               tables[ACPI_TABLE_INDEX_DSDT].signature),
-                              ACPI_SIG_DSDT)
-           ||
-           ACPI_FAILURE(acpi_tb_validate_table
-                        (&acpi_gbl_root_table_list.
-                         tables[ACPI_TABLE_INDEX_DSDT]))) {
+           !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
+           ACPI_FAILURE(acpi_tb_validate_table(table))) {
                status = AE_NO_ACPI_TABLES;
                goto unlock_and_exit;
        }
@@ -130,8 +132,7 @@ static acpi_status acpi_tb_load_namespace(void)
         * array can change dynamically as tables are loaded at run-time. Note:
         * .Pointer field is not validated until after call to acpi_tb_validate_table.
         */
-       acpi_gbl_DSDT =
-           acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
+       acpi_gbl_DSDT = table->pointer;
 
        /*
         * Optionally copy the entire DSDT to local memory (instead of simply
@@ -140,7 +141,7 @@ static acpi_status acpi_tb_load_namespace(void)
         * the DSDT.
         */
        if (acpi_gbl_copy_dsdt_locally) {
-               new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+               new_dsdt = acpi_tb_copy_dsdt(acpi_gbl_dsdt_index);
                if (new_dsdt) {
                        acpi_gbl_DSDT = new_dsdt;
                }
@@ -157,41 +158,65 @@ static acpi_status acpi_tb_load_namespace(void)
 
        /* Load and parse tables */
 
-       status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+       status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed"));
+               tables_failed++;
+       } else {
+               tables_loaded++;
        }
 
        /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
 
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
        for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+               table = &acpi_gbl_root_table_list.tables[i];
+
                if (!acpi_gbl_root_table_list.tables[i].address ||
-                   (!ACPI_COMPARE_NAME
-                    (&(acpi_gbl_root_table_list.tables[i].signature),
-                     ACPI_SIG_SSDT)
-                    &&
-                    !ACPI_COMPARE_NAME(&
-                                       (acpi_gbl_root_table_list.tables[i].
-                                        signature), ACPI_SIG_PSDT)
-                    &&
-                    !ACPI_COMPARE_NAME(&
-                                       (acpi_gbl_root_table_list.tables[i].
-                                        signature), ACPI_SIG_OSDT))
-                   ||
-                   ACPI_FAILURE(acpi_tb_validate_table
-                                (&acpi_gbl_root_table_list.tables[i]))) {
+                   (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
+                    && !ACPI_COMPARE_NAME(table->signature.ascii,
+                                          ACPI_SIG_PSDT)
+                    && !ACPI_COMPARE_NAME(table->signature.ascii,
+                                          ACPI_SIG_OSDT))
+                   || ACPI_FAILURE(acpi_tb_validate_table(table))) {
                        continue;
                }
 
                /* Ignore errors while loading tables, get as many as possible */
 
                (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-               (void)acpi_ns_load_table(i, acpi_gbl_root_node);
+               status = acpi_ns_load_table(i, acpi_gbl_root_node);
+               if (ACPI_FAILURE(status)) {
+                       ACPI_EXCEPTION((AE_INFO, status,
+                                       "(%4.4s:%8.8s) while loading table",
+                                       table->signature.ascii,
+                                       table->pointer->oem_table_id));
+                       tables_failed++;
+
+                       ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+                                             "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n",
+                                             table->signature.ascii,
+                                             table->pointer->oem_table_id));
+               } else {
+                       tables_loaded++;
+               }
+
                (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
        }
 
-       ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
+       if (!tables_failed) {
+               ACPI_INFO((AE_INFO,
+                          "%u ACPI AML tables successfully acquired and loaded",
+                          tables_loaded));
+       } else {
+               ACPI_ERROR((AE_INFO,
+                           "%u table load failures, %u successful",
+                           tables_failed, tables_loaded));
+
+               /* Indicate at least one failure */
+
+               status = AE_CTRL_TERMINATE;
+       }
 
 unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
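
The reworked acpi_tb_load_namespace() above no longer aborts on the first bad table: it counts successes and failures, logs both, and returns AE_CTRL_TERMINATE when at least one table failed, which acpi_load_tables() then downgrades to AE_OK. A small standalone sketch of that aggregate-and-continue pattern (the names and the simulated failure below are illustrative only):

#include <stdio.h>

enum status { OK = 0, FAILED = 1, CTRL_TERMINATE = 2 };

/* Hypothetical per-table loader; the real code calls acpi_ns_load_table() */
static enum status load_one_table(int index)
{
        return (index == 2) ? FAILED : OK;      /* pretend table 2 is corrupt */
}

static enum status load_all_tables(int count)
{
        int loaded = 0, failed = 0;

        for (int i = 0; i < count; i++) {
                if (load_one_table(i) != OK) {
                        failed++;               /* log the error and keep going */
                        continue;
                }
                loaded++;
        }

        if (!failed) {
                printf("%d tables successfully acquired and loaded\n", loaded);
                return OK;
        }
        printf("%d table load failures, %d successful\n", failed, loaded);
        return CTRL_TERMINATE;                  /* indicate at least one failure */
}

int main(void)
{
        enum status s = load_all_tables(4);

        /* Mirrors acpi_load_tables(): don't let single failures abort the load */
        if (s == CTRL_TERMINATE)
                s = OK;
        return s;
}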
index cd02693..4146229 100644 (file)
@@ -45,6 +45,7 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utdebug")
@@ -560,8 +561,37 @@ acpi_ut_ptr_exit(u32 line_number,
        }
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_trace_point
+ *
+ * PARAMETERS:  type                - Trace event type
+ *              begin               - TRUE if before execution
+ *              aml                 - Executed AML address
+ *              pathname            - Object path
+ *              pointer             - Pointer to the related object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Interpreter execution trace.
+ *
+ ******************************************************************************/
+
+void
+acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)
+{
+
+       ACPI_FUNCTION_ENTRY();
+
+       acpi_ex_trace_point(type, begin, aml, pathname);
+
+#ifdef ACPI_USE_SYSTEM_TRACER
+       acpi_os_trace_point(type, begin, aml, pathname);
 #endif
+}
 
+ACPI_EXPORT_SYMBOL(acpi_trace_point)
+#endif
 #ifdef ACPI_APPLICATION
 /*******************************************************************************
  *
@@ -575,7 +605,6 @@ acpi_ut_ptr_exit(u32 line_number,
  * DESCRIPTION: Print error message to the console, used by applications.
  *
  ******************************************************************************/
-
 void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
 {
        va_list args;
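
The acpi_trace_point() routine added above is a thin dispatcher: it always forwards to the interpreter tracer and, only when ACPI_USE_SYSTEM_TRACER is defined, also to an OS-level hook. Below is a compile-time sketch of that layering, with hypothetical names standing in for the ACPICA functions:

#include <stdio.h>

/* Define to also forward trace points to a host-specific tracer,
 * mirroring the ACPI_USE_SYSTEM_TRACER hook in acpi_trace_point(). */
/* #define USE_SYSTEM_TRACER */

enum trace_event { TRACE_AML_METHOD, TRACE_AML_OPCODE };

static void ex_trace_point(enum trace_event type, int begin, const char *path)
{
        printf("%s %s (event %d)\n", begin ? "Begin" : "End",
               path ? path : "<null>", type);
}

#ifdef USE_SYSTEM_TRACER
static void os_trace_point(enum trace_event type, int begin, const char *path)
{
        /* e.g. hand the event to the host's tracing facility */
}
#endif

static void trace_point(enum trace_event type, int begin, const char *path)
{
        ex_trace_point(type, begin, path);      /* interpreter-side trace */
#ifdef USE_SYSTEM_TRACER
        os_trace_point(type, begin, path);      /* optional OS-side trace */
#endif
}

int main(void)
{
        trace_point(TRACE_AML_METHOD, 1, "\\_SB.PCI0._INI");
        trace_point(TRACE_AML_METHOD, 0, "\\_SB.PCI0._INI");
        return 0;
}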
index 71fce38..1638312 100644 (file)
@@ -209,6 +209,9 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                        acpi_ut_delete_object_desc(object->method.mutex);
                        object->method.mutex = NULL;
                }
+               if (object->method.node) {
+                       object->method.node = NULL;
+               }
                break;
 
        case ACPI_TYPE_REGION:
index 857af82..75a94f5 100644 (file)
@@ -312,7 +312,7 @@ acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
        /* Get the entire file */
 
        fprintf(stderr,
-               "Reading ACPI table from file %10s - Length %.8u (0x%06X)\n",
+               "Reading ACPI table from file %12s - Length %.8u (0x%06X)\n",
                filename, file_size, file_size);
 
        status = acpi_ut_read_table(file, table, &table_length);
index e402e07..28ab3a1 100644 (file)
@@ -204,11 +204,10 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_acpi_hardware_present = TRUE;
        acpi_gbl_last_owner_id_index = 0;
        acpi_gbl_next_owner_id_offset = 0;
-       acpi_gbl_trace_dbg_level = 0;
-       acpi_gbl_trace_dbg_layer = 0;
        acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
        acpi_gbl_osi_mutex = NULL;
        acpi_gbl_reg_methods_executed = FALSE;
+       acpi_gbl_max_loop_iterations = 0xFFFF;
 
        /* Hardware oriented */
 
index 71b6653..bd4443b 100644 (file)
@@ -75,7 +75,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
        return (FALSE);
 }
 
-#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_is_aml_table
@@ -376,7 +376,7 @@ acpi_ut_display_init_pathname(u8 type,
        /* Get the full pathname to the node */
 
        buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-       status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+       status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
        if (ACPI_FAILURE(status)) {
                return;
        }
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
new file mode 100644 (file)
index 0000000..1d5f6b1
--- /dev/null
@@ -0,0 +1,380 @@
+/*******************************************************************************
+ *
+ * Module Name: utnonansi - Non-ansi C library functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utnonansi")
+
+/*
+ * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
+ * version of strtoul.
+ */
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS:  src_string      - The source string to convert
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert a string to lowercase
+ *
+ ******************************************************************************/
+void acpi_ut_strlwr(char *src_string)
+{
+       char *string;
+
+       ACPI_FUNCTION_ENTRY();
+
+       if (!src_string) {
+               return;
+       }
+
+       /* Walk entire string, lowercasing the letters */
+
+       for (string = src_string; *string; string++) {
+               *string = (char)tolower((int)*string);
+       }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS:  src_string      - The source string to convert
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert a string to uppercase
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+       char *string;
+
+       ACPI_FUNCTION_ENTRY();
+
+       if (!src_string) {
+               return;
+       }
+
+       /* Walk entire string, uppercasing the letters */
+
+       for (string = src_string; *string; string++) {
+               *string = (char)toupper((int)*string);
+       }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_stricmp (stricmp)
+ *
+ * PARAMETERS:  string1             - first string to compare
+ *              string2             - second string to compare
+ *
+ * RETURN:      int that signifies string relationship. Zero means strings
+ *              are equal.
+ *
+ * DESCRIPTION: Case-insensitive string compare. Implementation of the
+ *              non-ANSI stricmp function.
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+       int c1;
+       int c2;
+
+       do {
+               c1 = tolower((int)*string1);
+               c2 = tolower((int)*string2);
+
+               string1++;
+               string2++;
+       }
+       while ((c1 == c2) && (c1));
+
+       return (c1 - c2);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strtoul64
+ *
+ * PARAMETERS:  string          - Null terminated string
+ *              base            - Radix of the string: 16 or ACPI_ANY_BASE;
+ *                                ACPI_ANY_BASE means 'in behalf of to_integer'
+ *              ret_integer     - Where the converted integer is returned
+ *
+ * RETURN:      Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ *              32-bit or 64-bit conversion, depending on the current mode
+ *              of the interpreter.
+ *
+ * NOTE:        Does not support Octal strings, not needed.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+{
+       u32 this_digit = 0;
+       u64 return_value = 0;
+       u64 quotient;
+       u64 dividend;
+       u32 to_integer_op = (base == ACPI_ANY_BASE);
+       u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+       u8 valid_digits = 0;
+       u8 sign_of0x = 0;
+       u8 term = 0;
+
+       ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
+
+       switch (base) {
+       case ACPI_ANY_BASE:
+       case 16:
+
+               break;
+
+       default:
+
+               /* Invalid Base */
+
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       if (!string) {
+               goto error_exit;
+       }
+
+       /* Skip over any white space in the buffer */
+
+       while ((*string) && (isspace((int)*string) || *string == '\t')) {
+               string++;
+       }
+
+       if (to_integer_op) {
+               /*
+                * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+                * We need to determine if it is decimal or hexadecimal.
+                */
+               if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
+                       sign_of0x = 1;
+                       base = 16;
+
+                       /* Skip over the leading '0x' */
+                       string += 2;
+               } else {
+                       base = 10;
+               }
+       }
+
+       /* Any string left? Check that '0x' is not followed by white space. */
+
+       if (!(*string) || isspace((int)*string) || *string == '\t') {
+               if (to_integer_op) {
+                       goto error_exit;
+               } else {
+                       goto all_done;
+               }
+       }
+
+       /*
+        * Perform a 32-bit or 64-bit conversion, depending upon the current
+        * execution mode of the interpreter
+        */
+       dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+       /* Main loop: convert the string to a 32- or 64-bit integer */
+
+       while (*string) {
+               if (isdigit((int)*string)) {
+
+                       /* Convert ASCII 0-9 to Decimal value */
+
+                       this_digit = ((u8)*string) - '0';
+               } else if (base == 10) {
+
+                       /* Digit is out of range; possible in to_integer case only */
+
+                       term = 1;
+               } else {
+                       this_digit = (u8)toupper((int)*string);
+                       if (isxdigit((int)this_digit)) {
+
+                               /* Convert ASCII Hex char to value */
+
+                               this_digit = this_digit - 'A' + 10;
+                       } else {
+                               term = 1;
+                       }
+               }
+
+               if (term) {
+                       if (to_integer_op) {
+                               goto error_exit;
+                       } else {
+                               break;
+                       }
+               } else if ((valid_digits == 0) && (this_digit == 0)
+                          && !sign_of0x) {
+
+                       /* Skip zeros */
+                       string++;
+                       continue;
+               }
+
+               valid_digits++;
+
+               if (sign_of0x
+                   && ((valid_digits > 16)
+                       || ((valid_digits > 8) && mode32))) {
+                       /*
+                        * This is to_integer operation case.
+                        * No any restrictions for string-to-integer conversion,
+                        * see ACPI spec.
+                        */
+                       goto error_exit;
+               }
+
+               /* Divide the digit into the correct position */
+
+               (void)acpi_ut_short_divide((dividend - (u64)this_digit),
+                                          base, &quotient, NULL);
+
+               if (return_value > quotient) {
+                       if (to_integer_op) {
+                               goto error_exit;
+                       } else {
+                               break;
+                       }
+               }
+
+               return_value *= base;
+               return_value += this_digit;
+               string++;
+       }
+
+       /* All done, normal exit */
+
+all_done:
+
+       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+                         ACPI_FORMAT_UINT64(return_value)));
+
+       *ret_integer = return_value;
+       return_ACPI_STATUS(AE_OK);
+
+error_exit:
+       /* Base was set/validated above */
+
+       if (base == 10) {
+               return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+       } else {
+               return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+       }
+}
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
+ *              functions. This is the size of the Destination buffer.
+ *
+ * RETURN:      TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ *              the result of the operation will not overflow the output string
+ *              buffer.
+ *
+ * NOTE:        These functions are typically only helpful for processing
+ *              user input and command lines. For most ACPICA code, the
+ *              required buffer length is precisely calculated before buffer
+ *              allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+       if (strlen(source) >= dest_size) {
+               return (TRUE);
+       }
+
+       strcpy(dest, source);
+       return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+       if ((strlen(dest) + strlen(source)) >= dest_size) {
+               return (TRUE);
+       }
+
+       strcat(dest, source);
+       return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+                    acpi_size dest_size,
+                    char *source, acpi_size max_transfer_length)
+{
+       acpi_size actual_transfer_length;
+
+       actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+
+       if ((strlen(dest) + actual_transfer_length) >= dest_size) {
+               return (TRUE);
+       }
+
+       strncat(dest, source, max_transfer_length);
+       return (FALSE);
+}
+#endif
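
The safe string helpers moved into utnonansi.c return TRUE when the operation would overflow the destination, so callers check the return value before relying on the result. A standalone usage sketch of the acpi_ut_safe_strcat() contract (the buffer contents are just an example):

#include <stdio.h>
#include <string.h>

/* Same contract as acpi_ut_safe_strcat(): returns nonzero if the
 * concatenation would overflow dest (dest_size includes the NUL). */
static int safe_strcat(char *dest, size_t dest_size, const char *source)
{
        if ((strlen(dest) + strlen(source)) >= dest_size)
                return 1;

        strcat(dest, source);
        return 0;
}

int main(void)
{
        char path[16] = "\\_SB.";

        if (safe_strcat(path, sizeof(path), "PCI0.LPCB"))
                fprintf(stderr, "buffer too small\n");
        else
                printf("%s\n", path);   /* prints \_SB.PCI0.LPCB */

        return 0;
}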
index 8f3c883..4ddd105 100644 (file)
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utstring")
 
-/*
- * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
- * version of strtoul.
- */
-#ifdef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strlwr (strlwr)
- *
- * PARAMETERS:  src_string      - The source string to convert
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert string to lowercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-void acpi_ut_strlwr(char *src_string)
-{
-       char *string;
-
-       ACPI_FUNCTION_ENTRY();
-
-       if (!src_string) {
-               return;
-       }
-
-       /* Walk entire string, lowercasing the letters */
-
-       for (string = src_string; *string; string++) {
-               *string = (char)tolower((int)*string);
-       }
-
-       return;
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ut_stricmp (stricmp)
- *
- * PARAMETERS:  string1             - first string to compare
- *              string2             - second string to compare
- *
- * RETURN:      int that signifies string relationship. Zero means strings
- *              are equal.
- *
- * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
- *              strings with no case sensitivity)
- *
- ******************************************************************************/
-
-int acpi_ut_stricmp(char *string1, char *string2)
-{
-       int c1;
-       int c2;
-
-       do {
-               c1 = tolower((int)*string1);
-               c2 = tolower((int)*string2);
-
-               string1++;
-               string2++;
-       }
-       while ((c1 == c2) && (c1));
-
-       return (c1 - c2);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strupr (strupr)
- *
- * PARAMETERS:  src_string      - The source string to convert
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert string to uppercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strupr(char *src_string)
-{
-       char *string;
-
-       ACPI_FUNCTION_ENTRY();
-
-       if (!src_string) {
-               return;
-       }
-
-       /* Walk entire string, uppercasing the letters */
-
-       for (string = src_string; *string; string++) {
-               *string = (char)toupper((int)*string);
-       }
-
-       return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strtoul64
- *
- * PARAMETERS:  string          - Null terminated string
- *              base            - Radix of the string: 16 or ACPI_ANY_BASE;
- *                                ACPI_ANY_BASE means 'in behalf of to_integer'
- *              ret_integer     - Where the converted integer is returned
- *
- * RETURN:      Status and Converted value
- *
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- *              32-bit or 64-bit conversion, depending on the current mode
- *              of the interpreter.
- *              NOTE: Does not support Octal strings, not needed.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
-{
-       u32 this_digit = 0;
-       u64 return_value = 0;
-       u64 quotient;
-       u64 dividend;
-       u32 to_integer_op = (base == ACPI_ANY_BASE);
-       u32 mode32 = (acpi_gbl_integer_byte_width == 4);
-       u8 valid_digits = 0;
-       u8 sign_of0x = 0;
-       u8 term = 0;
-
-       ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
-
-       switch (base) {
-       case ACPI_ANY_BASE:
-       case 16:
-
-               break;
-
-       default:
-
-               /* Invalid Base */
-
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
-       }
-
-       if (!string) {
-               goto error_exit;
-       }
-
-       /* Skip over any white space in the buffer */
-
-       while ((*string) && (isspace((int)*string) || *string == '\t')) {
-               string++;
-       }
-
-       if (to_integer_op) {
-               /*
-                * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
-                * We need to determine if it is decimal or hexadecimal.
-                */
-               if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
-                       sign_of0x = 1;
-                       base = 16;
-
-                       /* Skip over the leading '0x' */
-                       string += 2;
-               } else {
-                       base = 10;
-               }
-       }
-
-       /* Any string left? Check that '0x' is not followed by white space. */
-
-       if (!(*string) || isspace((int)*string) || *string == '\t') {
-               if (to_integer_op) {
-                       goto error_exit;
-               } else {
-                       goto all_done;
-               }
-       }
-
-       /*
-        * Perform a 32-bit or 64-bit conversion, depending upon the current
-        * execution mode of the interpreter
-        */
-       dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
-       /* Main loop: convert the string to a 32- or 64-bit integer */
-
-       while (*string) {
-               if (isdigit((int)*string)) {
-
-                       /* Convert ASCII 0-9 to Decimal value */
-
-                       this_digit = ((u8)*string) - '0';
-               } else if (base == 10) {
-
-                       /* Digit is out of range; possible in to_integer case only */
-
-                       term = 1;
-               } else {
-                       this_digit = (u8)toupper((int)*string);
-                       if (isxdigit((int)this_digit)) {
-
-                               /* Convert ASCII Hex char to value */
-
-                               this_digit = this_digit - 'A' + 10;
-                       } else {
-                               term = 1;
-                       }
-               }
-
-               if (term) {
-                       if (to_integer_op) {
-                               goto error_exit;
-                       } else {
-                               break;
-                       }
-               } else if ((valid_digits == 0) && (this_digit == 0)
-                          && !sign_of0x) {
-
-                       /* Skip zeros */
-                       string++;
-                       continue;
-               }
-
-               valid_digits++;
-
-               if (sign_of0x
-                   && ((valid_digits > 16)
-                       || ((valid_digits > 8) && mode32))) {
-                       /*
-                        * This is to_integer operation case.
-                        * No any restrictions for string-to-integer conversion,
-                        * see ACPI spec.
-                        */
-                       goto error_exit;
-               }
-
-               /* Divide the digit into the correct position */
-
-               (void)acpi_ut_short_divide((dividend - (u64)this_digit),
-                                          base, &quotient, NULL);
-
-               if (return_value > quotient) {
-                       if (to_integer_op) {
-                               goto error_exit;
-                       } else {
-                               break;
-                       }
-               }
-
-               return_value *= base;
-               return_value += this_digit;
-               string++;
-       }
-
-       /* All done, normal exit */
-
-all_done:
-
-       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
-                         ACPI_FORMAT_UINT64(return_value)));
-
-       *ret_integer = return_value;
-       return_ACPI_STATUS(AE_OK);
-
-error_exit:
-       /* Base was set/validated above */
-
-       if (base == 10) {
-               return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
-       } else {
-               return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
-       }
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_print_string
@@ -342,7 +62,6 @@ error_exit:
  *              sequences.
  *
  ******************************************************************************/
-
 void acpi_ut_print_string(char *string, u16 max_length)
 {
        u32 i;
@@ -584,64 +303,3 @@ void ut_convert_backslashes(char *pathname)
        }
 }
 #endif
-
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
- *
- * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
- *              functions. This is the size of the Destination buffer.
- *
- * RETURN:      TRUE if the operation would overflow the destination buffer.
- *
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- *              the result of the operation will not overflow the output string
- *              buffer.
- *
- * NOTE:        These functions are typically only helpful for processing
- *              user input and command lines. For most ACPICA code, the
- *              required buffer length is precisely calculated before buffer
- *              allocation, so the use of these functions is unnecessary.
- *
- ******************************************************************************/
-
-u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
-{
-
-       if (strlen(source) >= dest_size) {
-               return (TRUE);
-       }
-
-       strcpy(dest, source);
-       return (FALSE);
-}
-
-u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
-{
-
-       if ((strlen(dest) + strlen(source)) >= dest_size) {
-               return (TRUE);
-       }
-
-       strcat(dest, source);
-       return (FALSE);
-}
-
-u8
-acpi_ut_safe_strncat(char *dest,
-                    acpi_size dest_size,
-                    char *source, acpi_size max_transfer_length)
-{
-       acpi_size actual_transfer_length;
-
-       actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
-
-       if ((strlen(dest) + actual_transfer_length) >= dest_size) {
-               return (TRUE);
-       }
-
-       strncat(dest, source, max_transfer_length);
-       return (FALSE);
-}
-#endif
index 51cf52d..4f33281 100644 (file)
@@ -92,13 +92,6 @@ acpi_status __init acpi_terminate(void)
 
        acpi_ut_mutex_terminate();
 
-#ifdef ACPI_DEBUGGER
-
-       /* Shut down the debugger */
-
-       acpi_db_terminate();
-#endif
-
        /* Now we can shutdown the OS-dependent layer */
 
        status = acpi_os_terminate();
@@ -517,7 +510,8 @@ acpi_decode_pld_buffer(u8 *in_buffer,
 
        /* Parameter validation */
 
-       if (!in_buffer || !return_buffer || (length < 16)) {
+       if (!in_buffer || !return_buffer
+           || (length < ACPI_PLD_REV1_BUFFER_SIZE)) {
                return (AE_BAD_PARAMETER);
        }
 
@@ -567,7 +561,7 @@ acpi_decode_pld_buffer(u8 *in_buffer,
        pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword);
        pld_info->order = ACPI_PLD_GET_ORDER(&dword);
 
-       if (length >= ACPI_PLD_BUFFER_SIZE) {
+       if (length >= ACPI_PLD_REV2_BUFFER_SIZE) {
 
                /* Fifth 32-bit DWord (Revision 2 of _PLD) */
 
index 42a32a6..a7137ec 100644 (file)
@@ -124,17 +124,6 @@ acpi_status __init acpi_initialize_subsystem(void)
                return_ACPI_STATUS(status);
        }
 
-       /* If configured, initialize the AML debugger */
-
-#ifdef ACPI_DEBUGGER
-       status = acpi_db_initialize();
-       if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status,
-                               "During Debugger initialization"));
-               return_ACPI_STATUS(status);
-       }
-#endif
-
        return_ACPI_STATUS(AE_OK);
 }
 
index a85ac07..a2c8d7a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index a095d4f..0431883 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index 04ab5c9..6330f55 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index 3670bba..6682c5d 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index 2bfd53c..23981ac 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index 06e9b41..20b3fcf 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index b3628cc..b719ab3 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 278dc4b..96809cd 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 513e723..46506e7 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -423,6 +419,406 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
        acpi_evaluate_ost(handle, type, ost_code, NULL);
 }
 
+static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
+{
+       struct acpi_device *device = data;
+
+       device->driver->ops.notify(device, event);
+}
+
+static void acpi_device_notify_fixed(void *data)
+{
+       struct acpi_device *device = data;
+
+       /* Fixed hardware devices have no handles */
+       acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
+}
+
+static u32 acpi_device_fixed_event(void *data)
+{
+       acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
+       return ACPI_INTERRUPT_HANDLED;
+}
+
+static int acpi_device_install_notify_handler(struct acpi_device *device)
+{
+       acpi_status status;
+
+       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+               status =
+                   acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+                                                    acpi_device_fixed_event,
+                                                    device);
+       else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+               status =
+                   acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+                                                    acpi_device_fixed_event,
+                                                    device);
+       else
+               status = acpi_install_notify_handler(device->handle,
+                                                    ACPI_DEVICE_NOTIFY,
+                                                    acpi_device_notify,
+                                                    device);
+
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
+       return 0;
+}
+
+static void acpi_device_remove_notify_handler(struct acpi_device *device)
+{
+       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+               acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+                                               acpi_device_fixed_event);
+       else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+               acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+                                               acpi_device_fixed_event);
+       else
+               acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+                                          acpi_device_notify);
+}
+
+/* --------------------------------------------------------------------------
+                             Device Matching
+   -------------------------------------------------------------------------- */
+
+static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
+                                                     const struct device *dev)
+{
+       struct mutex *physical_node_lock = &adev->physical_node_lock;
+
+       mutex_lock(physical_node_lock);
+       if (list_empty(&adev->physical_node_list)) {
+               adev = NULL;
+       } else {
+               const struct acpi_device_physical_node *node;
+
+               node = list_first_entry(&adev->physical_node_list,
+                                       struct acpi_device_physical_node, node);
+               if (node->dev != dev)
+                       adev = NULL;
+       }
+       mutex_unlock(physical_node_lock);
+       return adev;
+}
+
+/**
+ * acpi_device_is_first_physical_node - Is given dev first physical node
+ * @adev: ACPI companion device
+ * @dev: Physical device to check
+ *
+ * Function checks if given @dev is the first physical devices attached to
+ * the ACPI companion device. This distinction is needed in some cases
+ * where the same companion device is shared between many physical devices.
+ *
+ * Note that the caller have to provide valid @adev pointer.
+ */
+bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+                                       const struct device *dev)
+{
+       return !!acpi_primary_dev_companion(adev, dev);
+}
+
+/*
+ * acpi_companion_match() - Can we match via ACPI companion device
+ * @dev: Device in question
+ *
+ * Check if the given device has an ACPI companion and if that companion has
+ * a valid list of PNP IDs, and if the device is the first (primary) physical
+ * device associated with it.  Return the companion pointer if that's the case
+ * or NULL otherwise.
+ *
+ * If multiple physical devices are attached to a single ACPI companion, we need
+ * to be careful.  The usage scenario for this kind of relationship is that all
+ * of the physical devices in question use resources provided by the ACPI
+ * companion.  A typical case is an MFD device where all the sub-devices share
+ * the parent's ACPI companion.  In such cases we can only allow the primary
+ * (first) physical device to be matched with the help of the companion's PNP
+ * IDs.
+ *
+ * Additional physical devices sharing the ACPI companion can still use
+ * resources available from it but they will be matched normally using functions
+ * provided by their bus types (and analogously for their modalias).
+ */
+struct acpi_device *acpi_companion_match(const struct device *dev)
+{
+       struct acpi_device *adev;
+
+       adev = ACPI_COMPANION(dev);
+       if (!adev)
+               return NULL;
+
+       if (list_empty(&adev->pnp.ids))
+               return NULL;
+
+       return acpi_primary_dev_companion(adev, dev);
+}
+
+/**
+ * acpi_of_match_device - Match device object using the "compatible" property.
+ * @adev: ACPI device object to match.
+ * @of_match_table: List of device IDs to match against.
+ *
+ * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
+ * identifiers and a _DSD object with the "compatible" property, use that
+ * property to match against the given list of identifiers.
+ */
+static bool acpi_of_match_device(struct acpi_device *adev,
+                                const struct of_device_id *of_match_table)
+{
+       const union acpi_object *of_compatible, *obj;
+       int i, nval;
+
+       if (!adev)
+               return false;
+
+       of_compatible = adev->data.of_compatible;
+       if (!of_match_table || !of_compatible)
+               return false;
+
+       if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+               nval = of_compatible->package.count;
+               obj = of_compatible->package.elements;
+       } else { /* Must be ACPI_TYPE_STRING. */
+               nval = 1;
+               obj = of_compatible;
+       }
+       /* Now we can look for the driver DT compatible strings */
+       for (i = 0; i < nval; i++, obj++) {
+               const struct of_device_id *id;
+
+               for (id = of_match_table; id->compatible[0]; id++)
+                       if (!strcasecmp(obj->string.pointer, id->compatible))
+                               return true;
+       }
+
+       return false;
+}
+
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+                                   struct acpi_hardware_id *hwid)
+{
+       int i, msk, byte_shift;
+       char buf[3];
+
+       if (!id->cls)
+               return false;
+
+       /* Apply class-code bitmask, before checking each class-code byte */
+       for (i = 1; i <= 3; i++) {
+               byte_shift = 8 * (3 - i);
+               msk = (id->cls_msk >> byte_shift) & 0xFF;
+               if (!msk)
+                       continue;
+
+               sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+               if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+                       return false;
+       }
+       return true;
+}
+
+static const struct acpi_device_id *__acpi_match_device(
+       struct acpi_device *device,
+       const struct acpi_device_id *ids,
+       const struct of_device_id *of_ids)
+{
+       const struct acpi_device_id *id;
+       struct acpi_hardware_id *hwid;
+
+       /*
+        * If the device is not present, it is unnecessary to load device
+        * driver for it.
+        */
+       if (!device || !device->status.present)
+               return NULL;
+
+       list_for_each_entry(hwid, &device->pnp.ids, list) {
+               /* First, check the ACPI/PNP IDs provided by the caller. */
+               for (id = ids; id->id[0] || id->cls; id++) {
+                       if (id->id[0] && !strcmp((char *) id->id, hwid->id))
+                               return id;
+                       else if (id->cls && __acpi_match_device_cls(id, hwid))
+                               return id;
+               }
+
+               /*
+                * Next, check ACPI_DT_NAMESPACE_HID and try to match the
+                * "compatible" property if found.
+                *
+                * The id returned by the below is not valid, but the only
+                * caller passing non-NULL of_ids here is only interested in
+                * whether or not the return value is NULL.
+                */
+               if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
+                   && acpi_of_match_device(device, of_ids))
+                       return id;
+       }
+       return NULL;
+}
+
+/**
+ * acpi_match_device - Match a struct device against a given list of ACPI IDs
+ * @ids: Array of struct acpi_device_id object to match against.
+ * @dev: The device structure to match.
+ *
+ * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
+ * object for that handle and use that object to match against a given list of
+ * device IDs.
+ *
+ * Return a pointer to the first matching ID on success or %NULL on failure.
+ */
+const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
+                                              const struct device *dev)
+{
+       return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_match_device);
+
+int acpi_match_device_ids(struct acpi_device *device,
+                         const struct acpi_device_id *ids)
+{
+       return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+}
+EXPORT_SYMBOL(acpi_match_device_ids);
+
+bool acpi_driver_match_device(struct device *dev,
+                             const struct device_driver *drv)
+{
+       if (!drv->acpi_match_table)
+               return acpi_of_match_device(ACPI_COMPANION(dev),
+                                           drv->of_match_table);
+
+       return !!__acpi_match_device(acpi_companion_match(dev),
+                                    drv->acpi_match_table, drv->of_match_table);
+}
+EXPORT_SYMBOL_GPL(acpi_driver_match_device);
+
+/* --------------------------------------------------------------------------
+                              ACPI Driver Management
+   -------------------------------------------------------------------------- */
+
+/**
+ * acpi_bus_register_driver - register a driver with the ACPI bus
+ * @driver: driver being registered
+ *
+ * Registers a driver with the ACPI bus.  Searches the namespace for all
+ * devices that match the driver's criteria and binds.  Returns zero for
+ * success or a negative error status for failure.
+ */
+int acpi_bus_register_driver(struct acpi_driver *driver)
+{
+       int ret;
+
+       if (acpi_disabled)
+               return -ENODEV;
+       driver->drv.name = driver->name;
+       driver->drv.bus = &acpi_bus_type;
+       driver->drv.owner = driver->owner;
+
+       ret = driver_register(&driver->drv);
+       return ret;
+}
+
+EXPORT_SYMBOL(acpi_bus_register_driver);
+
+/**
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
+ * @driver: driver to unregister
+ *
+ * Unregisters a driver with the ACPI bus.  Searches the namespace for all
+ * devices that match the driver's criteria and unbinds.
+ */
+void acpi_bus_unregister_driver(struct acpi_driver *driver)
+{
+       driver_unregister(&driver->drv);
+}
+
+EXPORT_SYMBOL(acpi_bus_unregister_driver);
+
+/* --------------------------------------------------------------------------
+                              ACPI Bus operations
+   -------------------------------------------------------------------------- */
+
+static int acpi_bus_match(struct device *dev, struct device_driver *drv)
+{
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       struct acpi_driver *acpi_drv = to_acpi_driver(drv);
+
+       return acpi_dev->flags.match_driver
+               && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
+}
+
+static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
+}
+
+static int acpi_device_probe(struct device *dev)
+{
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+       int ret;
+
+       if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
+               return -EINVAL;
+
+       if (!acpi_drv->ops.add)
+               return -ENOSYS;
+
+       ret = acpi_drv->ops.add(acpi_dev);
+       if (ret)
+               return ret;
+
+       acpi_dev->driver = acpi_drv;
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                         "Driver [%s] successfully bound to device [%s]\n",
+                         acpi_drv->name, acpi_dev->pnp.bus_id));
+
+       if (acpi_drv->ops.notify) {
+               ret = acpi_device_install_notify_handler(acpi_dev);
+               if (ret) {
+                       if (acpi_drv->ops.remove)
+                               acpi_drv->ops.remove(acpi_dev);
+
+                       acpi_dev->driver = NULL;
+                       acpi_dev->driver_data = NULL;
+                       return ret;
+               }
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
+                         acpi_drv->name, acpi_dev->pnp.bus_id));
+       get_device(dev);
+       return 0;
+}
+
+static int acpi_device_remove(struct device * dev)
+{
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+       if (acpi_drv) {
+               if (acpi_drv->ops.notify)
+                       acpi_device_remove_notify_handler(acpi_dev);
+               if (acpi_drv->ops.remove)
+                       acpi_drv->ops.remove(acpi_dev);
+       }
+       acpi_dev->driver = NULL;
+       acpi_dev->driver_data = NULL;
+
+       put_device(dev);
+       return 0;
+}
+
+struct bus_type acpi_bus_type = {
+       .name           = "acpi",
+       .match          = acpi_bus_match,
+       .probe          = acpi_device_probe,
+       .remove         = acpi_device_remove,
+       .uevent         = acpi_device_uevent,
+};
+
 /* --------------------------------------------------------------------------
                              Initialization/Cleanup
    -------------------------------------------------------------------------- */
@@ -661,7 +1057,9 @@ static int __init acpi_bus_init(void)
         */
        acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
 
-       return 0;
+       result = bus_register(&acpi_bus_type);
+       if (!result)
+               return 0;
 
        /* Mimic structured exception handling */
       error1:
index 6d5d183..5c3b091 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 6c9ee68..d0918d4 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index c8ead9f..12c2409 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <linux/acpi.h>
index 6b1919f..68bb305 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
 
+#include "internal.h"
+
 #define _COMPONENT             ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("debugfs");
 
index 88dbbb1..4806b7f 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -1123,6 +1119,14 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
        if (dev->pm_domain)
                return -EEXIST;
 
+       /*
+        * Only attach the power domain to the first physical device if the
+        * ACPI companion is shared by multiple devices.  This prevents power
+        * management from being carried out twice for the same companion.
+        */
+       if (!acpi_device_is_first_physical_node(adev, dev))
+               return -EBUSY;
+
        acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
        dev->pm_domain = &acpi_general_pm_domain;
        if (power_on) {
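For orientation, a hedged sketch of the caller side that reaches the new check: a bus probe path attaches the ACPI PM domain and may now see -EBUSY for secondary physical devices sharing one companion. The function name below is illustrative, not taken from this patch.

static int example_bus_probe(struct device *dev)
{
	int ret;

	/* Attach the ACPI PM domain and power the device up */
	ret = acpi_dev_pm_attach(dev, true);
	if (ret == -EBUSY)
		dev_dbg(dev, "shared companion: PM domain stays with its first physical device\n");

	return 0;
}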
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
new file mode 100644 (file)
index 0000000..4ab4582
--- /dev/null
@@ -0,0 +1,521 @@
+/*
+ * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
+ *
+ * Copyright (C) 2015, Intel Corp.
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/nls.h>
+
+#include "internal.h"
+
+/**
+ * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Creates the hid/cid(s) string needed for modalias and uevent,
+ * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * char *modalias: "acpi:IBM0001:ACPI0001"
+ * Return: the length of the string on success,
+ *         0: no _HID and no _CID,
+ *         -EINVAL: output error,
+ *         -ENOMEM: output is truncated
+ */
+static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+                              int size)
+{
+       int len;
+       int count;
+       struct acpi_hardware_id *id;
+
+       /*
+        * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
+        * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
+        * device's list.
+        */
+       count = 0;
+       list_for_each_entry(id, &acpi_dev->pnp.ids, list)
+               if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
+                       count++;
+
+       if (!count)
+               return 0;
+
+       len = snprintf(modalias, size, "acpi:");
+       if (len <= 0)
+               return len;
+
+       size -= len;
+
+       list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
+               if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
+                       continue;
+
+               count = snprintf(&modalias[len], size, "%s:", id->id);
+               if (count < 0)
+                       return -EINVAL;
+
+               if (count >= size)
+                       return -ENOMEM;
+
+               len += count;
+               size -= count;
+       }
+       modalias[len] = '\0';
+       return len;
+}
+
+/**
+ * create_of_modalias - Creates DT compatible string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Expose DT compatible modalias as of:NnameTCcompatible.  This function should
+ * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
+ * ACPI/PNP IDs.
+ */
+static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+                             int size)
+{
+       struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+       const union acpi_object *of_compatible, *obj;
+       int len, count;
+       int i, nval;
+       char *c;
+
+       acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+       /* DT strings are all in lower case */
+       for (c = buf.pointer; *c != '\0'; c++)
+               *c = tolower(*c);
+
+       len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+       ACPI_FREE(buf.pointer);
+
+       if (len <= 0)
+               return len;
+
+       of_compatible = acpi_dev->data.of_compatible;
+       if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+               nval = of_compatible->package.count;
+               obj = of_compatible->package.elements;
+       } else { /* Must be ACPI_TYPE_STRING. */
+               nval = 1;
+               obj = of_compatible;
+       }
+       for (i = 0; i < nval; i++, obj++) {
+               count = snprintf(&modalias[len], size, "C%s",
+                                obj->string.pointer);
+               if (count < 0)
+                       return -EINVAL;
+
+               if (count >= size)
+                       return -ENOMEM;
+
+               len += count;
+               size -= count;
+       }
+       modalias[len] = '\0';
+       return len;
+}
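As a concrete but made-up illustration of what the two helpers above produce for a device named "SENS" carrying both PNP IDs and DT compatible data:

/*
 * Hypothetical outputs (object name, IDs and compatibles are invented):
 *   create_pnp_modalias() -> "acpi:IBM0001:ACPI0001"
 *   create_of_modalias()  -> "of:NsensTCvendor,example-sensor"
 * Note that the ACPI object name is lower-cased before being embedded.
 */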
+
+int __acpi_device_uevent_modalias(struct acpi_device *adev,
+                                 struct kobj_uevent_env *env)
+{
+       int len;
+
+       if (!adev)
+               return -ENODEV;
+
+       if (list_empty(&adev->pnp.ids))
+               return 0;
+
+       if (add_uevent_var(env, "MODALIAS="))
+               return -ENOMEM;
+
+       len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+                                 sizeof(env->buf) - env->buflen);
+       if (len < 0)
+               return len;
+
+       env->buflen += len;
+       if (!adev->data.of_compatible)
+               return 0;
+
+       if (len > 0 && add_uevent_var(env, "MODALIAS="))
+               return -ENOMEM;
+
+       len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+                                sizeof(env->buf) - env->buflen);
+       if (len < 0)
+               return len;
+
+       env->buflen += len;
+
+       return 0;
+}
+
+/**
+ * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
+ *
+ * Create the uevent modalias field for ACPI-enumerated devices.
+ *
+ * Other buses do not support ACPI HIDs & CIDs, so expose them here; e.g. for a
+ * device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
+ */
+int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+{
+       return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
+}
+EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
+
+static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
+{
+       int len, count;
+
+       if (!adev)
+               return -ENODEV;
+
+       if (list_empty(&adev->pnp.ids))
+               return 0;
+
+       len = create_pnp_modalias(adev, buf, size - 1);
+       if (len < 0) {
+               return len;
+       } else if (len > 0) {
+               buf[len++] = '\n';
+               size -= len;
+       }
+       if (!adev->data.of_compatible)
+               return len;
+
+       count = create_of_modalias(adev, buf + len, size - 1);
+       if (count < 0) {
+               return count;
+       } else if (count > 0) {
+               len += count;
+               buf[len++] = '\n';
+       }
+
+       return len;
+}
+
+/**
+ * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
+ *
+ * Create the modalias sysfs attribute for ACPI-enumerated devices.
+ *
+ * Other buses do not support ACPI HIDs & CIDs, so expose them here; e.g. for a
+ * device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+       return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
+}
+EXPORT_SYMBOL_GPL(acpi_device_modalias);
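A hedged sketch of how a bus's modalias_show() can delegate to the helper exported above, falling back to its native format when there is no usable ACPI companion; the function name and fallback format are illustrative.

static ssize_t example_modalias_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int len = acpi_device_modalias(dev, buf, PAGE_SIZE);

	if (len != -ENODEV)
		return len;

	/* No ACPI companion: fall back to the bus's own modalias format. */
	return sprintf(buf, "example:%s\n", dev_name(dev));
}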
+
+static ssize_t
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+       return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
+}
+static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+
+static ssize_t real_power_state_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct acpi_device *adev = to_acpi_device(dev);
+       int state;
+       int ret;
+
+       ret = acpi_device_get_power(adev, &state);
+       if (ret)
+               return ret;
+
+       return sprintf(buf, "%s\n", acpi_power_state_string(state));
+}
+
+static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+
+static ssize_t power_state_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct acpi_device *adev = to_acpi_device(dev);
+
+       return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
+}
+
+static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+
+static ssize_t
+acpi_eject_store(struct device *d, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct acpi_device *acpi_device = to_acpi_device(d);
+       acpi_object_type not_used;
+       acpi_status status;
+
+       if (!count || buf[0] != '1')
+               return -EINVAL;
+
+       if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
+           && !acpi_device->driver)
+               return -ENODEV;
+
+       status = acpi_get_type(acpi_device->handle, &not_used);
+       if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
+               return -ENODEV;
+
+       get_device(&acpi_device->dev);
+       status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
+       if (ACPI_SUCCESS(status))
+               return count;
+
+       put_device(&acpi_device->dev);
+       acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+                         ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+       return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
+}
+
+static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+
+static ssize_t
+acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+       return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
+}
+static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+
+static ssize_t acpi_device_uid_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+       return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
+}
+static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+
+static ssize_t acpi_device_adr_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+       return sprintf(buf, "0x%08x\n",
+                      (unsigned int)(acpi_dev->pnp.bus_address));
+}
+static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+
+static ssize_t
+acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
+       int result;
+
+       result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
+       if (result)
+               goto end;
+
+       result = sprintf(buf, "%s\n", (char*)path.pointer);
+       kfree(path.pointer);
+end:
+       return result;
+}
+static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+
+/* sysfs file that shows description text from the ACPI _STR method */
+static ssize_t description_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf) {
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       int result;
+
+       if (acpi_dev->pnp.str_obj == NULL)
+               return 0;
+
+       /*
+        * The _STR object contains a Unicode identifier for a device.
+        * We need to convert to utf-8 so it can be displayed.
+        */
+       result = utf16s_to_utf8s(
+               (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
+               acpi_dev->pnp.str_obj->buffer.length,
+               UTF16_LITTLE_ENDIAN, buf,
+               PAGE_SIZE);
+
+       buf[result++] = '\n';
+
+       return result;
+}
+static DEVICE_ATTR(description, 0444, description_show, NULL);
+
+static ssize_t
+acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
+                    char *buf) {
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       acpi_status status;
+       unsigned long long sun;
+
+       status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       return sprintf(buf, "%llu\n", sun);
+}
+static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+                               char *buf) {
+       struct acpi_device *acpi_dev = to_acpi_device(dev);
+       acpi_status status;
+       unsigned long long sta;
+
+       status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       return sprintf(buf, "%llu\n", sta);
+}
+static DEVICE_ATTR_RO(status);
+
+/**
+ * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+int acpi_device_setup_files(struct acpi_device *dev)
+{
+       struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+       int result = 0;
+
+       /*
+        * Devices enumerated from the FADT don't have a "path" attribute.
+        */
+       if (dev->handle) {
+               result = device_create_file(&dev->dev, &dev_attr_path);
+               if (result)
+                       goto end;
+       }
+
+       if (!list_empty(&dev->pnp.ids)) {
+               result = device_create_file(&dev->dev, &dev_attr_hid);
+               if (result)
+                       goto end;
+
+               result = device_create_file(&dev->dev, &dev_attr_modalias);
+               if (result)
+                       goto end;
+       }
+
+       /*
+        * If the device has _STR, create a 'description' file.
+        */
+       if (acpi_has_method(dev->handle, "_STR")) {
+               status = acpi_evaluate_object(dev->handle, "_STR",
+                                       NULL, &buffer);
+               if (ACPI_FAILURE(status))
+                       buffer.pointer = NULL;
+               dev->pnp.str_obj = buffer.pointer;
+               result = device_create_file(&dev->dev, &dev_attr_description);
+               if (result)
+                       goto end;
+       }
+
+       if (dev->pnp.type.bus_address)
+               result = device_create_file(&dev->dev, &dev_attr_adr);
+       if (dev->pnp.unique_id)
+               result = device_create_file(&dev->dev, &dev_attr_uid);
+
+       if (acpi_has_method(dev->handle, "_SUN")) {
+               result = device_create_file(&dev->dev, &dev_attr_sun);
+               if (result)
+                       goto end;
+       }
+
+       if (acpi_has_method(dev->handle, "_STA")) {
+               result = device_create_file(&dev->dev, &dev_attr_status);
+               if (result)
+                       goto end;
+       }
+
+        /*
+         * If the device has _EJ0, create an 'eject' file that is used to
+         * trigger hot removal from userland.
+         */
+       if (acpi_has_method(dev->handle, "_EJ0")) {
+               result = device_create_file(&dev->dev, &dev_attr_eject);
+               if (result)
+                       return result;
+       }
+
+       if (dev->flags.power_manageable) {
+               result = device_create_file(&dev->dev, &dev_attr_power_state);
+               if (result)
+                       return result;
+
+               if (dev->power.flags.power_resources)
+                       result = device_create_file(&dev->dev,
+                                                   &dev_attr_real_power_state);
+       }
+
+end:
+       return result;
+}
+
+/**
+ * acpi_device_remove_files - Remove sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+void acpi_device_remove_files(struct acpi_device *dev)
+{
+       if (dev->flags.power_manageable) {
+               device_remove_file(&dev->dev, &dev_attr_power_state);
+               if (dev->power.flags.power_resources)
+                       device_remove_file(&dev->dev,
+                                          &dev_attr_real_power_state);
+       }
+
+       /*
+        * If the device has _STR, remove the 'description' file.
+        */
+       if (acpi_has_method(dev->handle, "_STR")) {
+               kfree(dev->pnp.str_obj);
+               device_remove_file(&dev->dev, &dev_attr_description);
+       }
+       /*
+        * If the device has _EJ0, remove the 'eject' file.
+        */
+       if (acpi_has_method(dev->handle, "_EJ0"))
+               device_remove_file(&dev->dev, &dev_attr_eject);
+
+       if (acpi_has_method(dev->handle, "_SUN"))
+               device_remove_file(&dev->dev, &dev_attr_sun);
+
+       if (dev->pnp.unique_id)
+               device_remove_file(&dev->dev, &dev_attr_uid);
+       if (dev->pnp.type.bus_address)
+               device_remove_file(&dev->dev, &dev_attr_adr);
+       device_remove_file(&dev->dev, &dev_attr_modalias);
+       device_remove_file(&dev->dev, &dev_attr_hid);
+       if (acpi_has_method(dev->handle, "_STA"))
+               device_remove_file(&dev->dev, &dev_attr_status);
+       if (dev->handle)
+               device_remove_file(&dev->dev, &dev_attr_path);
+}
index a688aa2..e8e128d 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 9d4761d..2614a83 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -165,8 +161,16 @@ struct transaction {
        u8 flags;
 };
 
+struct acpi_ec_query {
+       struct transaction transaction;
+       struct work_struct work;
+       struct acpi_ec_query_handler *handler;
+};
+
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
 static void advance_transaction(struct acpi_ec *ec);
+static void acpi_ec_event_handler(struct work_struct *work);
+static void acpi_ec_event_processor(struct work_struct *work);
 
 struct acpi_ec *boot_ec, *first_ec;
 EXPORT_SYMBOL(first_ec);
@@ -978,60 +982,90 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 }
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
-static void acpi_ec_run(void *cxt)
+static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
 {
-       struct acpi_ec_query_handler *handler = cxt;
+       struct acpi_ec_query *q;
+       struct transaction *t;
+
+       q = kzalloc(sizeof(struct acpi_ec_query), GFP_KERNEL);
+       if (!q)
+               return NULL;
+       INIT_WORK(&q->work, acpi_ec_event_processor);
+       t = &q->transaction;
+       t->command = ACPI_EC_COMMAND_QUERY;
+       t->rdata = pval;
+       t->rlen = 1;
+       return q;
+}
+
+static void acpi_ec_delete_query(struct acpi_ec_query *q)
+{
+       if (q) {
+               if (q->handler)
+                       acpi_ec_put_query_handler(q->handler);
+               kfree(q);
+       }
+}
+
+static void acpi_ec_event_processor(struct work_struct *work)
+{
+       struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
+       struct acpi_ec_query_handler *handler = q->handler;
 
-       if (!handler)
-               return;
        ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
        if (handler->func)
                handler->func(handler->data);
        else if (handler->handle)
                acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
        ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
-       acpi_ec_put_query_handler(handler);
+       acpi_ec_delete_query(q);
 }
 
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 {
        u8 value = 0;
        int result;
-       acpi_status status;
        struct acpi_ec_query_handler *handler;
-       struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
-                               .wdata = NULL, .rdata = &value,
-                               .wlen = 0, .rlen = 1};
+       struct acpi_ec_query *q;
+
+       q = acpi_ec_create_query(&value);
+       if (!q)
+               return -ENOMEM;
 
        /*
         * Query the EC to find out which _Qxx method we need to evaluate.
         * Note that successful completion of the query causes the ACPI_EC_SCI
         * bit to be cleared (and thus clearing the interrupt source).
         */
-       result = acpi_ec_transaction(ec, &t);
-       if (result)
-               return result;
-       if (data)
-               *data = value;
+       result = acpi_ec_transaction(ec, &q->transaction);
        if (!value)
-               return -ENODATA;
+               result = -ENODATA;
+       if (result)
+               goto err_exit;
 
        mutex_lock(&ec->mutex);
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
-                       /* have custom handler for this bit */
-                       handler = acpi_ec_get_query_handler(handler);
+                       q->handler = acpi_ec_get_query_handler(handler);
                        ec_dbg_evt("Query(0x%02x) scheduled",
-                                  handler->query_bit);
-                       status = acpi_os_execute((handler->func) ?
-                               OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
-                               acpi_ec_run, handler);
-                       if (ACPI_FAILURE(status))
+                                  q->handler->query_bit);
+                       /*
+                        * It is reported that _Qxx are evaluated in a
+                        * parallel way on Windows:
+                        * https://bugzilla.kernel.org/show_bug.cgi?id=94411
+                        */
+                       if (!schedule_work(&q->work))
                                result = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&ec->mutex);
+
+err_exit:
+       if (result && q)
+               acpi_ec_delete_query(q);
+       if (data)
+               *data = value;
        return result;
 }
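On the consumer side (a hedged illustration, not part of this patch): query handlers are still registered with acpi_ec_add_query_handler(), but after this rework each matching _Qxx event runs from its own work item scheduled with schedule_work() instead of going through acpi_os_execute(). The query bit 0x42 and the callback name are placeholders.

static int example_ec_event(void *data)
{
	/* handle the EC event; runs from the per-query work item */
	return 0;
}

/* e.g. during driver init:
 *   acpi_ec_add_query_handler(first_ec, 0x42, NULL, example_ec_event, NULL);
 */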
 
index bea0bba..e297a48 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index a322710..5c67a6d 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index 4683a96..9e42621 100644 (file)
@@ -13,9 +13,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef _ACPI_INTERNAL_H_
@@ -70,7 +67,7 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val);
 
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *acpi_debugfs_dir;
-int acpi_debugfs_init(void);
+void acpi_debugfs_init(void);
 #else
 static inline void acpi_debugfs_init(void) { return; }
 #endif
@@ -93,10 +90,21 @@ int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *));
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                             int type, unsigned long long sta);
+int acpi_device_setup_files(struct acpi_device *dev);
+void acpi_device_remove_files(struct acpi_device *dev);
 void acpi_device_add_finalize(struct acpi_device *device);
 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
 bool acpi_device_is_present(struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
+bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+                                       const struct device *dev);
+
+/* --------------------------------------------------------------------------
+                     Device Matching and Notification
+   -------------------------------------------------------------------------- */
+struct acpi_device *acpi_companion_match(const struct device *dev);
+int __acpi_device_uevent_modalias(struct acpi_device *adev,
+                                 struct kobj_uevent_env *env);
 
 /* --------------------------------------------------------------------------
                                   Power Resource
index 628a42c..cf0fd96 100644 (file)
@@ -702,11 +702,11 @@ static ssize_t flags_show(struct device *dev,
        u16 flags = to_nfit_memdev(dev)->flags;
 
        return sprintf(buf, "%s%s%s%s%s\n",
-                       flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-                       flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-                       flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-                       flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
-                       flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
+               flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+               flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+               flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+               flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
+               flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
 }
 static DEVICE_ATTR_RO(flags);
 
@@ -849,12 +849,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
                if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
                        continue;
 
-               dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
+               dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
                                nvdimm_name(nvdimm),
-                       mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-                       mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-                       mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-                       mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
+                 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+                 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+                 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+                 mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
 
        }
 
@@ -1024,7 +1024,7 @@ static void wmb_blk(struct nfit_blk *nfit_blk)
                wmb_pmem();
 }
 
-static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
        u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -1032,7 +1032,7 @@ static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
        if (mmio->num_lines)
                offset = to_interleave_offset(offset, mmio);
 
-       return readq(mmio->base + offset);
+       return readl(mmio->base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
index acaa3b4..72b6e9e 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
index 3b8963f..739a4a6 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
@@ -47,6 +43,7 @@
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
 
 #include "internal.h"
 
@@ -83,6 +80,7 @@ static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
 static struct workqueue_struct *kacpi_notify_wq;
 static struct workqueue_struct *kacpi_hotplug_wq;
+static bool acpi_os_initialized;
 
 /*
  * This list of permanent mappings is for memory that may be accessed from
@@ -947,21 +945,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
 
 EXPORT_SYMBOL(acpi_os_write_port);
 
-#ifdef readq
-static inline u64 read64(const volatile void __iomem *addr)
-{
-       return readq(addr);
-}
-#else
-static inline u64 read64(const volatile void __iomem *addr)
-{
-       u64 l, h;
-       l = readl(addr);
-       h = readl(addr+4);
-       return l | (h << 32);
-}
-#endif
-
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
 {
@@ -994,7 +977,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
                *(u32 *) value = readl(virt_addr);
                break;
        case 64:
-               *(u64 *) value = read64(virt_addr);
+               *(u64 *) value = readq(virt_addr);
                break;
        default:
                BUG();
@@ -1008,19 +991,6 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
        return AE_OK;
 }
 
-#ifdef writeq
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-       writeq(val, addr);
-}
-#else
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-       writel(val, addr);
-       writel(val>>32, addr+4);
-}
-#endif
-
 acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
 {
@@ -1049,7 +1019,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
                writel(value, virt_addr);
                break;
        case 64:
-               write64(value, virt_addr);
+               writeq(value, virt_addr);
                break;
        default:
                BUG();
@@ -1316,6 +1286,9 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
        long jiffies;
        int ret = 0;
 
+       if (!acpi_os_initialized)
+               return AE_OK;
+
        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;
 
@@ -1355,6 +1328,9 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
 {
        struct semaphore *sem = (struct semaphore *)handle;
 
+       if (!acpi_os_initialized)
+               return AE_OK;
+
        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;
 
@@ -1863,6 +1839,7 @@ acpi_status __init acpi_os_initialize(void)
                rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
                pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
        }
+       acpi_os_initialized = true;
 
        return AE_OK;
 }
index 304eccb..25fff35 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index cfd7581..3b4ea98 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  * TBD: 
@@ -825,6 +821,22 @@ void acpi_penalize_isa_irq(int irq, int active)
        }
 }
 
+/*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
+ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for
+ * PCI IRQs.
+ */
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+       if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+               if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+                   polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+               else
+                       acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+       }
+}
+
 /*
  * Over-ride default table to reserve additional IRQs for use by ISA
  * e.g. acpi_irq_isa=5
index 1b5569c..393706a 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 139d9e4..7188e53 100644 (file)
  *  WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/kernel.h>
index 93eac53..fcd4ce6 100644 (file)
@@ -1,8 +1,10 @@
 /*
- *  acpi_power.c - ACPI Bus Power Management ($Revision: 39 $)
+ * drivers/acpi/power.c - ACPI Power Resources management.
  *
- *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 - 2015 Intel Corp.
+ * Author: Andy Grover <andrew.grover@intel.com>
+ * Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
  * ACPI power-managed devices may be controlled in two ways:
  * 1. via "Device Specific (D-State) Control"
  * 2. via "Power Resource Control".
- * This module is used to manage devices relying on Power Resource Control.
+ * The code below deals with ACPI Power Resources control.
  * 
- * An ACPI "power resource object" describes a software controllable power
- * plane, clock plane, or other resource used by a power managed device.
+ * An ACPI "power resource object" represents a software controllable power
+ * plane, clock plane, or other resource depended on by a device.
+ *
  * A device may rely on multiple power resources, and a power resource
  * may be shared by multiple devices.
  */
index d9f7158..51e658f 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -159,38 +155,28 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata acpi_cpu_notifier = {
+static struct notifier_block acpi_cpu_notifier = {
            .notifier_call = acpi_cpu_soft_notify,
 };
 
-static int __acpi_processor_start(struct acpi_device *device)
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
+static int acpi_pss_perf_init(struct acpi_processor *pr,
+               struct acpi_device *device)
 {
-       struct acpi_processor *pr = acpi_driver_data(device);
-       acpi_status status;
        int result = 0;
 
-       if (!pr)
-               return -ENODEV;
-
-       if (pr->flags.need_hotplug_init)
-               return 0;
-
-#ifdef CONFIG_CPU_FREQ
        acpi_processor_ppc_has_changed(pr, 0);
-#endif
+
        acpi_processor_get_throttling_info(pr);
 
        if (pr->flags.throttling)
                pr->flags.limit = 1;
 
-       if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-               acpi_processor_power_init(pr);
-
        pr->cdev = thermal_cooling_device_register("Processor", device,
                                                   &processor_cooling_ops);
        if (IS_ERR(pr->cdev)) {
                result = PTR_ERR(pr->cdev);
-               goto err_power_exit;
+               return result;
        }
 
        dev_dbg(&device->dev, "registered as cooling_device%d\n",
@@ -204,6 +190,7 @@ static int __acpi_processor_start(struct acpi_device *device)
                        "Failed to create sysfs link 'thermal_cooling'\n");
                goto err_thermal_unregister;
        }
+
        result = sysfs_create_link(&pr->cdev->device.kobj,
                                   &device->dev.kobj,
                                   "device");
@@ -213,17 +200,61 @@ static int __acpi_processor_start(struct acpi_device *device)
                goto err_remove_sysfs_thermal;
        }
 
-       status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-                                            acpi_processor_notify, device);
-       if (ACPI_SUCCESS(status))
-               return 0;
-
        sysfs_remove_link(&pr->cdev->device.kobj, "device");
  err_remove_sysfs_thermal:
        sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
  err_thermal_unregister:
        thermal_cooling_device_unregister(pr->cdev);
- err_power_exit:
+
+       return result;
+}
+
+static void acpi_pss_perf_exit(struct acpi_processor *pr,
+               struct acpi_device *device)
+{
+       if (pr->cdev) {
+               sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+               sysfs_remove_link(&pr->cdev->device.kobj, "device");
+               thermal_cooling_device_unregister(pr->cdev);
+               pr->cdev = NULL;
+       }
+}
+#else
+static inline int acpi_pss_perf_init(struct acpi_processor *pr,
+               struct acpi_device *device)
+{
+       return 0;
+}
+
+static inline void acpi_pss_perf_exit(struct acpi_processor *pr,
+               struct acpi_device *device) {}
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+
+static int __acpi_processor_start(struct acpi_device *device)
+{
+       struct acpi_processor *pr = acpi_driver_data(device);
+       acpi_status status;
+       int result = 0;
+
+       if (!pr)
+               return -ENODEV;
+
+       if (pr->flags.need_hotplug_init)
+               return 0;
+
+       if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
+               acpi_processor_power_init(pr);
+
+       result = acpi_pss_perf_init(pr, device);
+       if (result)
+               goto err_power_exit;
+
+       status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+                                            acpi_processor_notify, device);
+       if (ACPI_SUCCESS(status))
+               return 0;
+
+err_power_exit:
        acpi_processor_power_exit(pr);
        return result;
 }
@@ -252,15 +283,10 @@ static int acpi_processor_stop(struct device *dev)
        pr = acpi_driver_data(device);
        if (!pr)
                return 0;
-
        acpi_processor_power_exit(pr);
 
-       if (pr->cdev) {
-               sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-               sysfs_remove_link(&pr->cdev->device.kobj, "device");
-               thermal_cooling_device_unregister(pr->cdev);
-               pr->cdev = NULL;
-       }
+       acpi_pss_perf_exit(pr, device);
+
        return 0;
 }
 
index d540f42..175c86b 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index cfc8aba..bb01dea 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  */
 
 #include <linux/kernel.h>
@@ -87,7 +83,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
        if (ignore_ppc)
                return 0;
 
-       if (event != CPUFREQ_INCOMPATIBLE)
+       if (event != CPUFREQ_ADJUST)
                return 0;
 
        mutex_lock(&performance_mutex);
@@ -784,9 +780,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
 
 EXPORT_SYMBOL(acpi_processor_register_performance);
 
-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
-                                     *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
 {
        struct acpi_processor *pr;
 
index e003663..1fed84a 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 84243c3..f170d74 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 7836e2e..6d99450 100644 (file)
@@ -528,13 +528,14 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
 
        if (!val)
                return obj->package.count;
-       else if (nval <= 0)
-               return -EINVAL;
 
        if (nval > obj->package.count)
                return -EOVERFLOW;
+       else if (nval <= 0)
+               return -EINVAL;
 
        items = obj->package.elements;
+
        switch (proptype) {
        case DEV_PROP_U8:
                ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
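A hedged usage sketch of the interface touched above (the property name and array size are invented); requesting more values than the ACPI package contains yields -EOVERFLOW.

static int example_read_props(struct acpi_device *adev)
{
	u32 freqs[4];
	int ret;

	ret = acpi_dev_prop_read(adev, "example-frequencies", DEV_PROP_U32,
				 freqs, ARRAY_SIZE(freqs));
	if (ret == -EOVERFLOW)
		dev_warn(&adev->dev, "property has fewer than %zu entries\n",
			 ARRAY_SIZE(freqs));

	return ret;
}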
index f1c966e..15d22db 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 01504c8..cb3dedb 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index ec25635..01136b8 100644 (file)
@@ -115,264 +115,6 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
        return 0;
 }
 
-/**
- * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
- * @acpi_dev: ACPI device object.
- * @modalias: Buffer to print into.
- * @size: Size of the buffer.
- *
- * Creates hid/cid(s) string needed for modalias and uevent
- * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
- * char *modalias: "acpi:IBM0001:ACPI0001"
- * Return: 0: no _HID and no _CID
- *         -EINVAL: output error
- *         -ENOMEM: output is truncated
-*/
-static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
-                              int size)
-{
-       int len;
-       int count;
-       struct acpi_hardware_id *id;
-
-       /*
-        * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
-        * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
-        * device's list.
-        */
-       count = 0;
-       list_for_each_entry(id, &acpi_dev->pnp.ids, list)
-               if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
-                       count++;
-
-       if (!count)
-               return 0;
-
-       len = snprintf(modalias, size, "acpi:");
-       if (len <= 0)
-               return len;
-
-       size -= len;
-
-       list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
-               if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
-                       continue;
-
-               count = snprintf(&modalias[len], size, "%s:", id->id);
-               if (count < 0)
-                       return -EINVAL;
-
-               if (count >= size)
-                       return -ENOMEM;
-
-               len += count;
-               size -= count;
-       }
-       modalias[len] = '\0';
-       return len;
-}
-
-/**
- * create_of_modalias - Creates DT compatible string for modalias and uevent
- * @acpi_dev: ACPI device object.
- * @modalias: Buffer to print into.
- * @size: Size of the buffer.
- *
- * Expose DT compatible modalias as of:NnameTCcompatible.  This function should
- * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
- * ACPI/PNP IDs.
- */
-static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
-                             int size)
-{
-       struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
-       const union acpi_object *of_compatible, *obj;
-       int len, count;
-       int i, nval;
-       char *c;
-
-       acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
-       /* DT strings are all in lower case */
-       for (c = buf.pointer; *c != '\0'; c++)
-               *c = tolower(*c);
-
-       len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
-       ACPI_FREE(buf.pointer);
-
-       if (len <= 0)
-               return len;
-
-       of_compatible = acpi_dev->data.of_compatible;
-       if (of_compatible->type == ACPI_TYPE_PACKAGE) {
-               nval = of_compatible->package.count;
-               obj = of_compatible->package.elements;
-       } else { /* Must be ACPI_TYPE_STRING. */
-               nval = 1;
-               obj = of_compatible;
-       }
-       for (i = 0; i < nval; i++, obj++) {
-               count = snprintf(&modalias[len], size, "C%s",
-                                obj->string.pointer);
-               if (count < 0)
-                       return -EINVAL;
-
-               if (count >= size)
-                       return -ENOMEM;
-
-               len += count;
-               size -= count;
-       }
-       modalias[len] = '\0';
-       return len;
-}
-
-/*
- * acpi_companion_match() - Can we match via ACPI companion device
- * @dev: Device in question
- *
- * Check if the given device has an ACPI companion and if that companion has
- * a valid list of PNP IDs, and if the device is the first (primary) physical
- * device associated with it.  Return the companion pointer if that's the case
- * or NULL otherwise.
- *
- * If multiple physical devices are attached to a single ACPI companion, we need
- * to be careful.  The usage scenario for this kind of relationship is that all
- * of the physical devices in question use resources provided by the ACPI
- * companion.  A typical case is an MFD device where all the sub-devices share
- * the parent's ACPI companion.  In such cases we can only allow the primary
- * (first) physical device to be matched with the help of the companion's PNP
- * IDs.
- *
- * Additional physical devices sharing the ACPI companion can still use
- * resources available from it but they will be matched normally using functions
- * provided by their bus types (and analogously for their modalias).
- */
-static struct acpi_device *acpi_companion_match(const struct device *dev)
-{
-       struct acpi_device *adev;
-       struct mutex *physical_node_lock;
-
-       adev = ACPI_COMPANION(dev);
-       if (!adev)
-               return NULL;
-
-       if (list_empty(&adev->pnp.ids))
-               return NULL;
-
-       physical_node_lock = &adev->physical_node_lock;
-       mutex_lock(physical_node_lock);
-       if (list_empty(&adev->physical_node_list)) {
-               adev = NULL;
-       } else {
-               const struct acpi_device_physical_node *node;
-
-               node = list_first_entry(&adev->physical_node_list,
-                                       struct acpi_device_physical_node, node);
-               if (node->dev != dev)
-                       adev = NULL;
-       }
-       mutex_unlock(physical_node_lock);
-
-       return adev;
-}
-
-static int __acpi_device_uevent_modalias(struct acpi_device *adev,
-                                        struct kobj_uevent_env *env)
-{
-       int len;
-
-       if (!adev)
-               return -ENODEV;
-
-       if (list_empty(&adev->pnp.ids))
-               return 0;
-
-       if (add_uevent_var(env, "MODALIAS="))
-               return -ENOMEM;
-
-       len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
-                                 sizeof(env->buf) - env->buflen);
-       if (len < 0)
-               return len;
-
-       env->buflen += len;
-       if (!adev->data.of_compatible)
-               return 0;
-
-       if (len > 0 && add_uevent_var(env, "MODALIAS="))
-               return -ENOMEM;
-
-       len = create_of_modalias(adev, &env->buf[env->buflen - 1],
-                                sizeof(env->buf) - env->buflen);
-       if (len < 0)
-               return len;
-
-       env->buflen += len;
-
-       return 0;
-}
-
-/*
- * Creates the uevent modalias field for ACPI enumerated devices, because
- * the other buses do not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
-{
-       return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
-}
-EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
-
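
A minimal, hedged sketch (not part of this diff) of the consumer side of the
"acpi:HID:CID" MODALIAS emitted above: a driver advertises the aliases it can
service through MODULE_DEVICE_TABLE(), so udev/modprobe can autoload it when
the uevent fires.  The "IBM0001" HID is purely illustrative.

        /* Hypothetical driver-side ID table matching the example modalias. */
        static const struct acpi_device_id example_acpi_ids[] = {
                { "IBM0001", 0 },
                { }
        };
        MODULE_DEVICE_TABLE(acpi, example_acpi_ids);
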
-static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
-{
-       int len, count;
-
-       if (!adev)
-               return -ENODEV;
-
-       if (list_empty(&adev->pnp.ids))
-               return 0;
-
-       len = create_pnp_modalias(adev, buf, size - 1);
-       if (len < 0) {
-               return len;
-       } else if (len > 0) {
-               buf[len++] = '\n';
-               size -= len;
-       }
-       if (!adev->data.of_compatible)
-               return len;
-
-       count = create_of_modalias(adev, buf + len, size - 1);
-       if (count < 0) {
-               return count;
-       } else if (count > 0) {
-               len += count;
-               buf[len++] = '\n';
-       }
-
-       return len;
-}
-
-/*
- * Creates the modalias sysfs attribute for ACPI enumerated devices, because
- * the other buses do not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_modalias(struct device *dev, char *buf, int size)
-{
-       return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
-}
-EXPORT_SYMBOL_GPL(acpi_device_modalias);
-
-static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
-       return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
-}
-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-
 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
 {
        struct acpi_device_physical_node *pn;
@@ -701,423 +443,6 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
        unlock_device_hotplug();
 }
 
-static ssize_t real_power_state_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct acpi_device *adev = to_acpi_device(dev);
-       int state;
-       int ret;
-
-       ret = acpi_device_get_power(adev, &state);
-       if (ret)
-               return ret;
-
-       return sprintf(buf, "%s\n", acpi_power_state_string(state));
-}
-
-static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
-
-static ssize_t power_state_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct acpi_device *adev = to_acpi_device(dev);
-
-       return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
-}
-
-static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
-
-static ssize_t
-acpi_eject_store(struct device *d, struct device_attribute *attr,
-               const char *buf, size_t count)
-{
-       struct acpi_device *acpi_device = to_acpi_device(d);
-       acpi_object_type not_used;
-       acpi_status status;
-
-       if (!count || buf[0] != '1')
-               return -EINVAL;
-
-       if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
-           && !acpi_device->driver)
-               return -ENODEV;
-
-       status = acpi_get_type(acpi_device->handle, &not_used);
-       if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
-               return -ENODEV;
-
-       get_device(&acpi_device->dev);
-       status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
-       if (ACPI_SUCCESS(status))
-               return count;
-
-       put_device(&acpi_device->dev);
-       acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
-                         ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
-       return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
-}
-
-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
-
-static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-
-       return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
-}
-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
-
-static ssize_t acpi_device_uid_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-
-       return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
-}
-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
-
-static ssize_t acpi_device_adr_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-
-       return sprintf(buf, "0x%08x\n",
-                      (unsigned int)(acpi_dev->pnp.bus_address));
-}
-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
-
-static ssize_t
-acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
-       int result;
-
-       result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
-       if (result)
-               goto end;
-
-       result = sprintf(buf, "%s\n", (char*)path.pointer);
-       kfree(path.pointer);
-end:
-       return result;
-}
-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
-
-/* sysfs file that shows description text from the ACPI _STR method */
-static ssize_t description_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf) {
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       int result;
-
-       if (acpi_dev->pnp.str_obj == NULL)
-               return 0;
-
-       /*
-        * The _STR object contains a Unicode identifier for a device.
-        * We need to convert to utf-8 so it can be displayed.
-        */
-       result = utf16s_to_utf8s(
-               (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
-               acpi_dev->pnp.str_obj->buffer.length,
-               UTF16_LITTLE_ENDIAN, buf,
-               PAGE_SIZE);
-
-       buf[result++] = '\n';
-
-       return result;
-}
-static DEVICE_ATTR(description, 0444, description_show, NULL);
-
-static ssize_t
-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
-                    char *buf) {
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       acpi_status status;
-       unsigned long long sun;
-
-       status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       return sprintf(buf, "%llu\n", sun);
-}
-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
-
-static ssize_t status_show(struct device *dev, struct device_attribute *attr,
-                               char *buf) {
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       acpi_status status;
-       unsigned long long sta;
-
-       status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       return sprintf(buf, "%llu\n", sta);
-}
-static DEVICE_ATTR_RO(status);
-
-static int acpi_device_setup_files(struct acpi_device *dev)
-{
-       struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-       acpi_status status;
-       int result = 0;
-
-       /*
-        * Devices gotten from FADT don't have a "path" attribute
-        */
-       if (dev->handle) {
-               result = device_create_file(&dev->dev, &dev_attr_path);
-               if (result)
-                       goto end;
-       }
-
-       if (!list_empty(&dev->pnp.ids)) {
-               result = device_create_file(&dev->dev, &dev_attr_hid);
-               if (result)
-                       goto end;
-
-               result = device_create_file(&dev->dev, &dev_attr_modalias);
-               if (result)
-                       goto end;
-       }
-
-       /*
-        * If device has _STR, 'description' file is created
-        */
-       if (acpi_has_method(dev->handle, "_STR")) {
-               status = acpi_evaluate_object(dev->handle, "_STR",
-                                       NULL, &buffer);
-               if (ACPI_FAILURE(status))
-                       buffer.pointer = NULL;
-               dev->pnp.str_obj = buffer.pointer;
-               result = device_create_file(&dev->dev, &dev_attr_description);
-               if (result)
-                       goto end;
-       }
-
-       if (dev->pnp.type.bus_address)
-               result = device_create_file(&dev->dev, &dev_attr_adr);
-       if (dev->pnp.unique_id)
-               result = device_create_file(&dev->dev, &dev_attr_uid);
-
-       if (acpi_has_method(dev->handle, "_SUN")) {
-               result = device_create_file(&dev->dev, &dev_attr_sun);
-               if (result)
-                       goto end;
-       }
-
-       if (acpi_has_method(dev->handle, "_STA")) {
-               result = device_create_file(&dev->dev, &dev_attr_status);
-               if (result)
-                       goto end;
-       }
-
-        /*
-         * If device has _EJ0, 'eject' file is created that is used to trigger
-         * hot-removal function from userland.
-         */
-       if (acpi_has_method(dev->handle, "_EJ0")) {
-               result = device_create_file(&dev->dev, &dev_attr_eject);
-               if (result)
-                       return result;
-       }
-
-       if (dev->flags.power_manageable) {
-               result = device_create_file(&dev->dev, &dev_attr_power_state);
-               if (result)
-                       return result;
-
-               if (dev->power.flags.power_resources)
-                       result = device_create_file(&dev->dev,
-                                                   &dev_attr_real_power_state);
-       }
-
-end:
-       return result;
-}
-
-static void acpi_device_remove_files(struct acpi_device *dev)
-{
-       if (dev->flags.power_manageable) {
-               device_remove_file(&dev->dev, &dev_attr_power_state);
-               if (dev->power.flags.power_resources)
-                       device_remove_file(&dev->dev,
-                                          &dev_attr_real_power_state);
-       }
-
-       /*
-        * If device has _STR, remove 'description' file
-        */
-       if (acpi_has_method(dev->handle, "_STR")) {
-               kfree(dev->pnp.str_obj);
-               device_remove_file(&dev->dev, &dev_attr_description);
-       }
-       /*
-        * If device has _EJ0, remove 'eject' file.
-        */
-       if (acpi_has_method(dev->handle, "_EJ0"))
-               device_remove_file(&dev->dev, &dev_attr_eject);
-
-       if (acpi_has_method(dev->handle, "_SUN"))
-               device_remove_file(&dev->dev, &dev_attr_sun);
-
-       if (dev->pnp.unique_id)
-               device_remove_file(&dev->dev, &dev_attr_uid);
-       if (dev->pnp.type.bus_address)
-               device_remove_file(&dev->dev, &dev_attr_adr);
-       device_remove_file(&dev->dev, &dev_attr_modalias);
-       device_remove_file(&dev->dev, &dev_attr_hid);
-       if (acpi_has_method(dev->handle, "_STA"))
-               device_remove_file(&dev->dev, &dev_attr_status);
-       if (dev->handle)
-               device_remove_file(&dev->dev, &dev_attr_path);
-}
-/* --------------------------------------------------------------------------
-                       ACPI Bus operations
-   -------------------------------------------------------------------------- */
-
-/**
- * acpi_of_match_device - Match device object using the "compatible" property.
- * @adev: ACPI device object to match.
- * @of_match_table: List of device IDs to match against.
- *
- * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
- * identifiers and a _DSD object with the "compatible" property, use that
- * property to match against the given list of identifiers.
- */
-static bool acpi_of_match_device(struct acpi_device *adev,
-                                const struct of_device_id *of_match_table)
-{
-       const union acpi_object *of_compatible, *obj;
-       int i, nval;
-
-       if (!adev)
-               return false;
-
-       of_compatible = adev->data.of_compatible;
-       if (!of_match_table || !of_compatible)
-               return false;
-
-       if (of_compatible->type == ACPI_TYPE_PACKAGE) {
-               nval = of_compatible->package.count;
-               obj = of_compatible->package.elements;
-       } else { /* Must be ACPI_TYPE_STRING. */
-               nval = 1;
-               obj = of_compatible;
-       }
-       /* Now we can look for the driver DT compatible strings */
-       for (i = 0; i < nval; i++, obj++) {
-               const struct of_device_id *id;
-
-               for (id = of_match_table; id->compatible[0]; id++)
-                       if (!strcasecmp(obj->string.pointer, id->compatible))
-                               return true;
-       }
-
-       return false;
-}
-
-static bool __acpi_match_device_cls(const struct acpi_device_id *id,
-                                   struct acpi_hardware_id *hwid)
-{
-       int i, msk, byte_shift;
-       char buf[3];
-
-       if (!id->cls)
-               return false;
-
-       /* Apply class-code bitmask, before checking each class-code byte */
-       for (i = 1; i <= 3; i++) {
-               byte_shift = 8 * (3 - i);
-               msk = (id->cls_msk >> byte_shift) & 0xFF;
-               if (!msk)
-                       continue;
-
-               sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
-               if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
-                       return false;
-       }
-       return true;
-}
-
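
As a hedged illustration of the _CLS matching implemented above (not part of
this diff): a driver that binds by PCI-style class code instead of a HID, for
example SATA AHCI (base class 01, sub-class 06, interface 01), would fill in
the cls/cls_msk fields of its ID table roughly like this.

        /* Illustrative only: match any device whose _CLS is 01/06/01. */
        static const struct acpi_device_id example_cls_ids[] = {
                { .cls = 0x010601, .cls_msk = 0xffffff },
                { }
        };
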
-static const struct acpi_device_id *__acpi_match_device(
-       struct acpi_device *device,
-       const struct acpi_device_id *ids,
-       const struct of_device_id *of_ids)
-{
-       const struct acpi_device_id *id;
-       struct acpi_hardware_id *hwid;
-
-       /*
-        * If the device is not present, it is unnecessary to load device
-        * driver for it.
-        */
-       if (!device || !device->status.present)
-               return NULL;
-
-       list_for_each_entry(hwid, &device->pnp.ids, list) {
-               /* First, check the ACPI/PNP IDs provided by the caller. */
-               for (id = ids; id->id[0] || id->cls; id++) {
-                       if (id->id[0] && !strcmp((char *) id->id, hwid->id))
-                               return id;
-                       else if (id->cls && __acpi_match_device_cls(id, hwid))
-                               return id;
-               }
-
-               /*
-                * Next, check ACPI_DT_NAMESPACE_HID and try to match the
-                * "compatible" property if found.
-                *
-                * The id returned by the code below is not valid, but the
-                * only caller passing non-NULL of_ids here is just interested
-                * in whether or not the return value is NULL.
-                */
-               if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
-                   && acpi_of_match_device(device, of_ids))
-                       return id;
-       }
-       return NULL;
-}
-
-/**
- * acpi_match_device - Match a struct device against a given list of ACPI IDs
- * @ids: Array of struct acpi_device_id object to match against.
- * @dev: The device structure to match.
- *
- * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
- * object for that handle and use that object to match against a given list of
- * device IDs.
- *
- * Return a pointer to the first matching ID on success or %NULL on failure.
- */
-const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
-                                              const struct device *dev)
-{
-       return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
-}
-EXPORT_SYMBOL_GPL(acpi_match_device);
-
-int acpi_match_device_ids(struct acpi_device *device,
-                         const struct acpi_device_id *ids)
-{
-       return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
-}
-EXPORT_SYMBOL(acpi_match_device_ids);
-
-bool acpi_driver_match_device(struct device *dev,
-                             const struct device_driver *drv)
-{
-       if (!drv->acpi_match_table)
-               return acpi_of_match_device(ACPI_COMPANION(dev),
-                                           drv->of_match_table);
-
-       return !!__acpi_match_device(acpi_companion_match(dev),
-                                    drv->acpi_match_table, drv->of_match_table);
-}
-EXPORT_SYMBOL_GPL(acpi_driver_match_device);
-
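
A hedged usage sketch for the exported matcher (not from this diff): a probe
routine can recover the acpi_device_id entry that matched and use its
driver_data to tell hardware variants apart.  The table, HID and function
names below are placeholders.

        static const struct acpi_device_id example_ids[] = {
                { "ABCD0001", 0 },
                { }
        };

        static int example_probe(struct device *dev)
        {
                const struct acpi_device_id *id;

                id = acpi_match_device(example_ids, dev);
                if (!id)
                        return -ENODEV;

                /* id->driver_data identifies the matched variant. */
                return 0;
        }
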
 static void acpi_free_power_resources_lists(struct acpi_device *device)
 {
        int i;
@@ -1144,144 +469,6 @@ static void acpi_device_release(struct device *dev)
        kfree(acpi_dev);
 }
 
-static int acpi_bus_match(struct device *dev, struct device_driver *drv)
-{
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       struct acpi_driver *acpi_drv = to_acpi_driver(drv);
-
-       return acpi_dev->flags.match_driver
-               && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
-}
-
-static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
-}
-
-static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
-{
-       struct acpi_device *device = data;
-
-       device->driver->ops.notify(device, event);
-}
-
-static void acpi_device_notify_fixed(void *data)
-{
-       struct acpi_device *device = data;
-
-       /* Fixed hardware devices have no handles */
-       acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
-}
-
-static u32 acpi_device_fixed_event(void *data)
-{
-       acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
-       return ACPI_INTERRUPT_HANDLED;
-}
-
-static int acpi_device_install_notify_handler(struct acpi_device *device)
-{
-       acpi_status status;
-
-       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
-               status =
-                   acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
-                                                    acpi_device_fixed_event,
-                                                    device);
-       else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
-               status =
-                   acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
-                                                    acpi_device_fixed_event,
-                                                    device);
-       else
-               status = acpi_install_notify_handler(device->handle,
-                                                    ACPI_DEVICE_NOTIFY,
-                                                    acpi_device_notify,
-                                                    device);
-
-       if (ACPI_FAILURE(status))
-               return -EINVAL;
-       return 0;
-}
-
-static void acpi_device_remove_notify_handler(struct acpi_device *device)
-{
-       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
-               acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
-                                               acpi_device_fixed_event);
-       else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
-               acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
-                                               acpi_device_fixed_event);
-       else
-               acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-                                          acpi_device_notify);
-}
-
-static int acpi_device_probe(struct device *dev)
-{
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
-       int ret;
-
-       if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
-               return -EINVAL;
-
-       if (!acpi_drv->ops.add)
-               return -ENOSYS;
-
-       ret = acpi_drv->ops.add(acpi_dev);
-       if (ret)
-               return ret;
-
-       acpi_dev->driver = acpi_drv;
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Driver [%s] successfully bound to device [%s]\n",
-                         acpi_drv->name, acpi_dev->pnp.bus_id));
-
-       if (acpi_drv->ops.notify) {
-               ret = acpi_device_install_notify_handler(acpi_dev);
-               if (ret) {
-                       if (acpi_drv->ops.remove)
-                               acpi_drv->ops.remove(acpi_dev);
-
-                       acpi_dev->driver = NULL;
-                       acpi_dev->driver_data = NULL;
-                       return ret;
-               }
-       }
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
-                         acpi_drv->name, acpi_dev->pnp.bus_id));
-       get_device(dev);
-       return 0;
-}
-
-static int acpi_device_remove(struct device * dev)
-{
-       struct acpi_device *acpi_dev = to_acpi_device(dev);
-       struct acpi_driver *acpi_drv = acpi_dev->driver;
-
-       if (acpi_drv) {
-               if (acpi_drv->ops.notify)
-                       acpi_device_remove_notify_handler(acpi_dev);
-               if (acpi_drv->ops.remove)
-                       acpi_drv->ops.remove(acpi_dev);
-       }
-       acpi_dev->driver = NULL;
-       acpi_dev->driver_data = NULL;
-
-       put_device(dev);
-       return 0;
-}
-
-struct bus_type acpi_bus_type = {
-       .name           = "acpi",
-       .match          = acpi_bus_match,
-       .probe          = acpi_device_probe,
-       .remove         = acpi_device_remove,
-       .uevent         = acpi_device_uevent,
-};
-
 static void acpi_device_del(struct acpi_device *device)
 {
        mutex_lock(&acpi_device_lock);
@@ -1528,47 +715,6 @@ struct acpi_device *acpi_get_next_child(struct device *dev,
        return next == head ? NULL : list_entry(next, struct acpi_device, node);
 }
 
-/* --------------------------------------------------------------------------
-                                 Driver Management
-   -------------------------------------------------------------------------- */
-/**
- * acpi_bus_register_driver - register a driver with the ACPI bus
- * @driver: driver being registered
- *
- * Registers a driver with the ACPI bus.  Searches the namespace for all
- * devices that match the driver's criteria and binds.  Returns zero for
- * success or a negative error status for failure.
- */
-int acpi_bus_register_driver(struct acpi_driver *driver)
-{
-       int ret;
-
-       if (acpi_disabled)
-               return -ENODEV;
-       driver->drv.name = driver->name;
-       driver->drv.bus = &acpi_bus_type;
-       driver->drv.owner = driver->owner;
-
-       ret = driver_register(&driver->drv);
-       return ret;
-}
-
-EXPORT_SYMBOL(acpi_bus_register_driver);
-
-/**
- * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
- * @driver: driver to unregister
- *
- * Unregisters a driver with the ACPI bus.  Searches the namespace for all
- * devices that match the driver's criteria and unbinds.
- */
-void acpi_bus_unregister_driver(struct acpi_driver *driver)
-{
-       driver_unregister(&driver->drv);
-}
-
-EXPORT_SYMBOL(acpi_bus_unregister_driver);
-
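
For context, a hedged sketch of the registration pattern these helpers serve
(driver name, HID and callbacks are placeholders, not from this diff); the
module_acpi_driver() helper expands to the register/unregister pair above.

        static const struct acpi_device_id example_driver_ids[] = {
                { "XYZ0001", 0 },
                { }
        };

        static int example_add(struct acpi_device *adev)
        {
                return 0;       /* set the device up here */
        }

        static int example_remove(struct acpi_device *adev)
        {
                return 0;       /* tear the device down here */
        }

        static struct acpi_driver example_acpi_driver = {
                .name = "example",
                .ids  = example_driver_ids,
                .ops  = {
                        .add    = example_add,
                        .remove = example_remove,
                },
        };
        module_acpi_driver(example_acpi_driver);
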
 /* --------------------------------------------------------------------------
                                  Device Enumeration
    -------------------------------------------------------------------------- */
@@ -2744,12 +1890,6 @@ int __init acpi_scan_init(void)
 {
        int result;
 
-       result = bus_register(&acpi_bus_type);
-       if (result) {
-               /* We don't want to quit even if we failed to add suspend/resume */
-               printk(KERN_ERR PREFIX "Could not register bus type\n");
-       }
-
        acpi_pci_root_init();
        acpi_pci_link_init();
        acpi_processor_init();
index 0876d77..40a4265 100644 (file)
@@ -69,6 +69,8 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
        ACPI_DEBUG_INIT(ACPI_LV_INIT),
        ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
        ACPI_DEBUG_INIT(ACPI_LV_INFO),
+       ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
+       ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
 
        ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
        ACPI_DEBUG_INIT(ACPI_LV_PARSE),
@@ -162,55 +164,116 @@ static const struct kernel_param_ops param_ops_debug_level = {
 module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 
-static char trace_method_name[6];
-module_param_string(trace_method_name, trace_method_name, 6, 0644);
-static unsigned int trace_debug_layer;
-module_param(trace_debug_layer, uint, 0644);
-static unsigned int trace_debug_level;
-module_param(trace_debug_level, uint, 0644);
+static char trace_method_name[1024];
 
-static int param_set_trace_state(const char *val, struct kernel_param *kp)
+int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
 {
-       int result = 0;
+       u32 saved_flags = 0;
+       bool is_abs_path = true;
 
-       if (!strncmp(val, "enable", sizeof("enable") - 1)) {
-               result = acpi_debug_trace(trace_method_name, trace_debug_level,
-                                         trace_debug_layer, 0);
-               if (result)
-                       result = -EBUSY;
-               goto exit;
-       }
+       if (*val != '\\')
+               is_abs_path = false;
 
-       if (!strncmp(val, "disable", sizeof("disable") - 1)) {
-               int name = 0;
-               result = acpi_debug_trace((char *)&name, trace_debug_level,
-                                         trace_debug_layer, 0);
-               if (result)
-                       result = -EBUSY;
-               goto exit;
+       if ((is_abs_path && strlen(val) > 1023) ||
+           (!is_abs_path && strlen(val) > 1022)) {
+               pr_err("%s: string parameter too long\n", kp->name);
+               return -ENOSPC;
        }
 
-       if (!strncmp(val, "1", 1)) {
-               result = acpi_debug_trace(trace_method_name, trace_debug_level,
-                                         trace_debug_layer, 1);
-               if (result)
-                       result = -EBUSY;
-               goto exit;
+       /*
+        * It's not safe to update acpi_gbl_trace_method_name without
+        * having the tracer stopped, so we save the original tracer
+        * state and disable it.
+        */
+       saved_flags = acpi_gbl_trace_flags;
+       (void)acpi_debug_trace(NULL,
+                              acpi_gbl_trace_dbg_level,
+                              acpi_gbl_trace_dbg_layer,
+                              0);
+
+       /* This is a hack.  We can't kmalloc in early boot. */
+       if (is_abs_path)
+               strcpy(trace_method_name, val);
+       else {
+               trace_method_name[0] = '\\';
+               strcpy(trace_method_name+1, val);
        }
 
-       result = -EINVAL;
-exit:
-       return result;
+       /* Restore the original tracer state */
+       (void)acpi_debug_trace(trace_method_name,
+                              acpi_gbl_trace_dbg_level,
+                              acpi_gbl_trace_dbg_layer,
+                              saved_flags);
+
+       return 0;
+}
+
+static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
+{
+       return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
+}
+
+static const struct kernel_param_ops param_ops_trace_method = {
+       .set = param_set_trace_method_name,
+       .get = param_get_trace_method_name,
+};
+
+static const struct kernel_param_ops param_ops_trace_attrib = {
+       .set = param_set_uint,
+       .get = param_get_uint,
+};
+
+module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
+module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
+module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
+
+static int param_set_trace_state(const char *val, struct kernel_param *kp)
+{
+       acpi_status status;
+       const char *method = trace_method_name;
+       u32 flags = 0;
+
+/* The "xxx-once" comparisons must be done before the "xxx" comparisons */
+#define acpi_compare_param(val, key)   \
+       strncmp((val), (key), sizeof(key) - 1)
+
+       if (!acpi_compare_param(val, "enable")) {
+               method = NULL;
+               flags = ACPI_TRACE_ENABLED;
+       } else if (!acpi_compare_param(val, "disable"))
+               method = NULL;
+       else if (!acpi_compare_param(val, "method-once"))
+               flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
+       else if (!acpi_compare_param(val, "method"))
+               flags = ACPI_TRACE_ENABLED;
+       else if (!acpi_compare_param(val, "opcode-once"))
+               flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
+       else if (!acpi_compare_param(val, "opcode"))
+               flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
+       else
+               return -EINVAL;
+
+       status = acpi_debug_trace(method,
+                                 acpi_gbl_trace_dbg_level,
+                                 acpi_gbl_trace_dbg_layer,
+                                 flags);
+       if (ACPI_FAILURE(status))
+               return -EBUSY;
+
+       return 0;
 }
 
 static int param_get_trace_state(char *buffer, struct kernel_param *kp)
 {
-       if (!acpi_gbl_trace_method_name)
+       if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
                return sprintf(buffer, "disable");
        else {
-               if (acpi_gbl_trace_flags & 1)
-                       return sprintf(buffer, "1");
-               else
+               if (acpi_gbl_trace_method_name) {
+                       if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
+                               return sprintf(buffer, "method-once");
+                       else
+                               return sprintf(buffer, "method");
+               } else
                        return sprintf(buffer, "enable");
        }
        return 0;
index 2e19189..17a6fa0 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
index 6d4e44e..fc28b9f 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  *  This driver fully implements the ACPI thermal policy as described in the
index 67c548a..475c907 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index 19bcb80..790e0de 100644 (file)
@@ -4230,6 +4230,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 8*",             NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "FCCT*M500*",                 NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
        /* devices that don't properly handle TRIM commands */
        { "SuperSSpeed S238*",          NULL,   ATA_HORKAGE_NOTRIM, },
index dafae6d..7d62795 100644 (file)
@@ -1252,6 +1252,19 @@ void device_unregister(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(device_unregister);
 
+static struct device *prev_device(struct klist_iter *i)
+{
+       struct klist_node *n = klist_prev(i);
+       struct device *dev = NULL;
+       struct device_private *p;
+
+       if (n) {
+               p = to_device_private_parent(n);
+               dev = p->device;
+       }
+       return dev;
+}
+
 static struct device *next_device(struct klist_iter *i)
 {
        struct klist_node *n = klist_next(i);
@@ -1340,6 +1353,36 @@ int device_for_each_child(struct device *parent, void *data,
 }
 EXPORT_SYMBOL_GPL(device_for_each_child);
 
+/**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+                                 int (*fn)(struct device *dev, void *data))
+{
+       struct klist_iter i;
+       struct device *child;
+       int error = 0;
+
+       if (!parent->p)
+               return 0;
+
+       klist_iter_init(&parent->p->klist_children, &i);
+       while ((child = prev_device(&i)) && !error)
+               error = fn(child, data);
+       klist_iter_exit(&i);
+       return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
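
A hedged usage sketch for the new reverse iterator (callback and helper names
are placeholders, not part of this diff): walking children newest-first is the
natural order for undoing per-child setup that was performed in registration
order.

        static int example_unbind_child(struct device *dev, void *data)
        {
                /* Undo whatever per-child setup was done at registration. */
                return 0;
        }

        static void example_teardown_children(struct device *parent)
        {
                device_for_each_child_reverse(parent, NULL,
                                              example_unbind_child);
        }
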
 /**
  * device_find_child - device iterator for locating a particular device.
  * @parent: parent struct device
index a638bbb..2d6df1d 100644 (file)
@@ -399,6 +399,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
  *
  * This function must be called with @dev lock held.  When called for a
  * USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
  */
 int driver_probe_device(struct device_driver *drv, struct device *dev)
 {
@@ -410,10 +412,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
        pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
                 drv->bus->name, __func__, dev_name(dev), drv->name);
 
+       if (dev->parent)
+               pm_runtime_get_sync(dev->parent);
+
        pm_runtime_barrier(dev);
        ret = really_probe(dev, drv);
        pm_request_idle(dev);
 
+       if (dev->parent)
+               pm_runtime_put(dev->parent);
+
        return ret;
 }
 
@@ -507,11 +515,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
        device_lock(dev);
 
+       if (dev->parent)
+               pm_runtime_get_sync(dev->parent);
+
        bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
        dev_dbg(dev, "async probe completed\n");
 
        pm_request_idle(dev);
 
+       if (dev->parent)
+               pm_runtime_put(dev->parent);
+
        device_unlock(dev);
 
        put_device(dev);
@@ -541,6 +555,9 @@ static int __device_attach(struct device *dev, bool allow_async)
                        .want_async = false,
                };
 
+               if (dev->parent)
+                       pm_runtime_get_sync(dev->parent);
+
                ret = bus_for_each_drv(dev->bus, NULL, &data,
                                        __device_attach_driver);
                if (!ret && allow_async && data.have_async) {
@@ -557,6 +574,9 @@ static int __device_attach(struct device *dev, bool allow_async)
                } else {
                        pm_request_idle(dev);
                }
+
+               if (dev->parent)
+                       pm_runtime_put(dev->parent);
        }
 out_unlock:
        device_unlock(dev);
index acef9f9..652b5a3 100644 (file)
@@ -38,7 +38,7 @@ struct pm_clock_entry {
  * @dev: The device for the given clock
  * @ce: PM clock entry corresponding to the clock.
  */
-static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
 {
        int ret;
 
@@ -50,8 +50,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
                        dev_err(dev, "%s: failed to enable clk %p, error %d\n",
                                __func__, ce->clk, ret);
        }
-
-       return ret;
 }
 
 /**
index 677fb28..bb703b5 100644 (file)
@@ -11,6 +11,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
  *             order.
  * @dynamic:   not-created from static DT entries.
 * @available: true/false - marks if this OPP is available or not
+ * @turbo:     true if turbo (boost) OPP
  * @rate:      Frequency in hertz
- * @u_volt:    Nominal voltage in microvolts corresponding to this OPP
+ * @u_volt:    Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min:        Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max:        Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp:     Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ *             frequency from any other OPP's frequency.
  * @dev_opp:   points back to the device_opp struct this opp belongs to
  * @rcu_head:  RCU callback head used for deferred freeing
+ * @np:                OPP's device node.
  *
  * This structure stores the OPP information for a given device.
  */
@@ -63,11 +71,34 @@ struct dev_pm_opp {
 
        bool available;
        bool dynamic;
+       bool turbo;
        unsigned long rate;
+
        unsigned long u_volt;
+       unsigned long u_volt_min;
+       unsigned long u_volt_max;
+       unsigned long u_amp;
+       unsigned long clock_latency_ns;
 
        struct device_opp *dev_opp;
        struct rcu_head rcu_head;
+
+       struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node:      list node
+ * @dev:       device to which the struct object belongs
+ * @rcu_head:  RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+       struct list_head node;
+       const struct device *dev;
+       struct rcu_head rcu_head;
 };
 
 /**
@@ -77,10 +108,12 @@ struct dev_pm_opp {
  *             list.
  *             RCU usage: nodes are not modified in the list of device_opp,
  *             however addition is possible and is secured by dev_opp_list_lock
- * @dev:       device pointer
  * @srcu_head: notifier head to notify the OPP availability changes.
  * @rcu_head:  RCU callback head used for deferred freeing
+ * @dev_list:  list of devices that share these OPPs
  * @opp_list:  list of opps
+ * @np:                struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
  *
  * This is an internal data structure maintaining the link to opps attached to
  * a device. This structure is not meant to be shared to users as it is
@@ -93,10 +126,15 @@ struct dev_pm_opp {
 struct device_opp {
        struct list_head node;
 
-       struct device *dev;
        struct srcu_notifier_head srcu_head;
        struct rcu_head rcu_head;
+       struct list_head dev_list;
        struct list_head opp_list;
+
+       struct device_node *np;
+       unsigned long clock_latency_ns_max;
+       bool shared_opp;
+       struct dev_pm_opp *suspend_opp;
 };
 
 /*
@@ -116,6 +154,38 @@ do {                                                                       \
                           "dev_opp_list_lock protection");             \
 } while (0)
 
+static struct device_list_opp *_find_list_dev(const struct device *dev,
+                                             struct device_opp *dev_opp)
+{
+       struct device_list_opp *list_dev;
+
+       list_for_each_entry(list_dev, &dev_opp->dev_list, node)
+               if (list_dev->dev == dev)
+                       return list_dev;
+
+       return NULL;
+}
+
+static struct device_opp *_managed_opp(const struct device_node *np)
+{
+       struct device_opp *dev_opp;
+
+       list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
+               if (dev_opp->np == np) {
+                       /*
+                        * Multiple devices can point to the same OPP table and
+                        * so will have same node-pointer, np.
+                        *
+                        * But the OPPs will be considered as shared only if the
+                        * OPP table contains a "opp-shared" property.
+                        */
+                       return dev_opp->shared_opp ? dev_opp : NULL;
+               }
+       }
+
+       return NULL;
+}
+
 /**
  * _find_device_opp() - find device_opp struct using device pointer
  * @dev:       device pointer used to lookup device OPPs
@@ -132,21 +202,18 @@ do {                                                                      \
  */
 static struct device_opp *_find_device_opp(struct device *dev)
 {
-       struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+       struct device_opp *dev_opp;
 
-       if (unlikely(IS_ERR_OR_NULL(dev))) {
+       if (IS_ERR_OR_NULL(dev)) {
                pr_err("%s: Invalid parameters\n", __func__);
                return ERR_PTR(-EINVAL);
        }
 
-       list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
-               if (tmp_dev_opp->dev == dev) {
-                       dev_opp = tmp_dev_opp;
-                       break;
-               }
-       }
+       list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
+               if (_find_list_dev(dev, dev_opp))
+                       return dev_opp;
 
-       return dev_opp;
+       return ERR_PTR(-ENODEV);
 }
 
 /**
@@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
        opp_rcu_lockdep_assert();
 
        tmp_opp = rcu_dereference(opp);
-       if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+       if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
                pr_err("%s: Invalid parameters\n", __func__);
        else
                v = tmp_opp->u_volt;
@@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
        opp_rcu_lockdep_assert();
 
        tmp_opp = rcu_dereference(opp);
-       if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+       if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
                pr_err("%s: Invalid parameters\n", __func__);
        else
                f = tmp_opp->rate;
@@ -213,6 +280,66 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 
+/**
+ * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
+ * @opp: opp for which turbo mode is being verified
+ *
+ * Turbo OPPs are not for normal use, and can be enabled (under certain
+ * conditions) for short durations to finish high-throughput work
+ * quickly. Running on them for longer times may overheat the chip.
+ *
+ * Return: true if opp is turbo opp, else false.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+       struct dev_pm_opp *tmp_opp;
+
+       opp_rcu_lockdep_assert();
+
+       tmp_opp = rcu_dereference(opp);
+       if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
+               pr_err("%s: Invalid parameters\n", __func__);
+               return false;
+       }
+
+       return tmp_opp->turbo;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
+
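
A hedged usage sketch (not part of this diff) of the RCU rule spelled out in
the kernel-doc above: the OPP pointer is only valid while rcu_read_lock() is
held, so the lookup and the turbo query must share one read-side section.

        static bool example_freq_is_turbo(struct device *dev, unsigned long freq)
        {
                struct dev_pm_opp *opp;
                bool turbo = false;

                rcu_read_lock();
                opp = dev_pm_opp_find_freq_exact(dev, freq, true);
                if (!IS_ERR(opp))
                        turbo = dev_pm_opp_is_turbo(opp);
                rcu_read_unlock();

                return turbo;
        }
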
+/**
+ * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
+ * @dev:       device for which we do this operation
+ *
+ * Return: This function returns the max clock latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+       struct device_opp *dev_opp;
+       unsigned long clock_latency_ns;
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp))
+               clock_latency_ns = 0;
+       else
+               clock_latency_ns = dev_opp->clock_latency_ns_max;
+
+       rcu_read_unlock();
+       return clock_latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
+
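
A hedged example of a typical consumer (not from this diff): a cpufreq
driver's ->init() callback can feed the reported worst-case latency straight
into the policy's transition latency.

        static int example_cpufreq_init(struct cpufreq_policy *policy)
        {
                struct device *cpu_dev = get_cpu_device(policy->cpu);

                if (!cpu_dev)
                        return -ENODEV;

                policy->cpuinfo.transition_latency =
                        dev_pm_opp_get_max_clock_latency(cpu_dev);
                return 0;
        }
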
 /**
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:       device for which we do this operation
@@ -407,18 +534,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
+/* List-dev Helpers */
+static void _kfree_list_dev_rcu(struct rcu_head *head)
+{
+       struct device_list_opp *list_dev;
+
+       list_dev = container_of(head, struct device_list_opp, rcu_head);
+       kfree_rcu(list_dev, rcu_head);
+}
+
+static void _remove_list_dev(struct device_list_opp *list_dev,
+                            struct device_opp *dev_opp)
+{
+       list_del(&list_dev->node);
+       call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
+                 _kfree_list_dev_rcu);
+}
+
+static struct device_list_opp *_add_list_dev(const struct device *dev,
+                                            struct device_opp *dev_opp)
+{
+       struct device_list_opp *list_dev;
+
+       list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
+       if (!list_dev)
+               return NULL;
+
+       /* Initialize list-dev */
+       list_dev->dev = dev;
+       list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+
+       return list_dev;
+}
+
 /**
- * _add_device_opp() - Allocate a new device OPP table
+ * _add_device_opp() - Find device OPP table or allocate a new one
  * @dev:       device for which we do this operation
  *
- * New device node which uses OPPs - used when multiple devices with OPP tables
- * are maintained.
+ * It tries to find an existing table first; if it can't find one, it
+ * allocates a new OPP table and returns that.
  *
  * Return: valid device_opp pointer if success, else NULL.
  */
 static struct device_opp *_add_device_opp(struct device *dev)
 {
        struct device_opp *dev_opp;
+       struct device_list_opp *list_dev;
+
+       /* Check for existing list for 'dev' first */
+       dev_opp = _find_device_opp(dev);
+       if (!IS_ERR(dev_opp))
+               return dev_opp;
 
        /*
         * Allocate a new device OPP table. In the infrequent case where a new
@@ -428,7 +594,14 @@ static struct device_opp *_add_device_opp(struct device *dev)
        if (!dev_opp)
                return NULL;
 
-       dev_opp->dev = dev;
+       INIT_LIST_HEAD(&dev_opp->dev_list);
+
+       list_dev = _add_list_dev(dev, dev_opp);
+       if (!list_dev) {
+               kfree(dev_opp);
+               return NULL;
+       }
+
        srcu_init_notifier_head(&dev_opp->srcu_head);
        INIT_LIST_HEAD(&dev_opp->opp_list);
 
@@ -438,136 +611,41 @@ static struct device_opp *_add_device_opp(struct device *dev)
 }
 
 /**
- * _opp_add_dynamic() - Allocate a dynamic OPP.
- * @dev:       device for which we do this operation
- * @freq:      Frequency in Hz for this OPP
- * @u_volt:    Voltage in uVolts for this OPP
- * @dynamic:   Dynamically added OPPs.
- *
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
- *
- * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
- * freed by of_free_opp_table.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0           On success OR
- *             Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST     Freq are same and volt are different OR
- *             Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM     Memory allocation failure
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head:      RCU head
  */
-static int _opp_add_dynamic(struct device *dev, unsigned long freq,
-                           long u_volt, bool dynamic)
+static void _kfree_device_rcu(struct rcu_head *head)
 {
-       struct device_opp *dev_opp = NULL;
-       struct dev_pm_opp *opp, *new_opp;
-       struct list_head *head;
-       int ret;
-
-       /* allocate new OPP node */
-       new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
-       if (!new_opp)
-               return -ENOMEM;
-
-       /* Hold our list modification lock here */
-       mutex_lock(&dev_opp_list_lock);
-
-       /* populate the opp table */
-       new_opp->rate = freq;
-       new_opp->u_volt = u_volt;
-       new_opp->available = true;
-       new_opp->dynamic = dynamic;
-
-       /* Check for existing list for 'dev' */
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp)) {
-               dev_opp = _add_device_opp(dev);
-               if (!dev_opp) {
-                       ret = -ENOMEM;
-                       goto free_opp;
-               }
-
-               head = &dev_opp->opp_list;
-               goto list_add;
-       }
-
-       /*
-        * Insert new OPP in order of increasing frequency
-        * and discard if already present
-        */
-       head = &dev_opp->opp_list;
-       list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
-               if (new_opp->rate <= opp->rate)
-                       break;
-               else
-                       head = &opp->node;
-       }
-
-       /* Duplicate OPPs ? */
-       if (new_opp->rate == opp->rate) {
-               ret = opp->available && new_opp->u_volt == opp->u_volt ?
-                       0 : -EEXIST;
-
-               dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
-                        __func__, opp->rate, opp->u_volt, opp->available,
-                        new_opp->rate, new_opp->u_volt, new_opp->available);
-               goto free_opp;
-       }
-
-list_add:
-       new_opp->dev_opp = dev_opp;
-       list_add_rcu(&new_opp->node, head);
-       mutex_unlock(&dev_opp_list_lock);
-
-       /*
-        * Notify the changes in the availability of the operable
-        * frequency/voltage list.
-        */
-       srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
-       return 0;
+       struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
 
-free_opp:
-       mutex_unlock(&dev_opp_list_lock);
-       kfree(new_opp);
-       return ret;
+       kfree_rcu(device_opp, rcu_head);
 }
 
 /**
- * dev_pm_opp_add()  - Add an OPP table from a table definitions
- * @dev:       device for which we do this operation
- * @freq:      Frequency in Hz for this OPP
- * @u_volt:    Voltage in uVolts for this OPP
- *
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions.
+ * _remove_device_opp() - Removes a device OPP table
+ * @dev_opp: device OPP table to be removed.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0           On success OR
- *             Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST     Freq are same and volt are different OR
- *             Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM     Memory allocation failure
+ * Removes/frees the device OPP table if it doesn't contain any OPPs.
  */
-int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+static void _remove_device_opp(struct device_opp *dev_opp)
 {
-       return _opp_add_dynamic(dev, freq, u_volt, true);
+       struct device_list_opp *list_dev;
+
+       if (!list_empty(&dev_opp->opp_list))
+               return;
+
+       list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+                                   node);
+
+       _remove_list_dev(list_dev, dev_opp);
+
+       /* dev_list must be empty now */
+       WARN_ON(!list_empty(&dev_opp->dev_list));
+
+       list_del_rcu(&dev_opp->node);
+       call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+                 _kfree_device_rcu);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
 /**
  * _kfree_opp_rcu() - Free OPP RCU handler
@@ -580,21 +658,11 @@ static void _kfree_opp_rcu(struct rcu_head *head)
        kfree_rcu(opp, rcu_head);
 }
 
-/**
- * _kfree_device_rcu() - Free device_opp RCU handler
- * @head:      RCU head
- */
-static void _kfree_device_rcu(struct rcu_head *head)
-{
-       struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
-
-       kfree_rcu(device_opp, rcu_head);
-}
-
 /**
  * _opp_remove()  - Remove an OPP from a table definition
  * @dev_opp:   points back to the device_opp struct this opp belongs to
  * @opp:       pointer to the OPP to remove
+ * @notify:    whether an OPP_EVENT_REMOVE notification should be sent
  *
  * This function removes an opp definition from the opp list.
  *
@@ -603,21 +671,18 @@ static void _kfree_device_rcu(struct rcu_head *head)
  * strategy.
  */
 static void _opp_remove(struct device_opp *dev_opp,
-                       struct dev_pm_opp *opp)
+                       struct dev_pm_opp *opp, bool notify)
 {
        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
-       srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+       if (notify)
+               srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
        list_del_rcu(&opp->node);
        call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
-       if (list_empty(&dev_opp->opp_list)) {
-               list_del_rcu(&dev_opp->node);
-               call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
-                         _kfree_device_rcu);
-       }
+       _remove_device_opp(dev_opp);
 }
 
 /**
@@ -659,51 +724,346 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
                goto unlock;
        }
 
-       _opp_remove(dev_opp, opp);
+       _opp_remove(dev_opp, opp, true);
 unlock:
        mutex_unlock(&dev_opp_list_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
-/**
- * _opp_set_availability() - helper to set the availability of an opp
- * @dev:               device for which we do this operation
- * @freq:              OPP frequency to modify availability
- * @availability_req:  availability status requested for this opp
- *
- * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
- * share a common logic which is isolated here.
- *
- * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
- * successful.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks to
- * keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
- */
-static int _opp_set_availability(struct device *dev, unsigned long freq,
-                                bool availability_req)
+static struct dev_pm_opp *_allocate_opp(struct device *dev,
+                                       struct device_opp **dev_opp)
 {
-       struct device_opp *dev_opp;
-       struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
-       int r = 0;
+       struct dev_pm_opp *opp;
 
-       /* keep the node allocated */
-       new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
-       if (!new_opp)
-               return -ENOMEM;
+       /* allocate new OPP node */
+       opp = kzalloc(sizeof(*opp), GFP_KERNEL);
+       if (!opp)
+               return NULL;
 
-       mutex_lock(&dev_opp_list_lock);
+       INIT_LIST_HEAD(&opp->node);
 
-       /* Find the device_opp */
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp)) {
-               r = PTR_ERR(dev_opp);
-               dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
-               goto unlock;
+       *dev_opp = _add_device_opp(dev);
+       if (!*dev_opp) {
+               kfree(opp);
+               return NULL;
+       }
+
+       return opp;
+}
+
+static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+                   struct device_opp *dev_opp)
+{
+       struct dev_pm_opp *opp;
+       struct list_head *head = &dev_opp->opp_list;
+
+       /*
+        * Insert new OPP in order of increasing frequency and discard if
+        * already present.
+        *
+        * Need to use &dev_opp->opp_list in the condition part of the 'for'
+        * loop, don't replace it with head otherwise it will become an infinite
+        * loop.
+        */
+       list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+               if (new_opp->rate > opp->rate) {
+                       head = &opp->node;
+                       continue;
+               }
+
+               if (new_opp->rate < opp->rate)
+                       break;
+
+               /* Duplicate OPPs */
+               dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+                        __func__, opp->rate, opp->u_volt, opp->available,
+                        new_opp->rate, new_opp->u_volt, new_opp->available);
+
+               return opp->available && new_opp->u_volt == opp->u_volt ?
+                       0 : -EEXIST;
+       }
+
+       new_opp->dev_opp = dev_opp;
+       list_add_rcu(&new_opp->node, head);
+
+       return 0;
+}
+
+/**
+ * _opp_add_dynamic() - Allocate a dynamic OPP.
+ * @dev:       device for which we do this operation
+ * @freq:      Frequency in Hz for this OPP
+ * @u_volt:    Voltage in uVolts for this OPP
+ * @dynamic:   true for dynamically added OPPs, false for OPPs created from static DT entries
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ *
+ * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
+ * freed by of_free_opp_table.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq are same and volt are different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ */
+static int _opp_add_dynamic(struct device *dev, unsigned long freq,
+                           long u_volt, bool dynamic)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *new_opp;
+       int ret;
+
+       /* Hold our list modification lock here */
+       mutex_lock(&dev_opp_list_lock);
+
+       new_opp = _allocate_opp(dev, &dev_opp);
+       if (!new_opp) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* populate the opp table */
+       new_opp->rate = freq;
+       new_opp->u_volt = u_volt;
+       new_opp->available = true;
+       new_opp->dynamic = dynamic;
+
+       ret = _opp_add(dev, new_opp, dev_opp);
+       if (ret)
+               goto free_opp;
+
+       mutex_unlock(&dev_opp_list_lock);
+
+       /*
+        * Notify the changes in the availability of the operable
+        * frequency/voltage list.
+        */
+       srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+       return 0;
+
+free_opp:
+       _opp_remove(dev_opp, new_opp, false);
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+       return ret;
+}
+
+/* TODO: Support multiple regulators */
+static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
+{
+       u32 microvolt[3] = {0};
+       int count, ret;
+
+       count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+       if (!count)
+               return 0;
+
+       /* There can be one or three elements here */
+       if (count != 1 && count != 3) {
+               dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
+                       __func__, count);
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
+                                        count);
+       if (ret) {
+               dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
+                       ret);
+               return -EINVAL;
+       }
+
+       opp->u_volt = microvolt[0];
+       opp->u_volt_min = microvolt[1];
+       opp->u_volt_max = microvolt[2];
+
+       return 0;
+}
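
The helper above accepts either a single value or a <target min max> triplet from the new "opp-microvolt" property. As a minimal consumer-side sketch (the helper name below is made up and not part of this patch), the resulting target voltage can be read back through the existing OPP lookup API under RCU:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: return the target voltage (u_volt above) of the
 * OPP at @freq, or 0 if no such OPP exists.  Assumes the device's OPP
 * table has already been populated.
 */
static unsigned long example_opp_voltage(struct device *dev, unsigned long freq)
{
        struct dev_pm_opp *opp;
        unsigned long u_volt = 0;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_exact(dev, freq, true);
        if (!IS_ERR(opp))
                u_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return u_volt;
}
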
+
+/**
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @dev:       device for which we do this operation
+ * @np:                device node
+ *
+ * This function adds an opp definition to the opp list and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq are same and volt are different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ * -EINVAL     Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *new_opp;
+       u64 rate;
+       u32 val;
+       int ret;
+
+       /* Hold our list modification lock here */
+       mutex_lock(&dev_opp_list_lock);
+
+       new_opp = _allocate_opp(dev, &dev_opp);
+       if (!new_opp) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       ret = of_property_read_u64(np, "opp-hz", &rate);
+       if (ret < 0) {
+               dev_err(dev, "%s: opp-hz not found\n", __func__);
+               goto free_opp;
+       }
+
+       /*
+        * Rate is defined as an unsigned long in clk API, and so casting
+        * explicitly to its type. Must be fixed once rate is 64 bit
+        * guaranteed in clk API.
+        */
+       new_opp->rate = (unsigned long)rate;
+       new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+       new_opp->np = np;
+       new_opp->dynamic = false;
+       new_opp->available = true;
+
+       if (!of_property_read_u32(np, "clock-latency-ns", &val))
+               new_opp->clock_latency_ns = val;
+
+       ret = opp_get_microvolt(new_opp, dev);
+       if (ret)
+               goto free_opp;
+
+       if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
+               new_opp->u_amp = val;
+
+       ret = _opp_add(dev, new_opp, dev_opp);
+       if (ret)
+               goto free_opp;
+
+       /* OPP to select on device suspend */
+       if (of_property_read_bool(np, "opp-suspend")) {
+               if (dev_opp->suspend_opp)
+                       dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+                                __func__, dev_opp->suspend_opp->rate,
+                                new_opp->rate);
+               else
+                       dev_opp->suspend_opp = new_opp;
+       }
+
+       if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
+               dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+       mutex_unlock(&dev_opp_list_lock);
+
+       pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+                __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+                new_opp->u_volt_min, new_opp->u_volt_max,
+                new_opp->clock_latency_ns);
+
+       /*
+        * Notify the changes in the availability of the operable
+        * frequency/voltage list.
+        */
+       srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+       return 0;
+
+free_opp:
+       _opp_remove(dev_opp, new_opp, false);
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+       return ret;
+}
+
+/**
+ * dev_pm_opp_add()  - Add an OPP table entry from a table definition
+ * @dev:       device for which we do this operation
+ * @freq:      Frequency in Hz for this OPP
+ * @u_volt:    Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq are same and volt are different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ */
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+       return _opp_add_dynamic(dev, freq, u_volt, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
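
As a minimal usage sketch (the helper name and the frequency/voltage values are made up), a driver could register a pair of dynamic OPPs at probe time and undo the first one if the second fails:

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical probe-time helper: register two dynamic OPPs. */
static int example_register_opps(struct device *dev)
{
        int ret;

        ret = dev_pm_opp_add(dev, 500000000, 900000);   /* 500 MHz @ 0.9 V */
        if (ret)
                return ret;

        ret = dev_pm_opp_add(dev, 1000000000, 1100000); /* 1 GHz @ 1.1 V */
        if (ret)
                dev_pm_opp_remove(dev, 500000000);      /* undo on failure */

        return ret;
}
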
+
+/**
+ * _opp_set_availability() - helper to set the availability of an opp
+ * @dev:               device for which we do this operation
+ * @freq:              OPP frequency to modify availability
+ * @availability_req:  availability status requested for this opp
+ *
+ * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
+ * share a common logic which is isolated here.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+static int _opp_set_availability(struct device *dev, unsigned long freq,
+                                bool availability_req)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       int r = 0;
+
+       /* keep the node allocated */
+       new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
+       if (!new_opp)
+               return -ENOMEM;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       /* Find the device_opp */
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               r = PTR_ERR(dev_opp);
+               dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+               goto unlock;
        }
 
        /* Do we have the frequency? */
@@ -825,28 +1185,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
 #ifdef CONFIG_OF
 /**
- * of_init_opp_table() - Initialize opp table from device tree
+ * of_free_opp_table() - Free OPP table entries created from static DT entries
  * @dev:       device pointer used to lookup device OPPs.
  *
- * Register the initial OPP table with the OPP library for given device.
+ * Free OPPs created using static entries present in DT.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
- *
- * Return:
- * 0           On success OR
- *             Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST     Freq are same and volt are different OR
- *             Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM     Memory allocation failure
- * -ENODEV     when 'operating-points' property is not found or is invalid data
- *             in device node.
- * -ENODATA    when empty 'operating-points' property is found
  */
-int of_init_opp_table(struct device *dev)
+void of_free_opp_table(struct device *dev)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *opp, *tmp;
+
+       /* Hold our list modification lock here */
+       mutex_lock(&dev_opp_list_lock);
+
+       /* Check for existing list for 'dev' */
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               int error = PTR_ERR(dev_opp);
+
+               if (error != -ENODEV)
+                       WARN(1, "%s: dev_opp: %d\n",
+                            IS_ERR_OR_NULL(dev) ?
+                                       "Invalid device" : dev_name(dev),
+                            error);
+               goto unlock;
+       }
+
+       /* Find if dev_opp manages a single device */
+       if (list_is_singular(&dev_opp->dev_list)) {
+               /* Free static OPPs */
+               list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+                       if (!opp->dynamic)
+                               _opp_remove(dev_opp, opp, true);
+               }
+       } else {
+               _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+       }
+
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(of_free_opp_table);
+
+void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+       struct device *cpu_dev;
+       int cpu;
+
+       WARN_ON(cpumask_empty(cpumask));
+
+       for_each_cpu(cpu, cpumask) {
+               cpu_dev = get_cpu_device(cpu);
+               if (!cpu_dev) {
+                       pr_err("%s: failed to get cpu%d device\n", __func__,
+                              cpu);
+                       continue;
+               }
+
+               of_free_opp_table(cpu_dev);
+       }
+}
+EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
+
+/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
+static struct device_node *
+_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
+{
+       struct device_node *opp_np;
+
+       opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
+       if (!opp_np) {
+               dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
+                       __func__, prop->name);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return opp_np;
+}
+
+/* Returns opp descriptor node for a device. Caller must do of_node_put() */
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+       const struct property *prop;
+
+       prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+       if (!prop)
+               return ERR_PTR(-ENODEV);
+       if (!prop->value)
+               return ERR_PTR(-ENODATA);
+
+       /*
+        * TODO: Support for multiple OPP tables.
+        *
+        * There should be only ONE phandle present in "operating-points-v2"
+        * property.
+        */
+       if (prop->length != sizeof(__be32)) {
+               dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return _of_get_opp_desc_node_from_prop(dev, prop);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_init_opp_table_v2(struct device *dev,
+                                const struct property *prop)
+{
+       struct device_node *opp_np, *np;
+       struct device_opp *dev_opp;
+       int ret = 0, count = 0;
+
+       if (!prop->value)
+               return -ENODATA;
+
+       /* Get opp node */
+       opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
+       if (IS_ERR(opp_np))
+               return PTR_ERR(opp_np);
+
+       dev_opp = _managed_opp(opp_np);
+       if (dev_opp) {
+               /* OPPs are already managed */
+               if (!_add_list_dev(dev, dev_opp))
+                       ret = -ENOMEM;
+               goto put_opp_np;
+       }
+
+       /* We have opp-list node now, iterate over it and add OPPs */
+       for_each_available_child_of_node(opp_np, np) {
+               count++;
+
+               ret = _opp_add_static_v2(dev, np);
+               if (ret) {
+                       dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+                               ret);
+                       goto free_table;
+               }
+       }
+
+       /* There should be one or more OPPs defined */
+       if (WARN_ON(!count)) {
+               ret = -ENOENT;
+               goto put_opp_np;
+       }
+
+       dev_opp = _find_device_opp(dev);
+       if (WARN_ON(IS_ERR(dev_opp))) {
+               ret = PTR_ERR(dev_opp);
+               goto free_table;
+       }
+
+       dev_opp->np = opp_np;
+       dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+       of_node_put(opp_np);
+       return 0;
+
+free_table:
+       of_free_opp_table(dev);
+put_opp_np:
+       of_node_put(opp_np);
+
+       return ret;
+}
+
+/* Initializes OPP tables based on the old, deprecated bindings */
+static int _of_init_opp_table_v1(struct device *dev)
 {
        const struct property *prop;
        const __be32 *val;
@@ -881,47 +1392,177 @@ int of_init_opp_table(struct device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(of_init_opp_table);
 
 /**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * of_init_opp_table() - Initialize opp table from device tree
  * @dev:       device pointer used to lookup device OPPs.
  *
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq are same and volt are different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ * -ENODEV     when 'operating-points' property is not found or is invalid data
+ *             in device node.
+ * -ENODATA    when empty 'operating-points' property is found
+ * -EINVAL     when invalid entries are found in opp-v2 table
  */
-void of_free_opp_table(struct device *dev)
+int of_init_opp_table(struct device *dev)
 {
+       const struct property *prop;
+
+       /*
+        * OPPs have two versions of bindings now. The older one is deprecated,
+        * try for the new binding first.
+        */
+       prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+       if (!prop) {
+               /*
+                * Try old-deprecated bindings for backward compatibility with
+                * older dtbs.
+                */
+               return _of_init_opp_table_v1(dev);
+       }
+
+       return _of_init_opp_table_v2(dev, prop);
+}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
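
A minimal sketch of the expected probe/remove pairing (helper names are made up): of_init_opp_table() now tries the operating-points-v2 binding first and falls back to the deprecated one, while of_free_opp_table() drops only the static, DT-created entries:

#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_probe(struct device *dev)
{
        int ret;

        ret = of_init_opp_table(dev);   /* v2 binding first, then v1 */
        if (ret)
                dev_warn(dev, "failed to init OPP table: %d\n", ret);

        return ret;
}

static void example_remove(struct device *dev)
{
        /* Frees only the static (DT-created) OPPs of this device. */
        of_free_opp_table(dev);
}
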
+
+int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+       struct device *cpu_dev;
+       int cpu, ret = 0;
+
+       WARN_ON(cpumask_empty(cpumask));
+
+       for_each_cpu(cpu, cpumask) {
+               cpu_dev = get_cpu_device(cpu);
+               if (!cpu_dev) {
+                       pr_err("%s: failed to get cpu%d device\n", __func__,
+                              cpu);
+                       continue;
+               }
+
+               ret = of_init_opp_table(cpu_dev);
+               if (ret) {
+                       pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+                              __func__, cpu, ret);
+
+                       /* Free all other OPPs */
+                       of_cpumask_free_opp_table(cpumask);
+                       break;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+       struct device_list_opp *list_dev;
        struct device_opp *dev_opp;
-       struct dev_pm_opp *opp, *tmp;
+       struct device *dev;
+       int cpu, ret = 0;
 
-       /* Check for existing list for 'dev' */
-       dev_opp = _find_device_opp(dev);
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(cpu_dev);
        if (IS_ERR(dev_opp)) {
-               int error = PTR_ERR(dev_opp);
-               if (error != -ENODEV)
-                       WARN(1, "%s: dev_opp: %d\n",
-                            IS_ERR_OR_NULL(dev) ?
-                                       "Invalid device" : dev_name(dev),
-                            error);
-               return;
+               ret = -EINVAL;
+               goto out_rcu_read_unlock;
        }
 
-       /* Hold our list modification lock here */
-       mutex_lock(&dev_opp_list_lock);
+       for_each_cpu(cpu, cpumask) {
+               if (cpu == cpu_dev->id)
+                       continue;
 
-       /* Free static OPPs */
-       list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
-               if (!opp->dynamic)
-                       _opp_remove(dev_opp, opp);
+               dev = get_cpu_device(cpu);
+               if (!dev) {
+                       dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+                               __func__, cpu);
+                       continue;
+               }
+
+               list_dev = _add_list_dev(dev, dev_opp);
+               if (!list_dev) {
+                       dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+                               __func__, cpu);
+                       continue;
+               }
        }
+out_rcu_read_unlock:
+       rcu_read_unlock();
 
-       mutex_unlock(&dev_opp_list_lock);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should already be set to a mask containing cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+       struct device_node *np, *tmp_np;
+       struct device *tcpu_dev;
+       int cpu, ret = 0;
+
+       /* Get OPP descriptor node */
+       np = _of_get_opp_desc_node(cpu_dev);
+       if (IS_ERR(np)) {
+               dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
+                       PTR_ERR(np));
+               return -ENOENT;
+       }
+
+       /* Are OPPs shared? */
+       if (!of_property_read_bool(np, "opp-shared"))
+               goto put_cpu_node;
+
+       for_each_possible_cpu(cpu) {
+               if (cpu == cpu_dev->id)
+                       continue;
+
+               tcpu_dev = get_cpu_device(cpu);
+               if (!tcpu_dev) {
+                       dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+                               __func__, cpu);
+                       ret = -ENODEV;
+                       goto put_cpu_node;
+               }
+
+               /* Get OPP descriptor node */
+               tmp_np = _of_get_opp_desc_node(tcpu_dev);
+               if (IS_ERR(tmp_np)) {
+                       dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
+                               __func__, PTR_ERR(tmp_np));
+                       ret = PTR_ERR(tmp_np);
+                       goto put_cpu_node;
+               }
+
+               /* CPUs are sharing opp node */
+               if (np == tmp_np)
+                       cpumask_set_cpu(cpu, cpumask);
+
+               of_node_put(tmp_np);
+       }
+
+put_cpu_node:
+       of_node_put(np);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
 #endif
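
A minimal sketch of how the new cpumask helpers fit together (the function name is made up; the real consumer is the cpufreq-dt change below), assuming operating-points-v2 bindings and that the matching declarations land in linux/pm_opp.h:

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical helper: find the CPUs sharing cpu_dev's OPP table and
 * create OPP tables for all of them.
 */
static int example_init_shared_cpu_opps(struct device *cpu_dev,
                                        struct cpumask *cpus)
{
        int ret;

        cpumask_clear(cpus);
        cpumask_set_cpu(cpu_dev->id, cpus);

        /* Works only with operating-points-v2; -ENOENT means v1 bindings. */
        ret = of_get_cpus_sharing_opps(cpu_dev, cpus);
        if (ret)
                return ret;

        return of_cpumask_init_opp_table(cpus);
}
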
index f1a5d95..998fa6b 100644 (file)
@@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
 extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
 extern int pm_qos_sysfs_add_flags(struct device *dev);
 extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
 
 #else /* CONFIG_PM */
 
index e56d538..7f3646e 100644 (file)
@@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
 }
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+       int ret;
+
+       if (!dev->power.set_latency_tolerance)
+               return -EINVAL;
+
+       mutex_lock(&dev_pm_qos_sysfs_mtx);
+       ret = pm_qos_sysfs_add_latency_tolerance(dev);
+       mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+       mutex_lock(&dev_pm_qos_sysfs_mtx);
+       pm_qos_sysfs_remove_latency_tolerance(dev);
+       mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+       /* Remove the request from user space now */
+       pm_runtime_get_sync(dev);
+       dev_pm_qos_update_user_latency_tolerance(dev,
+               PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+       pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
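
A minimal sketch of the intended pairing (function names are made up): a driver whose dev->power.set_latency_tolerance callback has been set up by its bus can expose the sysfs attribute at probe time and hide it again on remove:

#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_probe_lat_tol(struct device *dev)
{
        /* Fails with -EINVAL if no ->set_latency_tolerance() callback. */
        return dev_pm_qos_expose_latency_tolerance(dev);
}

static void example_remove_lat_tol(struct device *dev)
{
        /* Also resets the user-space request to "no constraint". */
        dev_pm_qos_hide_latency_tolerance(dev);
}
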
index d2be3f9..a7b4679 100644 (file)
@@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev)
        sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
 }
 
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+       return sysfs_merge_group(&dev->kobj,
+                                &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+       sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
 void rpm_sysfs_remove(struct device *dev)
 {
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
index f3f6d16..841b15c 100644 (file)
  */
 void device_add_property_set(struct device *dev, struct property_set *pset)
 {
-       if (pset)
-               pset->fwnode.type = FWNODE_PDATA;
+       if (!pset)
+               return;
 
+       pset->fwnode.type = FWNODE_PDATA;
        set_secondary_fwnode(dev, &pset->fwnode);
 }
 EXPORT_SYMBOL_GPL(device_add_property_set);
@@ -461,7 +462,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
                return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
                                          DEV_PROP_STRING, val, 1);
 
-       return -ENXIO;
+       return pset_prop_read_array(to_pset(fwnode), propname,
+                                   DEV_PROP_STRING, val, 1);
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string);
 
index 4a2ef09..f504232 100644 (file)
@@ -3756,6 +3756,14 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
        u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 
+       /*
+        * For flush requests, request_idx starts at the end of the
+        * tag space.  Since we don't support FLUSH/FUA, simply return
+        * 0 as there's nothing to be done.
+        */
+       if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
+               return 0;
+
        cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
                        &cmd->command_dma, GFP_KERNEL);
        if (!cmd->command)
index cc8a71c..2bacf24 100644 (file)
@@ -130,6 +130,13 @@ config ARM_KIRKWOOD_CPUFREQ
          This adds the CPUFreq driver for Marvell Kirkwood
          SoCs.
 
+config ARM_MT8173_CPUFREQ
+       bool "Mediatek MT8173 CPUFreq support"
+       depends on ARCH_MEDIATEK && REGULATOR
+       select PM_OPP
+       help
+         This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+
 config ARM_OMAP2PLUS_CPUFREQ
        bool "TI OMAP2+"
        depends on ARCH_OMAP2PLUS
index 2169bf7..9c75faf 100644 (file)
@@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)   += hisi-acpu-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)                += imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)           += integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)     += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_MT8173_CPUFREQ)       += mt8173-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)    += omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)       += pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)                   += pxa3xx-cpufreq.o
index 0136dfc..15b921a 100644 (file)
@@ -65,18 +65,21 @@ enum {
 #define MSR_K7_HWCR_CPB_DIS    (1ULL << 25)
 
 struct acpi_cpufreq_data {
-       struct acpi_processor_performance *acpi_data;
        struct cpufreq_frequency_table *freq_table;
        unsigned int resume;
        unsigned int cpu_feature;
+       unsigned int acpi_perf_cpu;
        cpumask_var_t freqdomain_cpus;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+       return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
@@ -144,7 +147,7 @@ static int _store_boost(int val)
 
 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+       struct acpi_cpufreq_data *data = policy->driver_data;
 
        return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
        struct acpi_processor_performance *perf;
        int i;
 
-       perf = data->acpi_data;
+       perf = to_perf_data(data);
 
        for (i = 0; i < perf->state_count; i++) {
                if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
        else
                msr &= INTEL_MSR_RANGE;
 
-       perf = data->acpi_data;
+       perf = to_perf_data(data);
 
        cpufreq_for_each_entry(pos, data->freq_table)
                if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
        put_cpu();
 }
 
-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
        if (unlikely(cpumask_empty(mask)))
                return 0;
 
-       switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+       switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
-               perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+               perf = to_perf_data(data);
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+       struct acpi_cpufreq_data *data;
+       struct cpufreq_policy *policy;
        unsigned int freq;
        unsigned int cached_freq;
 
        pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
 
-       if (unlikely(data == NULL ||
-                    data->acpi_data == NULL || data->freq_table == NULL)) {
+       policy = cpufreq_cpu_get(cpu);
+       if (unlikely(!policy))
                return 0;
-       }
 
-       cached_freq = data->freq_table[data->acpi_data->state].frequency;
-       freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+       data = policy->driver_data;
+       cpufreq_cpu_put(policy);
+       if (unlikely(!data || !data->freq_table))
+               return 0;
+
+       cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+       freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
        unsigned int i;
 
        for (i = 0; i < 100; i++) {
-               cur_freq = extract_freq(get_cur_val(mask), data);
+               cur_freq = extract_freq(get_cur_val(mask, data), data);
                if (cur_freq == freq)
                        return 1;
                udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int index)
 {
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+       struct acpi_cpufreq_data *data = policy->driver_data;
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;
        unsigned int next_perf_state = 0; /* Index into perf table */
        int result = 0;
 
-       if (unlikely(data == NULL ||
-            data->acpi_data == NULL || data->freq_table == NULL)) {
+       if (unlikely(data == NULL || data->freq_table == NULL)) {
                return -ENODEV;
        }
 
-       perf = data->acpi_data;
+       perf = to_perf_data(data);
        next_perf_state = data->freq_table[index].driver_data;
        if (perf->state == next_perf_state) {
                if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ out:
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
-       struct acpi_processor_performance *perf = data->acpi_data;
+       struct acpi_processor_performance *perf;
 
+       perf = to_perf_data(data);
        if (cpu_khz) {
                /* search the closest match to cpu_khz */
                unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_free;
        }
 
-       data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-       per_cpu(acfreq_data, cpu) = data;
+       perf = per_cpu_ptr(acpi_perf_data, cpu);
+       data->acpi_perf_cpu = cpu;
+       policy->driver_data = data;
 
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 
-       result = acpi_processor_register_performance(data->acpi_data, cpu);
+       result = acpi_processor_register_performance(perf, cpu);
        if (result)
                goto err_free_mask;
 
-       perf = data->acpi_data;
        policy->shared_type = perf->shared_type;
 
        /*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 err_freqfree:
        kfree(data->freq_table);
 err_unreg:
-       acpi_processor_unregister_performance(perf, cpu);
+       acpi_processor_unregister_performance(cpu);
 err_free_mask:
        free_cpumask_var(data->freqdomain_cpus);
 err_free:
        kfree(data);
-       per_cpu(acfreq_data, cpu) = NULL;
+       policy->driver_data = NULL;
 
        return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+       struct acpi_cpufreq_data *data = policy->driver_data;
 
        pr_debug("acpi_cpufreq_cpu_exit\n");
 
        if (data) {
-               per_cpu(acfreq_data, policy->cpu) = NULL;
-               acpi_processor_unregister_performance(data->acpi_data,
-                                                     policy->cpu);
+               policy->driver_data = NULL;
+               acpi_processor_unregister_performance(data->acpi_perf_cpu);
                free_cpumask_var(data->freqdomain_cpus);
                kfree(data->freq_table);
                kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+       struct acpi_cpufreq_data *data = policy->driver_data;
 
        pr_debug("acpi_cpufreq_resume\n");
 
@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &freqdomain_cpus,
-       NULL,   /* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+       &cpb,
+#endif
        NULL,
 };
 
@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
         * only if configured. This is considered legacy code, which
         * will probably be removed at some point in the future.
         */
-       if (check_amd_hwpstate_cpu(0)) {
-               struct freq_attr **iter;
-
-               pr_debug("adding sysfs entry for cpb\n");
+       if (!check_amd_hwpstate_cpu(0)) {
+               struct freq_attr **attr;
 
-               for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
-                       ;
+               pr_debug("CPB unsupported, do not expose it\n");
 
-               /* make sure there is a terminator behind it */
-               if (iter[1] == NULL)
-                       *iter = &cpb;
+               for (attr = acpi_cpufreq_attr; *attr; attr++)
+                       if (*attr == &cpb) {
+                               *attr = NULL;
+                               break;
+                       }
        }
 #endif
        acpi_cpufreq_boost_init();
index 528a82b..c3583cd 100644 (file)
@@ -36,6 +36,12 @@ struct private_data {
        unsigned int voltage_tolerance; /* in percentage */
 };
 
+static struct freq_attr *cpufreq_dt_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,   /* Extra space for boost-attr if required */
+       NULL,
+};
+
 static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
        struct dev_pm_opp *opp;
@@ -184,7 +190,6 @@ try_again:
 
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
-       struct cpufreq_dt_platform_data *pd;
        struct cpufreq_frequency_table *freq_table;
        struct device_node *np;
        struct private_data *priv;
@@ -193,6 +198,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        struct clk *cpu_clk;
        unsigned long min_uV = ~0, max_uV = 0;
        unsigned int transition_latency;
+       bool need_update = false;
        int ret;
 
        ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
@@ -208,8 +214,47 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                goto out_put_reg_clk;
        }
 
-       /* OPPs might be populated at runtime, don't check for error here */
-       of_init_opp_table(cpu_dev);
+       /* Get OPP-sharing information from "operating-points-v2" bindings */
+       ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
+       if (ret) {
+               /*
+                * operating-points-v2 not supported, fallback to old method of
+                * finding shared-OPPs for backward compatibility.
+                */
+               if (ret == -ENOENT)
+                       need_update = true;
+               else
+                       goto out_node_put;
+       }
+
+       /*
+        * Initialize OPP tables for all policy->cpus. They will be shared by
+        * all CPUs that have marked themselves as sharing OPPs via DT bindings.
+        *
+        * For platforms not using operating-points-v2 bindings, we do this
+        * before updating policy->cpus. Otherwise, we will end up creating
+        * duplicate OPPs for policy->cpus.
+        *
+        * OPPs might be populated at runtime, don't check for error here
+        */
+       of_cpumask_init_opp_table(policy->cpus);
+
+       if (need_update) {
+               struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
+
+               if (!pd || !pd->independent_clocks)
+                       cpumask_setall(policy->cpus);
+
+               /*
+                * OPP tables are initialized only for policy->cpu, do it for
+                * others as well.
+                */
+               set_cpus_sharing_opps(cpu_dev, policy->cpus);
+
+               of_property_read_u32(np, "clock-latency", &transition_latency);
+       } else {
+               transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
+       }
 
        /*
         * But we need OPP table to function so if it is not there let's
@@ -230,7 +275,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
        of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
 
-       if (of_property_read_u32(np, "clock-latency", &transition_latency))
+       if (!transition_latency)
                transition_latency = CPUFREQ_ETERNAL;
 
        if (!IS_ERR(cpu_reg)) {
@@ -291,11 +336,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                goto out_free_cpufreq_table;
        }
 
-       policy->cpuinfo.transition_latency = transition_latency;
+       /* Support turbo/boost mode */
+       if (policy_has_boost_freq(policy)) {
+               /* This gets disabled by core on driver unregister */
+               ret = cpufreq_enable_boost_support();
+               if (ret)
+                       goto out_free_cpufreq_table;
+               cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+       }
 
-       pd = cpufreq_get_driver_data();
-       if (!pd || !pd->independent_clocks)
-               cpumask_setall(policy->cpus);
+       policy->cpuinfo.transition_latency = transition_latency;
 
        of_node_put(np);
 
@@ -306,7 +356,8 @@ out_free_cpufreq_table:
 out_free_priv:
        kfree(priv);
 out_free_opp:
-       of_free_opp_table(cpu_dev);
+       of_cpumask_free_opp_table(policy->cpus);
+out_node_put:
        of_node_put(np);
 out_put_reg_clk:
        clk_put(cpu_clk);
@@ -322,7 +373,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 
        cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-       of_free_opp_table(priv->cpu_dev);
+       of_cpumask_free_opp_table(policy->related_cpus);
        clk_put(policy->clk);
        if (!IS_ERR(priv->cpu_reg))
                regulator_put(priv->cpu_reg);
@@ -367,7 +418,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
        .exit = cpufreq_exit,
        .ready = cpufreq_ready,
        .name = "cpufreq-dt",
-       .attr = cpufreq_generic_attr,
+       .attr = cpufreq_dt_attr,
 };
 
 static int dt_cpufreq_probe(struct platform_device *pdev)
index 7a3c30c..abb7768 100644 (file)
@@ -112,12 +112,6 @@ static inline bool has_target(void)
        return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-/*
- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
- * sections
- */
-static DECLARE_RWSEM(cpufreq_rwsem);
-
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
@@ -277,10 +271,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get);
  * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
  * freed as that depends on the kobj count.
  *
- * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
- * valid policy is found. This is done to make sure the driver doesn't get
- * unregistered while the policy is being used.
- *
  * Return: A valid policy on success, otherwise NULL on failure.
  */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -291,9 +281,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;
 
-       if (!down_read_trylock(&cpufreq_rwsem))
-               return NULL;
-
        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
 
@@ -306,9 +293,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       if (!policy)
-               up_read(&cpufreq_rwsem);
-
        return policy;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -320,13 +304,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
  *
  * This decrements the kobject reference count incremented earlier by calling
  * cpufreq_cpu_get().
- *
- * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
  */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
        kobject_put(&policy->kobj);
-       up_read(&cpufreq_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
@@ -539,9 +520,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 {
        int err = -EINVAL;
 
-       if (!cpufreq_driver)
-               goto out;
-
        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
@@ -576,7 +554,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 
                mutex_unlock(&cpufreq_governor_mutex);
        }
-out:
        return err;
 }
 
@@ -625,9 +602,7 @@ static ssize_t store_##file_name                                    \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
-       ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
-       if (ret)                                                        \
-               return -EINVAL;                                         \
+       memcpy(&new_policy, policy, sizeof(*policy));                   \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
@@ -681,9 +656,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
        char    str_governor[16];
        struct cpufreq_policy new_policy;
 
-       ret = cpufreq_get_policy(&new_policy, policy->cpu);
-       if (ret)
-               return ret;
+       memcpy(&new_policy, policy, sizeof(*policy));
 
        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
@@ -694,14 +667,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                return -EINVAL;
 
        ret = cpufreq_set_policy(policy, &new_policy);
-
-       policy->user_policy.policy = policy->policy;
-       policy->user_policy.governor = policy->governor;
-
-       if (ret)
-               return ret;
-       else
-               return count;
+       return ret ? ret : count;
 }
 
 /**
@@ -851,9 +817,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;
 
-       if (!down_read_trylock(&cpufreq_rwsem))
-               return -EINVAL;
-
        down_read(&policy->rwsem);
 
        if (fattr->show)
@@ -862,7 +825,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
                ret = -EIO;
 
        up_read(&policy->rwsem);
-       up_read(&cpufreq_rwsem);
 
        return ret;
 }
@@ -879,9 +841,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
        if (!cpu_online(policy->cpu))
                goto unlock;
 
-       if (!down_read_trylock(&cpufreq_rwsem))
-               goto unlock;
-
        down_write(&policy->rwsem);
 
        /* Updating inactive policies is invalid, so avoid doing that. */
@@ -897,8 +856,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 unlock_policy_rwsem:
        up_write(&policy->rwsem);
-
-       up_read(&cpufreq_rwsem);
 unlock:
        put_online_cpus();
 
@@ -1027,8 +984,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
        }
 }
 
-static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
-                                    struct device *dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
 {
        struct freq_attr **drv_attr;
        int ret = 0;
@@ -1060,11 +1016,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
        return cpufreq_add_dev_symlink(policy);
 }
 
-static void cpufreq_init_policy(struct cpufreq_policy *policy)
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
-       int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
 
@@ -1083,16 +1038,10 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
        /* set default policy */
-       ret = cpufreq_set_policy(policy, &new_policy);
-       if (ret) {
-               pr_debug("setting policy failed\n");
-               if (cpufreq_driver->exit)
-                       cpufreq_driver->exit(policy);
-       }
+       return cpufreq_set_policy(policy, &new_policy);
 }
 
-static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-                                 unsigned int cpu, struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
        int ret = 0;
 
@@ -1126,33 +1075,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
        return 0;
 }
 
-static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
-{
-       struct cpufreq_policy *policy;
-       unsigned long flags;
-
-       read_lock_irqsave(&cpufreq_driver_lock, flags);
-       policy = per_cpu(cpufreq_cpu_data, cpu);
-       read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-       if (likely(policy)) {
-               /* Policy should be inactive here */
-               WARN_ON(!policy_is_inactive(policy));
-
-               down_write(&policy->rwsem);
-               policy->cpu = cpu;
-               policy->governor = NULL;
-               up_write(&policy->rwsem);
-       }
-
-       return policy;
-}
-
-static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
+       struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;
        int ret;
 
+       if (WARN_ON(!dev))
+               return NULL;
+
        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;
@@ -1180,10 +1111,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);
 
-       policy->cpu = dev->id;
+       policy->cpu = cpu;
 
        /* Set this once on allocation */
-       policy->kobj_cpu = dev->id;
+       policy->kobj_cpu = cpu;
 
        return policy;
 
@@ -1245,59 +1176,34 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
        kfree(policy);
 }
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int cpufreq_online(unsigned int cpu)
 {
-       unsigned int j, cpu = dev->id;
-       int ret = -ENOMEM;
        struct cpufreq_policy *policy;
+       bool new_policy;
        unsigned long flags;
-       bool recover_policy = !sif;
-
-       pr_debug("adding CPU %u\n", cpu);
-
-       if (cpu_is_offline(cpu)) {
-               /*
-                * Only possible if we are here from the subsys_interface add
-                * callback.  A hotplug notifier will follow and we will handle
-                * it as CPU online then.  For now, just create the sysfs link,
-                * unless there is no policy or the link is already present.
-                */
-               policy = per_cpu(cpufreq_cpu_data, cpu);
-               return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-                       ? add_cpu_dev_symlink(policy, cpu) : 0;
-       }
+       unsigned int j;
+       int ret;
 
-       if (!down_read_trylock(&cpufreq_rwsem))
-               return 0;
+       pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
-       if (policy && !policy_is_inactive(policy)) {
+       if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
-               ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-               up_read(&cpufreq_rwsem);
-               return ret;
-       }
+               if (!policy_is_inactive(policy))
+                       return cpufreq_add_policy_cpu(policy, cpu);
 
-       /*
-        * Restore the saved policy when doing light-weight init and fall back
-        * to the full init if that fails.
-        */
-       policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
-       if (!policy) {
-               recover_policy = false;
-               policy = cpufreq_policy_alloc(dev);
+               /* This is the only online CPU for the policy.  Start over. */
+               new_policy = false;
+               down_write(&policy->rwsem);
+               policy->cpu = cpu;
+               policy->governor = NULL;
+               up_write(&policy->rwsem);
+       } else {
+               new_policy = true;
+               policy = cpufreq_policy_alloc(cpu);
                if (!policy)
-                       goto nomem_out;
+                       return -ENOMEM;
        }
 
        cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1308,17 +1214,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
-               goto err_set_policy_cpu;
+               goto out_free_policy;
        }
 
        down_write(&policy->rwsem);
 
-       /* related cpus should atleast have policy->cpus */
-       cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-       /* Remember which CPUs have been present at the policy creation time. */
-       if (!recover_policy)
+       if (new_policy) {
+               /* related_cpus should at least include policy->cpus. */
+               cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+               /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+       }
 
        /*
         * affected cpus must always be the one, which are online. We aren't
@@ -1326,7 +1232,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-       if (!recover_policy) {
+       if (new_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
 
@@ -1340,7 +1246,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
-                       goto err_get_freq;
+                       goto out_exit_policy;
                }
        }
 
@@ -1387,10 +1293,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
 
-       if (!recover_policy) {
-               ret = cpufreq_add_dev_interface(policy, dev);
+       if (new_policy) {
+               ret = cpufreq_add_dev_interface(policy);
                if (ret)
-                       goto err_out_unregister;
+                       goto out_exit_policy;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);
 
@@ -1399,18 +1305,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }
 
-       cpufreq_init_policy(policy);
-
-       if (!recover_policy) {
-               policy->user_policy.policy = policy->policy;
-               policy->user_policy.governor = policy->governor;
+       ret = cpufreq_init_policy(policy);
+       if (ret) {
+               pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+                      __func__, cpu, ret);
+               /* cpufreq_policy_free() will notify based on this */
+               new_policy = false;
+               goto out_exit_policy;
        }
+
        up_write(&policy->rwsem);
 
        kobject_uevent(&policy->kobj, KOBJ_ADD);
 
-       up_read(&cpufreq_rwsem);
-
        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);
@@ -1419,24 +1326,47 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        return 0;
 
-err_out_unregister:
-err_get_freq:
+out_exit_policy:
        up_write(&policy->rwsem);
 
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
-err_set_policy_cpu:
-       cpufreq_policy_free(policy, recover_policy);
-nomem_out:
-       up_read(&cpufreq_rwsem);
+out_free_policy:
+       cpufreq_policy_free(policy, !new_policy);
+       return ret;
+}
+
+/**
+ * cpufreq_add_dev - add the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+       unsigned cpu = dev->id;
+       int ret;
+
+       dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+       if (cpu_online(cpu)) {
+               ret = cpufreq_online(cpu);
+       } else {
+               /*
+                * A hotplug notifier will follow and we will handle it as CPU
+                * online then.  For now, just create the sysfs link, unless
+                * there is no policy or the link is already present.
+                */
+               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+               ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+                       ? add_cpu_dev_symlink(policy, cpu) : 0;
+       }
 
        return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev)
+static void cpufreq_offline_prepare(unsigned int cpu)
 {
-       unsigned int cpu = dev->id;
-       int ret = 0;
        struct cpufreq_policy *policy;
 
        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1444,11 +1374,11 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
-               return -EINVAL;
+               return;
        }
 
        if (has_target()) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
        }
@@ -1469,7 +1399,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
-                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+                       int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
                                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 
@@ -1479,28 +1409,24 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
        } else if (cpufreq_driver->stop_cpu) {
                cpufreq_driver->stop_cpu(policy);
        }
-
-       return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev)
+static void cpufreq_offline_finish(unsigned int cpu)
 {
-       unsigned int cpu = dev->id;
-       int ret;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
-               return -EINVAL;
+               return;
        }
 
        /* Only proceed for inactive policies */
        if (!policy_is_inactive(policy))
-               return 0;
+               return;
 
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }
@@ -1512,8 +1438,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev)
         */
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
-
-       return 0;
 }
 
 /**
@@ -1530,8 +1454,8 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
                return 0;
 
        if (cpu_online(cpu)) {
-               __cpufreq_remove_dev_prepare(dev);
-               __cpufreq_remove_dev_finish(dev);
+               cpufreq_offline_prepare(cpu);
+               cpufreq_offline_finish(cpu);
        }
 
        cpumask_clear_cpu(cpu, policy->real_cpus);
@@ -2247,7 +2171,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 
        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-       if (new_policy->min > policy->max || new_policy->max < policy->min)
+       /*
+        * This check works well when we store new min/max freq attributes,
+        * because new_policy is a copy of policy with one field updated.
+        */
+       if (new_policy->min > new_policy->max)
                return -EINVAL;
 
        /* verify the cpu speed can be set within this limit */
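As a concrete illustration of the new_policy->min > new_policy->max check above: the sysfs store callbacks build new_policy by copying the current policy and updating exactly one of min/max, so comparing the two fields of the copy is enough to reject an inverted range. A minimal userspace sketch, not part of this commit; the struct and function names are invented:

    #include <stdio.h>

    /* Simplified stand-in for the min/max fields of struct cpufreq_policy. */
    struct limits { unsigned int min, max; };

    /*
     * Models store_scaling_min_freq(): copy the current limits, update one
     * field, then validate the copy against itself (-1 plays -EINVAL here).
     */
    static int store_min(struct limits cur, unsigned int new_min)
    {
            struct limits next = cur;       /* new_policy is a copy of policy */

            next.min = new_min;             /* only one field is updated */
            return next.min > next.max ? -1 : 0;
    }

    int main(void)
    {
            struct limits cur = { .min = 800000, .max = 2400000 };

            printf("%d\n", store_min(cur, 3000000));        /* prints -1 */
            printf("%d\n", store_min(cur, 1200000));        /* prints 0 */
            return 0;
    }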
@@ -2259,10 +2187,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);
 
-       /* adjust if necessary - hardware incompatibility*/
-       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                       CPUFREQ_INCOMPATIBLE, new_policy);
-
        /*
         * verify the cpu speed can be set within this limit, which might be
         * different to the first one
@@ -2296,16 +2220,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
-               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               if (ret) {
+                       /* This can happen due to a race with other operations */
+                       pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
+                                __func__, old_gov->name, ret);
+                       return ret;
+               }
+
                up_write(&policy->rwsem);
-               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                down_write(&policy->rwsem);
+
+               if (ret) {
+                       pr_err("%s: Failed to Exit Governor: %s (%d)\n",
+                              __func__, old_gov->name, ret);
+                       return ret;
+               }
        }
 
        /* start new governor */
        policy->governor = new_policy->governor;
-       if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-               if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+       ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+       if (!ret) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               if (!ret)
                        goto out;
 
                up_write(&policy->rwsem);
@@ -2317,11 +2256,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
-               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
-               __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+                       policy->governor = NULL;
+               else
+                       __cpufreq_governor(policy, CPUFREQ_GOV_START);
        }
 
-       return -EINVAL;
+       return ret;
 
  out:
        pr_debug("governor: change or update limits\n");
@@ -2350,8 +2291,6 @@ int cpufreq_update_policy(unsigned int cpu)
        memcpy(&new_policy, policy, sizeof(*policy));
        new_policy.min = policy->user_policy.min;
        new_policy.max = policy->user_policy.max;
-       new_policy.policy = policy->user_policy.policy;
-       new_policy.governor = policy->user_policy.governor;
 
        /*
         * BIOS might change freq behind our back
@@ -2387,27 +2326,23 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
-       struct device *dev;
 
-       dev = get_cpu_device(cpu);
-       if (dev) {
-               switch (action & ~CPU_TASKS_FROZEN) {
-               case CPU_ONLINE:
-                       cpufreq_add_dev(dev, NULL);
-                       break;
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_ONLINE:
+               cpufreq_online(cpu);
+               break;
 
-               case CPU_DOWN_PREPARE:
-                       __cpufreq_remove_dev_prepare(dev);
-                       break;
+       case CPU_DOWN_PREPARE:
+               cpufreq_offline_prepare(cpu);
+               break;
 
-               case CPU_POST_DEAD:
-                       __cpufreq_remove_dev_finish(dev);
-                       break;
+       case CPU_POST_DEAD:
+               cpufreq_offline_finish(cpu);
+               break;
 
-               case CPU_DOWN_FAILED:
-                       cpufreq_add_dev(dev, NULL);
-                       break;
-               }
+       case CPU_DOWN_FAILED:
+               cpufreq_online(cpu);
+               break;
        }
        return NOTIFY_OK;
 }
@@ -2477,6 +2412,49 @@ int cpufreq_boost_supported(void)
 }
 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
 
+static int create_boost_sysfs_file(void)
+{
+       int ret;
+
+       if (!cpufreq_boost_supported())
+               return 0;
+
+       /*
+        * Check if driver provides function to enable boost -
+        * if not, use cpufreq_boost_set_sw as default
+        */
+       if (!cpufreq_driver->set_boost)
+               cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+       ret = cpufreq_sysfs_create_file(&boost.attr);
+       if (ret)
+               pr_err("%s: cannot register global BOOST sysfs file\n",
+                      __func__);
+
+       return ret;
+}
+
+static void remove_boost_sysfs_file(void)
+{
+       if (cpufreq_boost_supported())
+               cpufreq_sysfs_remove_file(&boost.attr);
+}
+
+int cpufreq_enable_boost_support(void)
+{
+       if (!cpufreq_driver)
+               return -EINVAL;
+
+       if (cpufreq_boost_supported())
+               return 0;
+
+       cpufreq_driver->boost_supported = true;
+
+       /* This will get removed on driver unregister */
+       return create_boost_sysfs_file();
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
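cpufreq_enable_boost_support() is aimed at drivers that only discover boost frequencies while initializing a policy, for example from OPPs flagged as turbo (see the dev_pm_opp_is_turbo()/CPUFREQ_BOOST_FREQ and policy_has_boost_freq() hunks further down). A hedged sketch of such a driver ->init() path; my_driver_build_table() is a placeholder for however the driver fills its table, not a real kernel helper:

    #include <linux/cpufreq.h>

    static int my_driver_init(struct cpufreq_policy *policy)
    {
            struct cpufreq_frequency_table *table = my_driver_build_table(policy);
            int ret;

            ret = cpufreq_table_validate_and_show(policy, table);
            if (ret)
                    return ret;

            /*
             * If any entry carries CPUFREQ_BOOST_FREQ, expose the global
             * "boost" knob; calling this when boost is already supported
             * is harmless.
             */
            if (policy_has_boost_freq(policy))
                    ret = cpufreq_enable_boost_support();

            return ret;
    }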
+
 int cpufreq_boost_enabled(void)
 {
        return cpufreq_driver->boost_enabled;
@@ -2515,10 +2493,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        pr_debug("trying to register driver %s\n", driver_data->name);
 
+       /* Protect against concurrent CPU online/offline. */
+       get_online_cpus();
+
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-               return -EEXIST;
+               ret = -EEXIST;
+               goto out;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2526,21 +2508,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (driver_data->setpolicy)
                driver_data->flags |= CPUFREQ_CONST_LOOPS;
 
-       if (cpufreq_boost_supported()) {
-               /*
-                * Check if driver provides function to enable boost -
-                * if not, use cpufreq_boost_set_sw as default
-                */
-               if (!cpufreq_driver->set_boost)
-                       cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
-               ret = cpufreq_sysfs_create_file(&boost.attr);
-               if (ret) {
-                       pr_err("%s: cannot register global BOOST sysfs file\n",
-                              __func__);
-                       goto err_null_driver;
-               }
-       }
+       ret = create_boost_sysfs_file();
+       if (ret)
+               goto err_null_driver;
 
        ret = subsys_interface_register(&cpufreq_interface);
        if (ret)
@@ -2557,17 +2527,19 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        register_hotcpu_notifier(&cpufreq_cpu_notifier);
        pr_debug("driver %s up and running\n", driver_data->name);
 
-       return 0;
+out:
+       put_online_cpus();
+       return ret;
+
 err_if_unreg:
        subsys_interface_unregister(&cpufreq_interface);
 err_boost_unreg:
-       if (cpufreq_boost_supported())
-               cpufreq_sysfs_remove_file(&boost.attr);
+       remove_boost_sysfs_file();
 err_null_driver:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-       return ret;
+       goto out;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
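Registration now runs with get_online_cpus() held, so CPUs cannot come or go while the driver pointer, the boost sysfs file and the subsys interface are installed, and every exit path funnels through the out label that drops the lock. A standalone sketch of that single-exit shape; the helper names below are stand-ins, not kernel APIs:

    /* Stand-in helpers; in the kernel these are get/put_online_cpus(),
     * the cpufreq_driver pointer and subsys_interface_register(). */
    static void lock_hotplug(void) { }
    static void unlock_hotplug(void) { }
    static int claim_driver_slot(void) { return 0; }
    static void release_driver_slot(void) { }
    static int create_interfaces(void) { return 0; }

    static int register_with_hotplug_held(void)
    {
            int ret;

            lock_hotplug();                 /* get_online_cpus() */

            ret = claim_driver_slot();      /* may fail with -EEXIST */
            if (ret)
                    goto out;

            ret = create_interfaces();
            if (ret)
                    goto err_release_slot;

            /* Success falls through to the same unlock as every error path. */
    out:
            unlock_hotplug();               /* put_online_cpus() */
            return ret;

    err_release_slot:
            release_driver_slot();
            goto out;
    }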
 
@@ -2588,19 +2560,18 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 
        pr_debug("unregistering driver %s\n", driver->name);
 
+       /* Protect against concurrent cpu hotplug */
+       get_online_cpus();
        subsys_interface_unregister(&cpufreq_interface);
-       if (cpufreq_boost_supported())
-               cpufreq_sysfs_remove_file(&boost.attr);
-
+       remove_boost_sysfs_file();
        unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
-       down_write(&cpufreq_rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);
 
        cpufreq_driver = NULL;
 
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-       up_write(&cpufreq_rwsem);
+       put_online_cpus();
 
        return 0;
 }
index c86a10c..84a1506 100644 (file)
@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 static void cs_check_cpu(int cpu, unsigned int load)
 {
        struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-       struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+       struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
        }
 }
 
-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+                                struct dbs_data *dbs_data, bool modify_all)
 {
-       struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-                       struct cs_cpu_dbs_info_s, cdbs.work.work);
-       unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-       struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
-                       cpu);
-       struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-       int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
-       bool modify_all = true;
 
-       mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-       if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
-               modify_all = false;
-       else
-               dbs_check_cpu(dbs_data, cpu);
+       if (modify_all)
+               dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
 
-       gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-       mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+       return delay_for_sampling_rate(cs_tuners->sampling_rate);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
        if (!dbs_info->enable)
                return 0;
 
-       policy = dbs_info->cdbs.cur_policy;
+       policy = dbs_info->cdbs.shared->policy;
 
        /*
         * we only care if our internally tracked freq moves outside the 'valid'
index 57a39f8..939197f 100644 (file)
@@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-       struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-       struct cpufreq_policy *policy;
+       struct cpufreq_policy *policy = cdbs->shared->policy;
        unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
@@ -60,11 +60,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
                ignore_nice = cs_tuners->ignore_nice_load;
        }
 
-       policy = cdbs->cur_policy;
-
        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
-               struct cpu_dbs_common_info *j_cdbs;
+               struct cpu_dbs_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
@@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
 static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                unsigned int delay)
 {
-       struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-       mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+       mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -199,33 +197,63 @@ EXPORT_SYMBOL_GPL(gov_queue_work);
 static inline void gov_cancel_work(struct dbs_data *dbs_data,
                struct cpufreq_policy *policy)
 {
-       struct cpu_dbs_common_info *cdbs;
+       struct cpu_dbs_info *cdbs;
        int i;
 
        for_each_cpu(i, policy->cpus) {
                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
-               cancel_delayed_work_sync(&cdbs->work);
+               cancel_delayed_work_sync(&cdbs->dwork);
        }
 }
 
 /* Will return if we need to evaluate cpu load again or not */
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-               unsigned int sampling_rate)
+static bool need_load_eval(struct cpu_common_dbs_info *shared,
+                          unsigned int sampling_rate)
 {
-       if (policy_is_shared(cdbs->cur_policy)) {
+       if (policy_is_shared(shared->policy)) {
                ktime_t time_now = ktime_get();
-               s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+               s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
 
                /* Do nothing if we recently have sampled */
                if (delta_us < (s64)(sampling_rate / 2))
                        return false;
                else
-                       cdbs->time_stamp = time_now;
+                       shared->time_stamp = time_now;
        }
 
        return true;
 }
-EXPORT_SYMBOL_GPL(need_load_eval);
+
+static void dbs_timer(struct work_struct *work)
+{
+       struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
+                                                dwork.work);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       struct cpufreq_policy *policy = shared->policy;
+       struct dbs_data *dbs_data = policy->governor_data;
+       unsigned int sampling_rate, delay;
+       bool modify_all = true;
+
+       mutex_lock(&shared->timer_mutex);
+
+       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+               sampling_rate = cs_tuners->sampling_rate;
+       } else {
+               struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+               sampling_rate = od_tuners->sampling_rate;
+       }
+
+       if (!need_load_eval(cdbs->shared, sampling_rate))
+               modify_all = false;
+
+       delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
+       gov_queue_work(dbs_data, policy, delay, modify_all);
+
+       mutex_unlock(&shared->timer_mutex);
+}
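All per-CPU delayed works of a policy now land in this common dbs_timer(), which serializes on the shared timer_mutex and uses need_load_eval() to rate-limit shared policies: load is only re-evaluated when at least half a sampling period has passed since the shared time_stamp. A small standalone model of that check, with made-up microsecond timestamps in place of ktime_t:

    #include <stdbool.h>
    #include <stdio.h>

    static long long last_eval_us;          /* plays shared->time_stamp */

    static bool need_load_eval_us(long long now_us, unsigned int rate_us)
    {
            long long delta = now_us - last_eval_us;

            if (delta < (long long)(rate_us / 2))
                    return false;           /* another CPU sampled recently */
            last_eval_us = now_us;
            return true;
    }

    int main(void)
    {
            last_eval_us = 1000000;
            /* sampling rate 20000us: a timer 4ms later is skipped, one
             * 12ms later samples and moves the shared time stamp. */
            printf("%d\n", need_load_eval_us(1004000, 20000));      /* 0 */
            printf("%d\n", need_load_eval_us(1012000, 20000));      /* 1 */
            return 0;
    }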
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
                unsigned int sampling_rate)
@@ -239,6 +267,37 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
        }
 }
 
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+                                struct common_dbs_data *cdata)
+{
+       struct cpu_common_dbs_info *shared;
+       int j;
+
+       /* Allocate memory for the common information for policy->cpus */
+       shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+       if (!shared)
+               return -ENOMEM;
+
+       /* Set shared for all CPUs, online+offline */
+       for_each_cpu(j, policy->related_cpus)
+               cdata->get_cpu_cdbs(j)->shared = shared;
+
+       return 0;
+}
+
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+                                struct common_dbs_data *cdata)
+{
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       int j;
+
+       for_each_cpu(j, policy->cpus)
+               cdata->get_cpu_cdbs(j)->shared = NULL;
+
+       kfree(shared);
+}
+
 static int cpufreq_governor_init(struct cpufreq_policy *policy,
                                 struct dbs_data *dbs_data,
                                 struct common_dbs_data *cdata)
@@ -246,9 +305,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
        unsigned int latency;
        int ret;
 
+       /* State should be equivalent to EXIT */
+       if (policy->governor_data)
+               return -EBUSY;
+
        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy()))
                        return -EINVAL;
+
+               ret = alloc_common_dbs_info(policy, cdata);
+               if (ret)
+                       return ret;
+
                dbs_data->usage_count++;
                policy->governor_data = dbs_data;
                return 0;
@@ -258,12 +326,16 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
        if (!dbs_data)
                return -ENOMEM;
 
+       ret = alloc_common_dbs_info(policy, cdata);
+       if (ret)
+               goto free_dbs_data;
+
        dbs_data->cdata = cdata;
        dbs_data->usage_count = 1;
 
        ret = cdata->init(dbs_data, !policy->governor->initialized);
        if (ret)
-               goto free_dbs_data;
+               goto free_common_dbs_info;
 
        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
@@ -300,15 +372,22 @@ put_kobj:
        }
 cdata_exit:
        cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+       free_common_dbs_info(policy, cdata);
 free_dbs_data:
        kfree(dbs_data);
        return ret;
 }
 
-static void cpufreq_governor_exit(struct cpufreq_policy *policy,
-                                 struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data)
 {
        struct common_dbs_data *cdata = dbs_data->cdata;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+
+       /* State should be equivalent to INIT */
+       if (!cdbs->shared || cdbs->shared->policy)
+               return -EBUSY;
 
        policy->governor_data = NULL;
        if (!--dbs_data->usage_count) {
@@ -323,6 +402,9 @@ static void cpufreq_governor_exit(struct cpufreq_policy *policy,
                cdata->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
        }
+
+       free_common_dbs_info(policy, cdata);
+       return 0;
 }
 
 static int cpufreq_governor_start(struct cpufreq_policy *policy,
@@ -330,12 +412,17 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 {
        struct common_dbs_data *cdata = dbs_data->cdata;
        unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
-       struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
        int io_busy = 0;
 
        if (!policy->cur)
                return -EINVAL;
 
+       /* State should be equivalent to INIT */
+       if (!shared || shared->policy)
+               return -EBUSY;
+
        if (cdata->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -349,12 +436,14 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
                io_busy = od_tuners->io_is_busy;
        }
 
+       shared->policy = policy;
+       shared->time_stamp = ktime_get();
+       mutex_init(&shared->timer_mutex);
+
        for_each_cpu(j, policy->cpus) {
-               struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+               struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
                unsigned int prev_load;
 
-               j_cdbs->cpu = j;
-               j_cdbs->cur_policy = policy;
                j_cdbs->prev_cpu_idle =
                        get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
 
@@ -366,8 +455,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-               mutex_init(&j_cdbs->timer_mutex);
-               INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+               INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
        }
 
        if (cdata->governor == GOV_CONSERVATIVE) {
@@ -386,20 +474,24 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
                od_ops->powersave_bias_init_cpu(cpu);
        }
 
-       /* Initiate timer time stamp */
-       cpu_cdbs->time_stamp = ktime_get();
-
        gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
                       true);
        return 0;
 }
 
-static void cpufreq_governor_stop(struct cpufreq_policy *policy,
-                                 struct dbs_data *dbs_data)
+static int cpufreq_governor_stop(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data)
 {
        struct common_dbs_data *cdata = dbs_data->cdata;
        unsigned int cpu = policy->cpu;
-       struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+
+       /* State should be equivalent to START */
+       if (!shared || !shared->policy)
+               return -EBUSY;
+
+       gov_cancel_work(dbs_data, policy);
 
        if (cdata->governor == GOV_CONSERVATIVE) {
                struct cs_cpu_dbs_info_s *cs_dbs_info =
@@ -408,38 +500,40 @@ static void cpufreq_governor_stop(struct cpufreq_policy *policy,
                cs_dbs_info->enable = 0;
        }
 
-       gov_cancel_work(dbs_data, policy);
-
-       mutex_destroy(&cpu_cdbs->timer_mutex);
-       cpu_cdbs->cur_policy = NULL;
+       shared->policy = NULL;
+       mutex_destroy(&shared->timer_mutex);
+       return 0;
 }
 
-static void cpufreq_governor_limits(struct cpufreq_policy *policy,
-                                   struct dbs_data *dbs_data)
+static int cpufreq_governor_limits(struct cpufreq_policy *policy,
+                                  struct dbs_data *dbs_data)
 {
        struct common_dbs_data *cdata = dbs_data->cdata;
        unsigned int cpu = policy->cpu;
-       struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
 
-       if (!cpu_cdbs->cur_policy)
-               return;
+       /* State should be equivalent to START */
+       if (!cdbs->shared || !cdbs->shared->policy)
+               return -EBUSY;
 
-       mutex_lock(&cpu_cdbs->timer_mutex);
-       if (policy->max < cpu_cdbs->cur_policy->cur)
-               __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+       mutex_lock(&cdbs->shared->timer_mutex);
+       if (policy->max < cdbs->shared->policy->cur)
+               __cpufreq_driver_target(cdbs->shared->policy, policy->max,
                                        CPUFREQ_RELATION_H);
-       else if (policy->min > cpu_cdbs->cur_policy->cur)
-               __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+       else if (policy->min > cdbs->shared->policy->cur)
+               __cpufreq_driver_target(cdbs->shared->policy, policy->min,
                                        CPUFREQ_RELATION_L);
        dbs_check_cpu(dbs_data, cpu);
-       mutex_unlock(&cpu_cdbs->timer_mutex);
+       mutex_unlock(&cdbs->shared->timer_mutex);
+
+       return 0;
 }
 
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         struct common_dbs_data *cdata, unsigned int event)
 {
        struct dbs_data *dbs_data;
-       int ret = 0;
+       int ret;
 
        /* Lock governor to block concurrent initialization of governor */
        mutex_lock(&cdata->mutex);
@@ -449,7 +543,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
        else
                dbs_data = cdata->gdbs_data;
 
-       if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+       if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
                ret = -EINVAL;
                goto unlock;
        }
@@ -459,17 +553,19 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                ret = cpufreq_governor_init(policy, dbs_data, cdata);
                break;
        case CPUFREQ_GOV_POLICY_EXIT:
-               cpufreq_governor_exit(policy, dbs_data);
+               ret = cpufreq_governor_exit(policy, dbs_data);
                break;
        case CPUFREQ_GOV_START:
                ret = cpufreq_governor_start(policy, dbs_data);
                break;
        case CPUFREQ_GOV_STOP:
-               cpufreq_governor_stop(policy, dbs_data);
+               ret = cpufreq_governor_stop(policy, dbs_data);
                break;
        case CPUFREQ_GOV_LIMITS:
-               cpufreq_governor_limits(policy, dbs_data);
+               ret = cpufreq_governor_limits(policy, dbs_data);
                break;
+       default:
+               ret = -EINVAL;
        }
 
 unlock:
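Taken together, the new -EBUSY checks give the governor events a strict lifecycle: POLICY_INIT is only valid when the governor is fully exited, START only after INIT, STOP and LIMITS only while started, and POLICY_EXIT only once stopped. A compact sketch of that ordering; the enums are local stand-ins for the CPUFREQ_GOV_* constants, and the kernel derives the state from policy->governor_data and cdbs->shared rather than storing it explicitly:

    enum gov_state { ST_EXITED, ST_INITED, ST_STARTED };
    enum gov_event { EV_POLICY_INIT, EV_START, EV_STOP, EV_LIMITS, EV_POLICY_EXIT };

    /* Returns 1 when the event is legal in the current state, 0 (-EBUSY) otherwise. */
    static int gov_event_allowed(enum gov_state s, enum gov_event ev)
    {
            switch (ev) {
            case EV_POLICY_INIT:
                    return s == ST_EXITED;
            case EV_START:
                    return s == ST_INITED;
            case EV_STOP:
            case EV_LIMITS:
                    return s == ST_STARTED;
            case EV_POLICY_EXIT:
                    return s == ST_INITED;
            }
            return 0;
    }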
index 34736f5..50f1717 100644 (file)
@@ -109,7 +109,7 @@ store_one(_gov, file_name)
 
 /* create helper routines */
 #define define_get_cpu_dbs_routines(_dbs_info)                         \
-static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)               \
+static struct cpu_dbs_info *get_cpu_cdbs(int cpu)                      \
 {                                                                      \
        return &per_cpu(_dbs_info, cpu).cdbs;                           \
 }                                                                      \
@@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu)                           \
  * cs_*: Conservative governor
  */
 
+/* Common to all CPUs of a policy */
+struct cpu_common_dbs_info {
+       struct cpufreq_policy *policy;
+       /*
+        * Per-policy mutex that serializes governor limit changes with
+        * dbs_timer invocation. We do not want dbs_timer to run while the
+        * user is changing the governor or limits.
+        */
+       struct mutex timer_mutex;
+       ktime_t time_stamp;
+};
+
 /* Per cpu structures */
-struct cpu_dbs_common_info {
-       int cpu;
+struct cpu_dbs_info {
        u64 prev_cpu_idle;
        u64 prev_cpu_wall;
        u64 prev_cpu_nice;
@@ -141,19 +152,12 @@ struct cpu_dbs_common_info {
         * wake-up from idle.
         */
        unsigned int prev_load;
-       struct cpufreq_policy *cur_policy;
-       struct delayed_work work;
-       /*
-        * percpu mutex that serializes governor limit change with gov_dbs_timer
-        * invocation. We do not want gov_dbs_timer to run when user is changing
-        * the governor or limits.
-        */
-       struct mutex timer_mutex;
-       ktime_t time_stamp;
+       struct delayed_work dwork;
+       struct cpu_common_dbs_info *shared;
 };
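After this split each per-CPU cpu_dbs_info keeps only its own sampling history plus a pointer to the one cpu_common_dbs_info shared by every CPU of the policy, where the policy pointer, timer_mutex and time_stamp now live (allocated by alloc_common_dbs_info() earlier in the series). A simplified userspace sketch of that wiring, with invented field names:

    #include <stdlib.h>

    struct shared_info { int policy_cpu; /* + mutex, time stamp, ... */ };
    struct percpu_info { unsigned long long prev_idle; struct shared_info *shared; };

    #define NR_DEMO_CPUS 4
    static struct percpu_info cdbs[NR_DEMO_CPUS];

    /* One shared object per policy; every related CPU points at it. */
    static int wire_policy(const int *cpus, int n)
    {
            struct shared_info *shared = calloc(1, sizeof(*shared));
            int i;

            if (!shared)
                    return -1;
            shared->policy_cpu = cpus[0];
            for (i = 0; i < n; i++)
                    cdbs[cpus[i]].shared = shared;
            return 0;
    }

    int main(void)
    {
            static const int policy_cpus[] = { 0, 1, 2, 3 };

            return wire_policy(policy_cpus, 4);
    }

One consequence of the move is that limit updates and timer callbacks for the whole policy now serialize on a single timer_mutex instead of one mutex per CPU.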
 
 struct od_cpu_dbs_info_s {
-       struct cpu_dbs_common_info cdbs;
+       struct cpu_dbs_info cdbs;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
@@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s {
 };
 
 struct cs_cpu_dbs_info_s {
-       struct cpu_dbs_common_info cdbs;
+       struct cpu_dbs_info cdbs;
        unsigned int down_skip;
        unsigned int requested_freq;
        unsigned int enable:1;
@@ -204,9 +208,11 @@ struct common_dbs_data {
         */
        struct dbs_data *gdbs_data;
 
-       struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+       struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
        void *(*get_cpu_dbs_info_s)(int cpu);
-       void (*gov_dbs_timer)(struct work_struct *work);
+       unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
+                                     struct dbs_data *dbs_data,
+                                     bool modify_all);
        void (*gov_check_cpu)(int cpu, unsigned int load);
        int (*init)(struct dbs_data *dbs_data, bool notify);
        void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol                               \
 extern struct mutex cpufreq_governor_lock;
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-               unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                struct common_dbs_data *cdata, unsigned int event);
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
index 3c1e10f..1fa9088 100644 (file)
@@ -155,7 +155,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 static void od_check_cpu(int cpu, unsigned int load)
 {
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-       struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+       struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
@@ -191,46 +191,40 @@ static void od_check_cpu(int cpu, unsigned int load)
        }
 }
 
-static void od_dbs_timer(struct work_struct *work)
+static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
+                                struct dbs_data *dbs_data, bool modify_all)
 {
-       struct od_cpu_dbs_info_s *dbs_info =
-               container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-       unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-       struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+       struct cpufreq_policy *policy = cdbs->shared->policy;
+       unsigned int cpu = policy->cpu;
+       struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                        cpu);
-       struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-       int delay = 0, sample_type = core_dbs_info->sample_type;
-       bool modify_all = true;
+       int delay = 0, sample_type = dbs_info->sample_type;
 
-       mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-       if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
-               modify_all = false;
+       if (!modify_all)
                goto max_delay;
-       }
 
        /* Common NORMAL_SAMPLE setup */
-       core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+       dbs_info->sample_type = OD_NORMAL_SAMPLE;
        if (sample_type == OD_SUB_SAMPLE) {
-               delay = core_dbs_info->freq_lo_jiffies;
-               __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
-                               core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+               delay = dbs_info->freq_lo_jiffies;
+               __cpufreq_driver_target(policy, dbs_info->freq_lo,
+                                       CPUFREQ_RELATION_H);
        } else {
                dbs_check_cpu(dbs_data, cpu);
-               if (core_dbs_info->freq_lo) {
+               if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
-                       core_dbs_info->sample_type = OD_SUB_SAMPLE;
-                       delay = core_dbs_info->freq_hi_jiffies;
+                       dbs_info->sample_type = OD_SUB_SAMPLE;
+                       delay = dbs_info->freq_hi_jiffies;
                }
        }
 
 max_delay:
        if (!delay)
                delay = delay_for_sampling_rate(od_tuners->sampling_rate
-                               * core_dbs_info->rate_mult);
+                               * dbs_info->rate_mult);
 
-       gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-       mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+       return delay;
 }
 
 /************************** sysfs interface ************************/
@@ -273,27 +267,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
                dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
                cpufreq_cpu_put(policy);
 
-               mutex_lock(&dbs_info->cdbs.timer_mutex);
+               mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
-               if (!delayed_work_pending(&dbs_info->cdbs.work)) {
-                       mutex_unlock(&dbs_info->cdbs.timer_mutex);
+               if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
+                       mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
                        continue;
                }
 
                next_sampling = jiffies + usecs_to_jiffies(new_rate);
-               appointed_at = dbs_info->cdbs.work.timer.expires;
+               appointed_at = dbs_info->cdbs.dwork.timer.expires;
 
                if (time_before(next_sampling, appointed_at)) {
 
-                       mutex_unlock(&dbs_info->cdbs.timer_mutex);
-                       cancel_delayed_work_sync(&dbs_info->cdbs.work);
-                       mutex_lock(&dbs_info->cdbs.timer_mutex);
+                       mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+                       cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
+                       mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
-                       gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
-                                       usecs_to_jiffies(new_rate), true);
+                       gov_queue_work(dbs_data, policy,
+                                      usecs_to_jiffies(new_rate), true);
 
                }
-               mutex_unlock(&dbs_info->cdbs.timer_mutex);
+               mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
        }
 }
 
@@ -556,13 +550,16 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 
        get_online_cpus();
        for_each_online_cpu(cpu) {
+               struct cpu_common_dbs_info *shared;
+
                if (cpumask_test_cpu(cpu, &done))
                        continue;
 
-               policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-               if (!policy)
+               shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
+               if (!shared)
                        continue;
 
+               policy = shared->policy;
                cpumask_or(&done, &done, policy->cpus);
 
                if (policy->governor != &cpufreq_gov_ondemand)
index 773bcde..0f5e6d5 100644 (file)
@@ -75,6 +75,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
                }
                freq_table[i].driver_data = i;
                freq_table[i].frequency = rate / 1000;
+
+               /* Is this a boost/turbo OPP? */
+               if (dev_pm_opp_is_turbo(opp))
+                       freq_table[i].flags = CPUFREQ_BOOST_FREQ;
        }
 
        freq_table[i].driver_data = i;
index a0d2a42..4085244 100644 (file)
@@ -78,7 +78,7 @@ static int eps_acpi_init(void)
 static int eps_acpi_exit(struct cpufreq_policy *policy)
 {
        if (eps_acpi_cpu_perf) {
-               acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+               acpi_processor_unregister_performance(0);
                free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
                kfree(eps_acpi_cpu_perf);
                eps_acpi_cpu_perf = NULL;
index dfbbf98..a8f1daf 100644 (file)
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
 
+bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+
+       if (!table)
+               return false;
+
+       cpufreq_for_each_valid_entry(pos, table)
+               if (pos->flags & CPUFREQ_BOOST_FREQ)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(policy_has_boost_freq);
+
 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
                                    struct cpufreq_frequency_table *table)
 {
index c30aaa6..0202429 100644 (file)
@@ -29,7 +29,6 @@ MODULE_LICENSE("GPL");
 
 struct cpufreq_acpi_io {
        struct acpi_processor_performance       acpi_data;
-       struct cpufreq_frequency_table          *freq_table;
        unsigned int                            resume;
 };
 
@@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init (
        unsigned int            cpu = policy->cpu;
        struct cpufreq_acpi_io  *data;
        unsigned int            result = 0;
+       struct cpufreq_frequency_table *freq_table;
 
        pr_debug("acpi_cpufreq_cpu_init\n");
 
@@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init (
        }
 
        /* alloc freq_table */
-       data->freq_table = kzalloc(sizeof(*data->freq_table) *
+       freq_table = kzalloc(sizeof(*freq_table) *
                                   (data->acpi_data.state_count + 1),
                                   GFP_KERNEL);
-       if (!data->freq_table) {
+       if (!freq_table) {
                result = -ENOMEM;
                goto err_unreg;
        }
@@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init (
        for (i = 0; i <= data->acpi_data.state_count; i++)
        {
                if (i < data->acpi_data.state_count) {
-                       data->freq_table[i].frequency =
+                       freq_table[i].frequency =
                              data->acpi_data.states[i].core_frequency * 1000;
                } else {
-                       data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+                       freq_table[i].frequency = CPUFREQ_TABLE_END;
                }
        }
 
-       result = cpufreq_table_validate_and_show(policy, data->freq_table);
+       result = cpufreq_table_validate_and_show(policy, freq_table);
        if (result) {
                goto err_freqfree;
        }
@@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init (
        return (result);
 
  err_freqfree:
-       kfree(data->freq_table);
+       kfree(freq_table);
  err_unreg:
-       acpi_processor_unregister_performance(&data->acpi_data, cpu);
+       acpi_processor_unregister_performance(cpu);
  err_free:
        kfree(data);
        acpi_io_data[cpu] = NULL;
@@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit (
 
        if (data) {
                acpi_io_data[policy->cpu] = NULL;
-               acpi_processor_unregister_performance(&data->acpi_data,
-                                                     policy->cpu);
+               acpi_processor_unregister_performance(policy->cpu);
+               kfree(policy->freq_table);
                kfree(data);
        }
 
index 129e266..2faa421 100644 (file)
@@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy,
        /* get current setting */
        cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
 
-       if (machine_is_integrator()) {
+       if (machine_is_integrator())
                vco.s = (cm_osc >> 8) & 7;
-       } else if (machine_is_cintegrator()) {
+       else if (machine_is_cintegrator())
                vco.s = 1;
-       }
        vco.v = cm_osc & 255;
        vco.r = 22;
        freqs.old = icst_hz(&cclk_params, vco) / 1000;
@@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu)
        /* detect memory etc. */
        cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
 
-       if (machine_is_integrator()) {
+       if (machine_is_integrator())
                vco.s = (cm_osc >> 8) & 7;
-       } else {
+       else
                vco.s = 1;
-       }
        vco.v = cm_osc & 255;
        vco.r = 22;
 
@@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
        struct resource *res;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        if (!res)
+       if (!res)
                return -ENODEV;
 
        cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
@@ -234,6 +232,6 @@ static struct platform_driver integrator_cpufreq_driver = {
 module_platform_driver_probe(integrator_cpufreq_driver,
                             integrator_cpufreq_probe);
 
-MODULE_AUTHOR ("Russell M. King");
-MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Russell M. King");
+MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE("GPL");
index fcb929e..31d0548 100644 (file)
@@ -484,12 +484,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
 }
 /************************** sysfs end ************************/
 
-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-       hwp_active++;
        pr_info("intel_pstate: HWP enabled\n");
 
-       wrmsrl( MSR_PM_ENABLE, 0x1);
+       wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
 static int byt_get_min_pstate(void)
@@ -522,7 +521,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        int32_t vid_fp;
        u32 vid;
 
-       val = pstate << 8;
+       val = (u64)pstate << 8;
        if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
@@ -611,7 +610,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
        u64 val;
 
-       val = pstate << 8;
+       val = (u64)pstate << 8;
        if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
@@ -909,6 +908,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x4c, byt_params),
        ICPU(0x4e, core_params),
        ICPU(0x4f, core_params),
+       ICPU(0x5e, core_params),
        ICPU(0x56, core_params),
        ICPU(0x57, knl_params),
        {}
@@ -933,6 +933,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        cpu->cpu = cpunum;
+
+       if (hwp_active)
+               intel_pstate_hwp_enable(cpu);
+
        intel_pstate_get_cpu_pstates(cpu);
 
        init_timer_deferrable(&cpu->timer);
@@ -1170,6 +1174,10 @@ static struct hw_vendor_info vendor_info[] = {
        {1, "ORACLE", "X4270M3 ", PPC},
        {1, "ORACLE", "X4270M2 ", PPC},
        {1, "ORACLE", "X4170M2 ", PPC},
+       {1, "ORACLE", "X4170 M3", PPC},
+       {1, "ORACLE", "X4275 M3", PPC},
+       {1, "ORACLE", "X6-2    ", PPC},
+       {1, "ORACLE", "Sudbury ", PPC},
        {0, "", ""},
 };
 
@@ -1246,7 +1254,7 @@ static int __init intel_pstate_init(void)
                return -ENOMEM;
 
        if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-               intel_pstate_hwp_enable();
+               hwp_active++;
 
        if (!hwp_active && hwp_only)
                goto out;
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
new file mode 100644 (file)
index 0000000..49caed2
--- /dev/null
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define MIN_VOLT_SHIFT         (100000)
+#define MAX_VOLT_SHIFT         (200000)
+#define MAX_VOLT_LIMIT         (1150000)
+#define VOLT_TOL               (10000)
+
+/*
+ * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
+ * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
+ * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
+ * voltage inputs need to be controlled under a hardware limitation:
+ * 100mV < Vsram - Vproc < 200mV
+ *
+ * When scaling the clock frequency of a CPU clock domain, the clock source
+ * needs to be switched to another stable PLL clock temporarily until
+ * the original PLL becomes stable at target frequency.
+ */
+struct mtk_cpu_dvfs_info {
+       struct device *cpu_dev;
+       struct regulator *proc_reg;
+       struct regulator *sram_reg;
+       struct clk *cpu_clk;
+       struct clk *inter_clk;
+       struct thermal_cooling_device *cdev;
+       int intermediate_voltage;
+       bool need_voltage_tracking;
+};
+
+static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
+                                       int new_vproc)
+{
+       struct regulator *proc_reg = info->proc_reg;
+       struct regulator *sram_reg = info->sram_reg;
+       int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
+
+       old_vproc = regulator_get_voltage(proc_reg);
+       old_vsram = regulator_get_voltage(sram_reg);
+       /* Vsram should not exceed the maximum allowed voltage of SoC. */
+       new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
+
+       if (old_vproc < new_vproc) {
+               /*
+                * When scaling up voltages, Vsram and Vproc scale up step
+                * by step. At each step, set Vsram to (Vproc + 200mV) first,
+                * then set Vproc to (Vsram - 100mV).
+                * Keep doing it until Vsram and Vproc hit target voltages.
+                */
+               do {
+                       old_vsram = regulator_get_voltage(sram_reg);
+                       old_vproc = regulator_get_voltage(proc_reg);
+
+                       vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
+
+                       if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
+                               vsram = MAX_VOLT_LIMIT;
+
+                               /*
+                                * If the target Vsram hits the maximum voltage,
+                                * try to set the exact voltage value first.
+                                */
+                               ret = regulator_set_voltage(sram_reg, vsram,
+                                                           vsram);
+                               if (ret)
+                                       ret = regulator_set_voltage(sram_reg,
+                                                       vsram - VOLT_TOL,
+                                                       vsram);
+
+                               vproc = new_vproc;
+                       } else {
+                               ret = regulator_set_voltage(sram_reg, vsram,
+                                                           vsram + VOLT_TOL);
+
+                               vproc = vsram - MIN_VOLT_SHIFT;
+                       }
+                       if (ret)
+                               return ret;
+
+                       ret = regulator_set_voltage(proc_reg, vproc,
+                                                   vproc + VOLT_TOL);
+                       if (ret) {
+                               regulator_set_voltage(sram_reg, old_vsram,
+                                                     old_vsram);
+                               return ret;
+                       }
+               } while (vproc < new_vproc || vsram < new_vsram);
+       } else if (old_vproc > new_vproc) {
+               /*
+                * When scaling down voltages, Vsram and Vproc scale down step
+                * by step. At each step, set Vproc to (Vsram - 200mV) first,
+                * then set Vsram to (Vproc + 100mV).
+                * Keep doing it until Vsram and Vproc hit target voltages.
+                */
+               do {
+                       old_vproc = regulator_get_voltage(proc_reg);
+                       old_vsram = regulator_get_voltage(sram_reg);
+
+                       vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
+                       ret = regulator_set_voltage(proc_reg, vproc,
+                                                   vproc + VOLT_TOL);
+                       if (ret)
+                               return ret;
+
+                       if (vproc == new_vproc)
+                               vsram = new_vsram;
+                       else
+                               vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
+
+                       if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
+                               vsram = MAX_VOLT_LIMIT;
+
+                               /*
+                                * If the target Vsram hits the maximum voltage,
+                                * try to set the exact voltage value first.
+                                */
+                               ret = regulator_set_voltage(sram_reg, vsram,
+                                                           vsram);
+                               if (ret)
+                                       ret = regulator_set_voltage(sram_reg,
+                                                       vsram - VOLT_TOL,
+                                                       vsram);
+                       } else {
+                               ret = regulator_set_voltage(sram_reg, vsram,
+                                                           vsram + VOLT_TOL);
+                       }
+
+                       if (ret) {
+                               regulator_set_voltage(proc_reg, old_vproc,
+                                                     old_vproc);
+                               return ret;
+                       }
+               } while (vproc > new_vproc + VOLT_TOL ||
+                        vsram > new_vsram + VOLT_TOL);
+       }
+
+       return 0;
+}
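/*
 * Worked example (illustrative only) of the scale-up branch above, going
 * from Vproc ~0.9V / Vsram ~1.0V to new_vproc = 1.1V, which makes
 * new_vsram = min(1.1V + 100mV, MAX_VOLT_LIMIT) = 1.15V:
 *
 *      step 1: vsram = min(1.15V, 0.9V + 200mV) = 1.1V, then
 *              vproc = vsram - 100mV = 1.0V
 *      step 2: vsram = min(1.15V, 1.0V + 200mV) = 1.15V, which is within
 *              VOLT_TOL of MAX_VOLT_LIMIT and is therefore set exactly,
 *              then vproc = new_vproc = 1.1V
 *
 * after which neither loop condition holds and the function returns 0.
 */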
+
+static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
+{
+       if (info->need_voltage_tracking)
+               return mtk_cpufreq_voltage_tracking(info, vproc);
+       else
+               return regulator_set_voltage(info->proc_reg, vproc,
+                                            vproc + VOLT_TOL);
+}
+
+static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
+                                 unsigned int index)
+{
+       struct cpufreq_frequency_table *freq_table = policy->freq_table;
+       struct clk *cpu_clk = policy->clk;
+       struct clk *armpll = clk_get_parent(cpu_clk);
+       struct mtk_cpu_dvfs_info *info = policy->driver_data;
+       struct device *cpu_dev = info->cpu_dev;
+       struct dev_pm_opp *opp;
+       long freq_hz, old_freq_hz;
+       int vproc, old_vproc, inter_vproc, target_vproc, ret;
+
+       inter_vproc = info->intermediate_voltage;
+
+       old_freq_hz = clk_get_rate(cpu_clk);
+       old_vproc = regulator_get_voltage(info->proc_reg);
+
+       freq_hz = freq_table[index].frequency * 1000;
+
+       rcu_read_lock();
+       opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+       if (IS_ERR(opp)) {
+               rcu_read_unlock();
+               pr_err("cpu%d: failed to find OPP for %ld\n",
+                      policy->cpu, freq_hz);
+               return PTR_ERR(opp);
+       }
+       vproc = dev_pm_opp_get_voltage(opp);
+       rcu_read_unlock();
+
+       /*
+        * If the new voltage or the intermediate voltage is higher than the
+        * current voltage, scale up voltage first.
+        */
+       target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
+       if (old_vproc < target_vproc) {
+               ret = mtk_cpufreq_set_voltage(info, target_vproc);
+               if (ret) {
+                       pr_err("cpu%d: failed to scale up voltage!\n",
+                              policy->cpu);
+                       mtk_cpufreq_set_voltage(info, old_vproc);
+                       return ret;
+               }
+       }
+
+       /* Reparent the CPU clock to intermediate clock. */
+       ret = clk_set_parent(cpu_clk, info->inter_clk);
+       if (ret) {
+               pr_err("cpu%d: failed to re-parent cpu clock!\n",
+                      policy->cpu);
+               mtk_cpufreq_set_voltage(info, old_vproc);
+               WARN_ON(1);
+               return ret;
+       }
+
+       /* Set the original PLL to target rate. */
+       ret = clk_set_rate(armpll, freq_hz);
+       if (ret) {
+               pr_err("cpu%d: failed to scale cpu clock rate!\n",
+                      policy->cpu);
+               clk_set_parent(cpu_clk, armpll);
+               mtk_cpufreq_set_voltage(info, old_vproc);
+               return ret;
+       }
+
+       /* Set parent of CPU clock back to the original PLL. */
+       ret = clk_set_parent(cpu_clk, armpll);
+       if (ret) {
+               pr_err("cpu%d: failed to re-parent cpu clock!\n",
+                      policy->cpu);
+               mtk_cpufreq_set_voltage(info, inter_vproc);
+               WARN_ON(1);
+               return ret;
+       }
+
+       /*
+        * If the new voltage is lower than the intermediate voltage or the
+        * original voltage, scale down to the new voltage.
+        */
+       if (vproc < inter_vproc || vproc < old_vproc) {
+               ret = mtk_cpufreq_set_voltage(info, vproc);
+               if (ret) {
+                       pr_err("cpu%d: failed to scale down voltage!\n",
+                              policy->cpu);
+                       clk_set_parent(cpu_clk, info->inter_clk);
+                       clk_set_rate(armpll, old_freq_hz);
+                       clk_set_parent(cpu_clk, armpll);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
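/*
 * A minimal sketch (illustrative only; the helper name is hypothetical) of
 * the lookup pattern used above and in mtk_cpu_dvfs_info_init(): cpufreq
 * tables hold kHz, the OPP library works in Hz, and on this kernel the
 * dev_pm_opp_* lookups must run under rcu_read_lock().
 */
static int mtk_index_to_vproc(struct device *cpu_dev,
                              struct cpufreq_frequency_table *freq_table,
                              unsigned int index)
{
        unsigned long freq_hz = freq_table[index].frequency * 1000UL;
        struct dev_pm_opp *opp;
        int vproc;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        vproc = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return vproc;
}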
+
+static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
+{
+       struct mtk_cpu_dvfs_info *info = policy->driver_data;
+       struct device_node *np = of_node_get(info->cpu_dev->of_node);
+
+       if (WARN_ON(!np))
+               return;
+
+       if (of_find_property(np, "#cooling-cells", NULL)) {
+               info->cdev = of_cpufreq_cooling_register(np,
+                                                        policy->related_cpus);
+
+               if (IS_ERR(info->cdev)) {
+                       dev_err(info->cpu_dev,
+                               "running cpufreq without cooling device: %ld\n",
+                               PTR_ERR(info->cdev));
+
+                       info->cdev = NULL;
+               }
+       }
+
+       of_node_put(np);
+}
+
+static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+{
+       struct device *cpu_dev;
+       struct regulator *proc_reg = ERR_PTR(-ENODEV);
+       struct regulator *sram_reg = ERR_PTR(-ENODEV);
+       struct clk *cpu_clk = ERR_PTR(-ENODEV);
+       struct clk *inter_clk = ERR_PTR(-ENODEV);
+       struct dev_pm_opp *opp;
+       unsigned long rate;
+       int ret;
+
+       cpu_dev = get_cpu_device(cpu);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu%d device\n", cpu);
+               return -ENODEV;
+       }
+
+       cpu_clk = clk_get(cpu_dev, "cpu");
+       if (IS_ERR(cpu_clk)) {
+               if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
+                       pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
+               else
+                       pr_err("failed to get cpu clk for cpu%d\n", cpu);
+
+               ret = PTR_ERR(cpu_clk);
+               return ret;
+       }
+
+       inter_clk = clk_get(cpu_dev, "intermediate");
+       if (IS_ERR(inter_clk)) {
+               if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
+                       pr_warn("intermediate clk for cpu%d not ready, retry.\n",
+                               cpu);
+               else
+                       pr_err("failed to get intermediate clk for cpu%d\n",
+                              cpu);
+
+               ret = PTR_ERR(inter_clk);
+               goto out_free_resources;
+       }
+
+       proc_reg = regulator_get_exclusive(cpu_dev, "proc");
+       if (IS_ERR(proc_reg)) {
+               if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
+                       pr_warn("proc regulator for cpu%d not ready, retry.\n",
+                               cpu);
+               else
+                       pr_err("failed to get proc regulator for cpu%d\n",
+                              cpu);
+
+               ret = PTR_ERR(proc_reg);
+               goto out_free_resources;
+       }
+
+       /* Both presence and absence of sram regulator are valid cases. */
+       sram_reg = regulator_get_exclusive(cpu_dev, "sram");
+
+       ret = of_init_opp_table(cpu_dev);
+       if (ret) {
+               pr_warn("no OPP table for cpu%d\n", cpu);
+               goto out_free_resources;
+       }
+
+       /* Search a safe voltage for intermediate frequency. */
+       rate = clk_get_rate(inter_clk);
+       rcu_read_lock();
+       opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+       if (IS_ERR(opp)) {
+               rcu_read_unlock();
+               pr_err("failed to get intermediate opp for cpu%d\n", cpu);
+               ret = PTR_ERR(opp);
+               goto out_free_opp_table;
+       }
+       info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
+       rcu_read_unlock();
+
+       info->cpu_dev = cpu_dev;
+       info->proc_reg = proc_reg;
+       info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
+       info->cpu_clk = cpu_clk;
+       info->inter_clk = inter_clk;
+
+       /*
+        * If SRAM regulator is present, software "voltage tracking" is needed
+        * for this CPU power domain.
+        */
+       info->need_voltage_tracking = !IS_ERR(sram_reg);
+
+       return 0;
+
+out_free_opp_table:
+       of_free_opp_table(cpu_dev);
+
+out_free_resources:
+       if (!IS_ERR(proc_reg))
+               regulator_put(proc_reg);
+       if (!IS_ERR(sram_reg))
+               regulator_put(sram_reg);
+       if (!IS_ERR(cpu_clk))
+               clk_put(cpu_clk);
+       if (!IS_ERR(inter_clk))
+               clk_put(inter_clk);
+
+       return ret;
+}
+
+static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
+{
+       if (!IS_ERR(info->proc_reg))
+               regulator_put(info->proc_reg);
+       if (!IS_ERR(info->sram_reg))
+               regulator_put(info->sram_reg);
+       if (!IS_ERR(info->cpu_clk))
+               clk_put(info->cpu_clk);
+       if (!IS_ERR(info->inter_clk))
+               clk_put(info->inter_clk);
+
+       of_free_opp_table(info->cpu_dev);
+}
+
+static int mtk_cpufreq_init(struct cpufreq_policy *policy)
+{
+       struct mtk_cpu_dvfs_info *info;
+       struct cpufreq_frequency_table *freq_table;
+       int ret;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
+       if (ret) {
+               pr_err("%s failed to initialize dvfs info for cpu%d\n",
+                      __func__, policy->cpu);
+               goto out_free_dvfs_info;
+       }
+
+       ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
+       if (ret) {
+               pr_err("failed to init cpufreq table for cpu%d: %d\n",
+                      policy->cpu, ret);
+               goto out_release_dvfs_info;
+       }
+
+       ret = cpufreq_table_validate_and_show(policy, freq_table);
+       if (ret) {
+               pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+               goto out_free_cpufreq_table;
+       }
+
+       /* CPUs in the same cluster share a clock and power domain. */
+       cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
+       policy->driver_data = info;
+       policy->clk = info->cpu_clk;
+
+       return 0;
+
+out_free_cpufreq_table:
+       dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
+
+out_release_dvfs_info:
+       mtk_cpu_dvfs_info_release(info);
+
+out_free_dvfs_info:
+       kfree(info);
+
+       return ret;
+}
+
+static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       struct mtk_cpu_dvfs_info *info = policy->driver_data;
+
+       cpufreq_cooling_unregister(info->cdev);
+       dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
+       mtk_cpu_dvfs_info_release(info);
+       kfree(info);
+
+       return 0;
+}
+
+static struct cpufreq_driver mt8173_cpufreq_driver = {
+       .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+       .verify = cpufreq_generic_frequency_table_verify,
+       .target_index = mtk_cpufreq_set_target,
+       .get = cpufreq_generic_get,
+       .init = mtk_cpufreq_init,
+       .exit = mtk_cpufreq_exit,
+       .ready = mtk_cpufreq_ready,
+       .name = "mtk-cpufreq",
+       .attr = cpufreq_generic_attr,
+};
+
+static int mt8173_cpufreq_probe(struct platform_device *pdev)
+{
+       int ret;
+
+       ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
+       if (ret)
+               pr_err("failed to register mtk cpufreq driver\n");
+
+       return ret;
+}
+
+static struct platform_driver mt8173_cpufreq_platdrv = {
+       .driver = {
+               .name   = "mt8173-cpufreq",
+       },
+       .probe          = mt8173_cpufreq_probe,
+};
+
+static int mt8173_cpufreq_driver_init(void)
+{
+       struct platform_device *pdev;
+       int err;
+
+       if (!of_machine_is_compatible("mediatek,mt8173"))
+               return -ENODEV;
+
+       err = platform_driver_register(&mt8173_cpufreq_platdrv);
+       if (err)
+               return err;
+
+       /*
+        * Since there's no place to hold device registration code and no
+        * device tree based way to match cpufreq driver yet, both the driver
+        * and the device registration codes are put here to handle defer
+        * probing.
+        */
+       pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               pr_err("failed to register mtk-cpufreq platform device\n");
+               return PTR_ERR(pdev);
+       }
+
+       return 0;
+}
+device_initcall(mt8173_cpufreq_driver_init);
index 37c5742..c1ae199 100644
@@ -421,7 +421,7 @@ static int powernow_acpi_init(void)
        return 0;
 
 err2:
-       acpi_processor_unregister_performance(acpi_processor_perf, 0);
+       acpi_processor_unregister_performance(0);
 err1:
        free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 err05:
@@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
 {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
        if (acpi_processor_perf) {
-               acpi_processor_unregister_performance(acpi_processor_perf, 0);
+               acpi_processor_unregister_performance(0);
                free_cpumask_var(acpi_processor_perf->shared_cpu_map);
                kfree(acpi_processor_perf);
        }
index 5c035d0..0b5bf13 100644
@@ -795,7 +795,7 @@ err_out_mem:
        kfree(powernow_table);
 
 err_out:
-       acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+       acpi_processor_unregister_performance(data->cpu);
 
        /* data->acpi_data.state_count informs us at ->exit()
         * whether ACPI was used */
@@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
        if (data->acpi_data.state_count)
-               acpi_processor_unregister_performance(&data->acpi_data,
-                               data->cpu);
+               acpi_processor_unregister_performance(data->cpu);
        free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
index ebef0d8..64994e1 100644
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 
 #include <asm/cputhreads.h>
 #include <asm/firmware.h>
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+#include <asm/opal.h>
 
 #define POWERNV_MAX_PSTATES    256
 #define PMSR_PSAFE_ENABLE      (1UL << 30)
 #define PMSR_SPR_EM_DISABLE    (1UL << 31)
 #define PMSR_MAX(x)            ((x >> 32) & 0xFF)
-#define PMSR_LP(x)             ((x >> 48) & 0xFF)
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting, throttled;
+static bool rebooting, throttled, occ_reset;
+
+static struct chip {
+       unsigned int id;
+       bool throttled;
+       cpumask_t mask;
+       struct work_struct throttle;
+       bool restore;
+} *chips;
+
+static int nr_chips;
 
 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -298,28 +309,35 @@ static inline unsigned int get_nominal_index(void)
        return powernv_pstate_info.max - powernv_pstate_info.nominal;
 }
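/*
 * Illustrative helper (not part of the driver): PMSR_MAX() extracts bits
 * 32-39 of the PMSR, and the result is cast to s8 because pstate ids on
 * this platform are signed, e.g. a raw field value of 0xf2 decodes to
 * pstate -14.
 */
static inline int pmsr_to_pmax(unsigned long pmsr)
{
        return (s8)PMSR_MAX(pmsr);
}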
 
-static void powernv_cpufreq_throttle_check(unsigned int cpu)
+static void powernv_cpufreq_throttle_check(void *data)
 {
+       unsigned int cpu = smp_processor_id();
        unsigned long pmsr;
-       int pmsr_pmax, pmsr_lp;
+       int pmsr_pmax, i;
 
        pmsr = get_pmspr(SPRN_PMSR);
 
+       for (i = 0; i < nr_chips; i++)
+               if (chips[i].id == cpu_to_chip_id(cpu))
+                       break;
+
        /* Check for Pmax Capping */
        pmsr_pmax = (s8)PMSR_MAX(pmsr);
        if (pmsr_pmax != powernv_pstate_info.max) {
-               throttled = true;
-               pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
-               pr_info("Max allowed Pstate is capped\n");
+               if (chips[i].throttled)
+                       goto next;
+               chips[i].throttled = true;
+               pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
+                       chips[i].id, pmsr_pmax);
+       } else if (chips[i].throttled) {
+               chips[i].throttled = false;
+               pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
+                       chips[i].id, pmsr_pmax);
        }
 
-       /*
-        * Check for Psafe by reading LocalPstate
-        * or check if Psafe_mode_active is set in PMSR.
-        */
-       pmsr_lp = (s8)PMSR_LP(pmsr);
-       if ((pmsr_lp < powernv_pstate_info.min) ||
-                               (pmsr & PMSR_PSAFE_ENABLE)) {
+       /* Check if Psafe_mode_active is set in PMSR. */
+next:
+       if (pmsr & PMSR_PSAFE_ENABLE) {
                throttled = true;
                pr_info("Pstate set to safe frequency\n");
        }
@@ -350,7 +368,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
                return 0;
 
        if (!throttled)
-               powernv_cpufreq_throttle_check(smp_processor_id());
+               powernv_cpufreq_throttle_check(NULL);
 
        freq_data.pstate_id = powernv_freqs[new_index].driver_data;
 
@@ -395,6 +413,119 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
        .notifier_call = powernv_cpufreq_reboot_notifier,
 };
 
+void powernv_cpufreq_work_fn(struct work_struct *work)
+{
+       struct chip *chip = container_of(work, struct chip, throttle);
+       unsigned int cpu;
+       cpumask_var_t mask;
+
+       smp_call_function_any(&chip->mask,
+                             powernv_cpufreq_throttle_check, NULL, 0);
+
+       if (!chip->restore)
+               return;
+
+       chip->restore = false;
+       cpumask_copy(mask, &chip->mask);
+       for_each_cpu_and(cpu, mask, cpu_online_mask) {
+               int index, tcpu;
+               struct cpufreq_policy policy;
+
+               cpufreq_get_policy(&policy, cpu);
+               cpufreq_frequency_table_target(&policy, policy.freq_table,
+                                              policy.cur,
+                                              CPUFREQ_RELATION_C, &index);
+               powernv_cpufreq_target_index(&policy, index);
+               for_each_cpu(tcpu, policy.cpus)
+                       cpumask_clear_cpu(tcpu, mask);
+       }
+}
+
+static char throttle_reason[][30] = {
+                                       "No throttling",
+                                       "Power Cap",
+                                       "Processor Over Temperature",
+                                       "Power Supply Failure",
+                                       "Over Current",
+                                       "OCC Reset"
+                                    };
+
+static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
+                                  unsigned long msg_type, void *_msg)
+{
+       struct opal_msg *msg = _msg;
+       struct opal_occ_msg omsg;
+       int i;
+
+       if (msg_type != OPAL_MSG_OCC)
+               return 0;
+
+       omsg.type = be64_to_cpu(msg->params[0]);
+
+       switch (omsg.type) {
+       case OCC_RESET:
+               occ_reset = true;
+               pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
+               /*
+                * powernv_cpufreq_throttle_check() is called in
+                * target() callback which can detect the throttle state
+                * for governors like ondemand.
+                * But static governors will not call target() often thus
+                * report throttling here.
+                */
+               if (!throttled) {
+                       throttled = true;
+                       pr_crit("CPU frequency is throttled for duration\n");
+               }
+
+               break;
+       case OCC_LOAD:
+               pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
+               break;
+       case OCC_THROTTLE:
+               omsg.chip = be64_to_cpu(msg->params[1]);
+               omsg.throttle_status = be64_to_cpu(msg->params[2]);
+
+               if (occ_reset) {
+                       occ_reset = false;
+                       throttled = false;
+                       pr_info("OCC Active, CPU frequency is no longer throttled\n");
+
+                       for (i = 0; i < nr_chips; i++) {
+                               chips[i].restore = true;
+                               schedule_work(&chips[i].throttle);
+                       }
+
+                       return 0;
+               }
+
+               if (omsg.throttle_status &&
+                   omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+                       pr_info("OCC: Chip %u Pmax reduced due to %s\n",
+                               (unsigned int)omsg.chip,
+                               throttle_reason[omsg.throttle_status]);
+               else if (!omsg.throttle_status)
+                       pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
+                               throttle_reason[omsg.throttle_status]);
+               else
+                       return 0;
+
+               for (i = 0; i < nr_chips; i++)
+                       if (chips[i].id == omsg.chip) {
+                               if (!omsg.throttle_status)
+                                       chips[i].restore = true;
+                               schedule_work(&chips[i].throttle);
+                       }
+       }
+       return 0;
+}
+
+static struct notifier_block powernv_cpufreq_opal_nb = {
+       .notifier_call  = powernv_cpufreq_occ_msg,
+       .next           = NULL,
+       .priority       = 0,
+};
+
 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
        struct powernv_smp_call_data freq_data;
@@ -414,6 +545,36 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
        .attr           = powernv_cpu_freq_attr,
 };
 
+static int init_chip_info(void)
+{
+       unsigned int chip[256];
+       unsigned int cpu, i;
+       unsigned int prev_chip_id = UINT_MAX;
+
+       for_each_possible_cpu(cpu) {
+               unsigned int id = cpu_to_chip_id(cpu);
+
+               if (prev_chip_id != id) {
+                       prev_chip_id = id;
+                       chip[nr_chips++] = id;
+               }
+       }
+
+       chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+       if (!chips)
+               return -ENOMEM;
+
+       for (i = 0; i < nr_chips; i++) {
+               chips[i].id = chip[i];
+               chips[i].throttled = false;
+               cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+               INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+               chips[i].restore = false;
+       }
+
+       return 0;
+}
+
 static int __init powernv_cpufreq_init(void)
 {
        int rc = 0;
@@ -429,7 +590,13 @@ static int __init powernv_cpufreq_init(void)
                return rc;
        }
 
+       /* Populate chip info */
+       rc = init_chip_info();
+       if (rc)
+               return rc;
+
        register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+       opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
        return cpufreq_register_driver(&powernv_cpufreq_driver);
 }
 module_init(powernv_cpufreq_init);
@@ -437,6 +604,8 @@ module_init(powernv_cpufreq_init);
 static void __exit powernv_cpufreq_exit(void)
 {
        unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+       opal_message_notifier_unregister(OPAL_MSG_OCC,
+                                        &powernv_cpufreq_opal_nb);
        cpufreq_unregister_driver(&powernv_cpufreq_driver);
 }
 module_exit(powernv_cpufreq_exit);
index d29e8da..7969f76 100644
@@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb,
        struct cpufreq_frequency_table *cbe_freqs;
        u8 node;
 
-       /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
-        * and CPUFREQ_NOTIFY policy events?)
+       /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
+        * policy events?
         */
        if (event == CPUFREQ_START)
                return 0;
index ffa3389..992ce6f 100644
@@ -45,12 +45,10 @@ static int sfi_parse_freq(struct sfi_table_header *table)
        pentry = (struct sfi_freq_table_entry *)sb->pentry;
        totallen = num_freq_table_entries * sizeof(*pentry);
 
-       sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+       sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
        if (!sfi_cpufreq_array)
                return -ENOMEM;
 
-       memcpy(sfi_cpufreq_array, pentry, totallen);
-
        return 0;
 }
 
index 4ab7a21..15d3214 100644
@@ -386,7 +386,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
        unsigned int prev_speed;
        unsigned int ret = 0;
        unsigned long flags;
-       struct timeval tv1, tv2;
+       ktime_t tv1, tv2;
 
        if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
                return -EINVAL;
@@ -415,14 +415,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 
        /* start latency measurement */
        if (transition_latency)
-               do_gettimeofday(&tv1);
+               tv1 = ktime_get();
 
        /* switch to high state */
        set_state(SPEEDSTEP_HIGH);
 
        /* end latency measurement */
        if (transition_latency)
-               do_gettimeofday(&tv2);
+               tv2 = ktime_get();
 
        *high_speed = speedstep_get_frequency(processor);
        if (!*high_speed) {
@@ -442,8 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
                set_state(SPEEDSTEP_LOW);
 
        if (transition_latency) {
-               *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
-                       tv2.tv_usec - tv1.tv_usec;
+               *transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
                pr_debug("transition latency is %u uSec\n", *transition_latency);
 
                /* convert uSec to nSec and add 20% for safety reasons */
index 7d99d13..f9901f5 100644
@@ -1,7 +1,7 @@
 /*
  * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
  *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
  * Author : Chanwoo Choi <cw00.choi@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -82,6 +82,15 @@ struct __exynos_ppmu_events {
        PPMU_EVENT(mscl),
        PPMU_EVENT(fimd0x),
        PPMU_EVENT(fimd1x),
+
+       /* Only for Exynos5433 SoCs */
+       PPMU_EVENT(d0-cpu),
+       PPMU_EVENT(d0-general),
+       PPMU_EVENT(d0-rt),
+       PPMU_EVENT(d1-cpu),
+       PPMU_EVENT(d1-general),
+       PPMU_EVENT(d1-rt),
+
        { /* sentinel */ },
 };
 
@@ -96,6 +105,9 @@ static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
        return -EINVAL;
 }
 
+/*
+ * The devfreq-event ops structure for PPMU v1.1
+ */
 static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
 {
        struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
@@ -200,10 +212,158 @@ static const struct devfreq_event_ops exynos_ppmu_ops = {
        .get_event = exynos_ppmu_get_event,
 };
 
+/*
+ * The devfreq-event ops structure for PPMU v2.0
+ */
+static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
+{
+       struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+       u32 pmnc, clear;
+
+       /* Disable all counters */
+       clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
+               | PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+
+       __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
+       __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
+       __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
+       __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
+
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
+       __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
+
+       /* Disable PPMU */
+       pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+       pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+       __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+       return 0;
+}
+
+static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
+{
+       struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+       int id = exynos_ppmu_find_ppmu_id(edev);
+       u32 pmnc, cntens;
+
+       /* Enable all counters */
+       cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
+       cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+       __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
+
+       /* Set the event of Read/Write data count  */
+       switch (id) {
+       case PPMU_PMNCNT0:
+       case PPMU_PMNCNT1:
+       case PPMU_PMNCNT2:
+               __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
+                               info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+               break;
+       case PPMU_PMNCNT3:
+               __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
+                               info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+               break;
+       }
+
+       /* Reset cycle counter/performance counter and enable PPMU */
+       pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+       pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+                       | PPMU_PMNC_COUNTER_RESET_MASK
+                       | PPMU_PMNC_CC_RESET_MASK
+                       | PPMU_PMNC_CC_DIVIDER_MASK
+                       | PPMU_V2_PMNC_START_MODE_MASK);
+       pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+       pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+       pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+       pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
+       __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+       return 0;
+}
+
+static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
+                                   struct devfreq_event_data *edata)
+{
+       struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+       int id = exynos_ppmu_find_ppmu_id(edev);
+       u32 pmnc, cntenc;
+       u32 pmcnt_high, pmcnt_low;
+       u64 load_count = 0;
+
+       /* Disable PPMU */
+       pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+       pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+       __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+       /* Read cycle count and performance count */
+       edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
+
+       switch (id) {
+       case PPMU_PMNCNT0:
+       case PPMU_PMNCNT1:
+       case PPMU_PMNCNT2:
+               load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
+               break;
+       case PPMU_PMNCNT3:
+               pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
+               pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
+               load_count = ((u64)(pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
+               break;
+       }
+       edata->load_count = load_count;
+
+       /* Disable all counters */
+       cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
+       cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+       __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
+
+       dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
+                                       edata->load_count, edata->total_count);
+       return 0;
+}
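/*
 * Illustrative only (not part of this patch): a devfreq user turns the two
 * counters reported above into a utilisation figure roughly as below, where
 * load_count counts read/write data transfers for the selected event and
 * total_count is the free-running cycle counter over the same window
 * (div64_u64() comes from <linux/math64.h>).
 */
static inline unsigned long ppmu_busy_permille(struct devfreq_event_data *edata)
{
        if (!edata->total_count)
                return 0;

        return div64_u64((u64)edata->load_count * 1000, edata->total_count);
}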
+
+static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
+       .disable = exynos_ppmu_v2_disable,
+       .set_event = exynos_ppmu_v2_set_event,
+       .get_event = exynos_ppmu_v2_get_event,
+};
+
+static const struct of_device_id exynos_ppmu_id_match[] = {
+       {
+               .compatible = "samsung,exynos-ppmu",
+               .data = (void *)&exynos_ppmu_ops,
+       }, {
+               .compatible = "samsung,exynos-ppmu-v2",
+               .data = (void *)&exynos_ppmu_v2_ops,
+       },
+       { /* sentinel */ },
+};
+
+static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
+{
+       const struct of_device_id *match;
+
+       match = of_match_node(exynos_ppmu_id_match, np);
+       return (struct devfreq_event_ops *)match->data;
+}
+
 static int of_get_devfreq_events(struct device_node *np,
                                 struct exynos_ppmu *info)
 {
        struct devfreq_event_desc *desc;
+       struct devfreq_event_ops *event_ops;
        struct device *dev = info->dev;
        struct device_node *events_np, *node;
        int i, j, count;
@@ -214,6 +374,7 @@ static int of_get_devfreq_events(struct device_node *np,
                        "failed to get child node of devfreq-event devices\n");
                return -EINVAL;
        }
+       event_ops = exynos_bus_get_ops(np);
 
        count = of_get_child_count(events_np);
        desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
@@ -238,7 +399,7 @@ static int of_get_devfreq_events(struct device_node *np,
                        continue;
                }
 
-               desc[j].ops = &exynos_ppmu_ops;
+               desc[j].ops = event_ops;
                desc[j].driver_data = info;
 
                of_property_read_string(node, "event-name", &desc[j].name);
@@ -354,11 +515,6 @@ static int exynos_ppmu_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id exynos_ppmu_id_match[] = {
-       { .compatible = "samsung,exynos-ppmu", },
-       { /* sentinel */ },
-};
-
 static struct platform_driver exynos_ppmu_driver = {
        .probe  = exynos_ppmu_probe,
        .remove = exynos_ppmu_remove,
index 4e831d4..05774c4 100644
@@ -26,6 +26,9 @@ enum ppmu_counter {
        PPMU_PMNCNT_MAX,
 };
 
+/***
+ * PPMUv1.1 Definitions
+ */
 enum ppmu_event_type {
        PPMU_RO_BUSY_CYCLE_CNT  = 0x0,
        PPMU_WO_BUSY_CYCLE_CNT  = 0x1,
@@ -90,4 +93,71 @@ enum ppmu_reg {
 #define PPMU_PMNCT(x)                  (PPMU_PMCNT0 + (0x10 * x))
 #define PPMU_BEVTxSEL(x)               (PPMU_BEVT0SEL + (0x100 * x))
 
+/***
+ * PPMU_V2.0 definitions
+ */
+enum ppmu_v2_mode {
+       PPMU_V2_MODE_MANUAL = 0,
+       PPMU_V2_MODE_AUTO = 1,
+       PPMU_V2_MODE_CIG = 2,   /* CIG (Conditional Interrupt Generation) */
+};
+
+enum ppmu_v2_event_type {
+       PPMU_V2_RO_DATA_CNT     = 0x4,
+       PPMU_V2_WO_DATA_CNT     = 0x5,
+
+       PPMU_V2_EVT3_RW_DATA_CNT = 0x22,        /* Only for Event3 */
+};
+
+enum ppmu_V2_reg {
+       /* PPC control register */
+       PPMU_V2_PMNC            = 0x04,
+       PPMU_V2_CNTENS          = 0x08,
+       PPMU_V2_CNTENC          = 0x0c,
+       PPMU_V2_INTENS          = 0x10,
+       PPMU_V2_INTENC          = 0x14,
+       PPMU_V2_FLAG            = 0x18,
+
+       /* Cycle Counter and Performance Event Counter Register */
+       PPMU_V2_CCNT            = 0x48,
+       PPMU_V2_PMCNT0          = 0x34,
+       PPMU_V2_PMCNT1          = 0x38,
+       PPMU_V2_PMCNT2          = 0x3c,
+       PPMU_V2_PMCNT3_LOW      = 0x40,
+       PPMU_V2_PMCNT3_HIGH     = 0x44,
+
+       /* Bus Event Generator */
+       PPMU_V2_CIG_CFG0                = 0x1c,
+       PPMU_V2_CIG_CFG1                = 0x20,
+       PPMU_V2_CIG_CFG2                = 0x24,
+       PPMU_V2_CIG_RESULT      = 0x28,
+       PPMU_V2_CNT_RESET       = 0x2c,
+       PPMU_V2_CNT_AUTO                = 0x30,
+       PPMU_V2_CH_EV0_TYPE     = 0x200,
+       PPMU_V2_CH_EV1_TYPE     = 0x204,
+       PPMU_V2_CH_EV2_TYPE     = 0x208,
+       PPMU_V2_CH_EV3_TYPE     = 0x20c,
+       PPMU_V2_SM_ID_V         = 0x220,
+       PPMU_V2_SM_ID_A         = 0x224,
+       PPMU_V2_SM_OTHERS_V     = 0x228,
+       PPMU_V2_SM_OTHERS_A     = 0x22c,
+       PPMU_V2_INTERRUPT_RESET = 0x260,
+};
+
+/* PMNC register */
+#define PPMU_V2_PMNC_START_MODE_SHIFT  20
+#define PPMU_V2_PMNC_START_MODE_MASK   (0x3 << PPMU_V2_PMNC_START_MODE_SHIFT)
+
+#define PPMU_PMNC_CC_RESET_SHIFT       2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT  1
+#define PPMU_PMNC_ENABLE_SHIFT         0
+#define PPMU_PMNC_START_MODE_MASK      BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK      BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK                BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK   BIT(1)
+#define PPMU_PMNC_ENABLE_MASK          BIT(0)
+
+#define PPMU_V2_PMNCT(x)               (PPMU_V2_PMCNT0 + (0x4 * x))
+#define PPMU_V2_CH_EVx_TYPE(x)         (PPMU_V2_CH_EV0_TYPE + (0x4 * x))
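/*
 * Worked example (illustrative): with PPMU_PMNCNT0 == 0, as the counter
 * enum implies, the indexed macros above expand to the per-counter
 * registers, e.g.:
 *
 *      PPMU_V2_PMNCT(PPMU_PMNCNT1)       = 0x34 + 0x4 * 1 = 0x38  (PPMU_V2_PMCNT1)
 *      PPMU_V2_CH_EVx_TYPE(PPMU_PMNCNT3) = 0x200 + 0x4 * 3 = 0x20c (PPMU_V2_CH_EV3_TYPE)
 */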
+
 #endif /* __EXYNOS_PPMU_H__ */
index 88d474b..bdbbe5b 100644
@@ -85,6 +85,14 @@ config INTEL_IOP_ADMA
        help
          Enable support for the Intel(R) IOP Series RAID engines.
 
+config IDMA64
+       tristate "Intel integrated DMA 64-bit support"
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable DMA support for Intel Low Power Subsystem such as found on
+         Intel Skylake PCH.
+
 source "drivers/dma/dw/Kconfig"
 
 config AT_HDMAC
index 6a4d6f2..56ff8c7 100644
@@ -14,6 +14,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_IDMA64) += idma64.o
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
new file mode 100644
index 0000000..18c14e1
--- /dev/null
@@ -0,0 +1,710 @@
+/*
+ * Core driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "idma64.h"
+
+/* Platform driver name */
+#define DRV_NAME               "idma64"
+
+/* For now we support only two channels */
+#define IDMA64_NR_CHAN         2
+
+/* ---------------------------------------------------------------------- */
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+       return &chan->dev->device;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_off(struct idma64 *idma64)
+{
+       unsigned short count = 100;
+
+       dma_writel(idma64, CFG, 0);
+
+       channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+       channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
+       channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
+       channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
+       channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+       do {
+               cpu_relax();
+       } while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
+}
+
+static void idma64_on(struct idma64 *idma64)
+{
+       dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+       u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
+       u32 cfglo = 0;
+
+       /* Enforce FIFO drain when channel is suspended */
+       cfglo |= IDMA64C_CFGL_CH_DRAIN;
+
+       /* Set default burst alignment */
+       cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;
+
+       channel_writel(idma64c, CFG_LO, cfglo);
+       channel_writel(idma64c, CFG_HI, cfghi);
+
+       /* Enable interrupts */
+       channel_set_bit(idma64, MASK(XFER), idma64c->mask);
+       channel_set_bit(idma64, MASK(ERROR), idma64c->mask);
+
+       /*
+        * Enforce the controller to be turned on.
+        *
+        * The iDMA is turned off in ->probe() and loses context during system
+        * suspend / resume cycle. That's why we have to enable it each time we
+        * use it.
+        */
+       idma64_on(idma64);
+}
+
+static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+       channel_clear_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+       struct idma64_desc *desc = idma64c->desc;
+       struct idma64_hw_desc *hw = &desc->hw[0];
+
+       channel_writeq(idma64c, SAR, 0);
+       channel_writeq(idma64c, DAR, 0);
+
+       channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
+       channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
+
+       channel_writeq(idma64c, LLP, hw->llp);
+
+       channel_set_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_stop_transfer(struct idma64_chan *idma64c)
+{
+       struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+
+       idma64_chan_stop(idma64, idma64c);
+}
+
+static void idma64_start_transfer(struct idma64_chan *idma64c)
+{
+       struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+       struct virt_dma_desc *vdesc;
+
+       /* Get the next descriptor */
+       vdesc = vchan_next_desc(&idma64c->vchan);
+       if (!vdesc) {
+               idma64c->desc = NULL;
+               return;
+       }
+
+       list_del(&vdesc->node);
+       idma64c->desc = to_idma64_desc(vdesc);
+
+       /* Configure the channel */
+       idma64_chan_init(idma64, idma64c);
+
+       /* Start the channel with a new descriptor */
+       idma64_chan_start(idma64, idma64c);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
+               u32 status_err, u32 status_xfer)
+{
+       struct idma64_chan *idma64c = &idma64->chan[c];
+       struct idma64_desc *desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&idma64c->vchan.lock, flags);
+       desc = idma64c->desc;
+       if (desc) {
+               if (status_err & (1 << c)) {
+                       dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
+                       desc->status = DMA_ERROR;
+               } else if (status_xfer & (1 << c)) {
+                       dma_writel(idma64, CLEAR(XFER), idma64c->mask);
+                       desc->status = DMA_COMPLETE;
+                       vchan_cookie_complete(&desc->vdesc);
+                       idma64_start_transfer(idma64c);
+               }
+
+               /* idma64_start_transfer() updates idma64c->desc */
+               if (idma64c->desc == NULL || desc->status == DMA_ERROR)
+                       idma64_stop_transfer(idma64c);
+       }
+       spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static irqreturn_t idma64_irq(int irq, void *dev)
+{
+       struct idma64 *idma64 = dev;
+       u32 status = dma_readl(idma64, STATUS_INT);
+       u32 status_xfer;
+       u32 status_err;
+       unsigned short i;
+
+       dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+
+       /* Check if we have any interrupt from the DMA controller */
+       if (!status)
+               return IRQ_NONE;
+
+       /* Disable interrupts */
+       channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+       channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+       status_xfer = dma_readl(idma64, RAW(XFER));
+       status_err = dma_readl(idma64, RAW(ERROR));
+
+       for (i = 0; i < idma64->dma.chancnt; i++)
+               idma64_chan_irq(idma64, i, status_err, status_xfer);
+
+       /* Re-enable interrupts */
+       channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+       channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+       return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
+{
+       struct idma64_desc *desc;
+
+       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+
+       desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
+       if (!desc->hw) {
+               kfree(desc);
+               return NULL;
+       }
+
+       return desc;
+}
+
+static void idma64_desc_free(struct idma64_chan *idma64c,
+               struct idma64_desc *desc)
+{
+       struct idma64_hw_desc *hw;
+
+       if (desc->ndesc) {
+               unsigned int i = desc->ndesc;
+
+               do {
+                       hw = &desc->hw[--i];
+                       dma_pool_free(idma64c->pool, hw->lli, hw->llp);
+               } while (i);
+       }
+
+       kfree(desc->hw);
+       kfree(desc);
+}
+
+static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);
+
+       idma64_desc_free(idma64c, to_idma64_desc(vdesc));
+}
+
+static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
+               struct dma_slave_config *config,
+               enum dma_transfer_direction direction, u64 llp)
+{
+       struct idma64_lli *lli = hw->lli;
+       u64 sar, dar;
+       u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
+       u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
+       u32 src_width, dst_width;
+
+       if (direction == DMA_MEM_TO_DEV) {
+               sar = hw->phys;
+               dar = config->dst_addr;
+               ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
+                        IDMA64C_CTLL_FC_M2P;
+               src_width = min_t(u32, 2, __fls(sar | hw->len));
+               dst_width = __fls(config->dst_addr_width);
+       } else {        /* DMA_DEV_TO_MEM */
+               sar = config->src_addr;
+               dar = hw->phys;
+               ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
+                        IDMA64C_CTLL_FC_P2M;
+               src_width = __fls(config->src_addr_width);
+               dst_width = min_t(u32, 2, __fls(dar | hw->len));
+       }
+
+       lli->sar = sar;
+       lli->dar = dar;
+
+       lli->ctlhi = ctlhi;
+       lli->ctllo = ctllo |
+                    IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
+                    IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
+                    IDMA64C_CTLL_DST_WIDTH(dst_width) |
+                    IDMA64C_CTLL_SRC_WIDTH(src_width);
+
+       lli->llp = llp;
+       return hw->llp;
+}
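/*
 * Illustrative note (not part of this patch): the device-side transfer
 * width is programmed as __fls() of the configured bus width, i.e. a log2
 * encoding, e.g. DMA_SLAVE_BUSWIDTH_4_BYTES (4) -> __fls(4) = 2.  The
 * memory-side width is simply capped at that 32-bit encoding via
 * min_t(u32, 2, ...).
 */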
+
+static void idma64_desc_fill(struct idma64_chan *idma64c,
+               struct idma64_desc *desc)
+{
+       struct dma_slave_config *config = &idma64c->config;
+       struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
+       struct idma64_lli *lli = hw->lli;
+       u64 llp = 0;
+       unsigned int i = desc->ndesc;
+
+       /* Fill the hardware descriptors and link them to a list */
+       do {
+               hw = &desc->hw[--i];
+               llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
+               desc->length += hw->len;
+       } while (i);
+
+       /* Trigger interrupt after last block */
+       lli->ctllo |= IDMA64C_CTLL_INT_EN;
+}
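/*
 * Illustrative note (not part of this patch): hw[i].llp holds the DMA
 * address of that block's own LLI (filled in by dma_pool_alloc() in
 * idma64_prep_slave_sg() below).  The loop above therefore walks the blocks
 * backwards so that each lli->llp ends up pointing at the next block; the
 * last block terminates the chain with llp = 0 and raises the completion
 * interrupt, and idma64_chan_start() programs the channel LLP register with
 * hw[0].llp so the transfer starts from the first block.
 */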
+
+static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_transfer_direction direction,
+               unsigned long flags, void *context)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+       struct idma64_desc *desc;
+       struct scatterlist *sg;
+       unsigned int i;
+
+       desc = idma64_alloc_desc(sg_len);
+       if (!desc)
+               return NULL;
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               struct idma64_hw_desc *hw = &desc->hw[i];
+
+               /* Allocate DMA capable memory for hardware descriptor */
+               hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
+               if (!hw->lli) {
+                       desc->ndesc = i;
+                       idma64_desc_free(idma64c, desc);
+                       return NULL;
+               }
+
+               hw->phys = sg_dma_address(sg);
+               hw->len = sg_dma_len(sg);
+       }
+
+       desc->ndesc = sg_len;
+       desc->direction = direction;
+       desc->status = DMA_IN_PROGRESS;
+
+       idma64_desc_fill(idma64c, desc);
+       return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
+}
+
+static void idma64_issue_pending(struct dma_chan *chan)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&idma64c->vchan.lock, flags);
+       if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
+               idma64_start_transfer(idma64c);
+       spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
+{
+       struct idma64_desc *desc = idma64c->desc;
+       struct idma64_hw_desc *hw;
+       size_t bytes = desc->length;
+       u64 llp;
+       u32 ctlhi;
+       unsigned int i = 0;
+
+       llp = channel_readq(idma64c, LLP);
+       do {
+               hw = &desc->hw[i];
+       } while ((hw->llp != llp) && (++i < desc->ndesc));
+
+       if (!i)
+               return bytes;
+
+       do {
+               bytes -= desc->hw[--i].len;
+       } while (i);
+
+       ctlhi = channel_readl(idma64c, CTL_HI);
+       return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
+}
+
+static enum dma_status idma64_tx_status(struct dma_chan *chan,
+               dma_cookie_t cookie, struct dma_tx_state *state)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+       struct virt_dma_desc *vdesc;
+       enum dma_status status;
+       size_t bytes;
+       unsigned long flags;
+
+       status = dma_cookie_status(chan, cookie, state);
+       if (status == DMA_COMPLETE)
+               return status;
+
+       spin_lock_irqsave(&idma64c->vchan.lock, flags);
+       vdesc = vchan_find_desc(&idma64c->vchan, cookie);
+       if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
+               bytes = idma64_active_desc_size(idma64c);
+               dma_set_residue(state, bytes);
+               status = idma64c->desc->status;
+       } else if (vdesc) {
+               bytes = to_idma64_desc(vdesc)->length;
+               dma_set_residue(state, bytes);
+       }
+       spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+       return status;
+}
+
+static void convert_burst(u32 *maxburst)
+{
+       if (*maxburst)
+               *maxburst = __fls(*maxburst);
+       else
+               *maxburst = 0;
+}
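/*
 * Illustrative note (not part of this patch): the burst length from the
 * slave config is likewise stored as its log2 before it is programmed
 * through IDMA64C_CTLL_{SRC,DST}_MSIZE(), e.g. a maxburst of 8 becomes
 * __fls(8) = 3, and an unset (0) maxburst stays 0.
 */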
+
+static int idma64_slave_config(struct dma_chan *chan,
+               struct dma_slave_config *config)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+       /* Check if chan will be configured for slave transfers */
+       if (!is_slave_direction(config->direction))
+               return -EINVAL;
+
+       memcpy(&idma64c->config, config, sizeof(idma64c->config));
+
+       convert_burst(&idma64c->config.src_maxburst);
+       convert_burst(&idma64c->config.dst_maxburst);
+
+       return 0;
+}
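/*
 * A minimal dmaengine client sequence (illustrative only, not part of this
 * driver) showing how the callbacks above are reached.  Channel lookup,
 * scatterlist mapping and completion handling are omitted; "dev_addr",
 * "sgl" and the function name are placeholders.  Needs <linux/dmaengine.h>
 * and <linux/scatterlist.h>.
 */
static int idma64_client_example(struct dma_chan *chan, struct scatterlist *sgl,
                                 unsigned int nents, dma_addr_t dev_addr)
{
        struct dma_slave_config cfg = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = dev_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst = 8,
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);       /* -> idma64_slave_config() */
        if (ret)
                return ret;

        desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)                                      /* -> idma64_prep_slave_sg() */
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);                  /* -> idma64_issue_pending() */

        return 0;
}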
+
+static void idma64_chan_deactivate(struct idma64_chan *idma64c)
+{
+       unsigned short count = 100;
+       u32 cfglo;
+
+       cfglo = channel_readl(idma64c, CFG_LO);
+       channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
+       do {
+               udelay(1);
+               cfglo = channel_readl(idma64c, CFG_LO);
+       } while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
+}
+
+static void idma64_chan_activate(struct idma64_chan *idma64c)
+{
+       u32 cfglo;
+
+       cfglo = channel_readl(idma64c, CFG_LO);
+       channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
+}
+
+static int idma64_pause(struct dma_chan *chan)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&idma64c->vchan.lock, flags);
+       if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
+               idma64_chan_deactivate(idma64c);
+               idma64c->desc->status = DMA_PAUSED;
+       }
+       spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+       return 0;
+}
+
+static int idma64_resume(struct dma_chan *chan)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&idma64c->vchan.lock, flags);
+       if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
+               idma64c->desc->status = DMA_IN_PROGRESS;
+               idma64_chan_activate(idma64c);
+       }
+       spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+       return 0;
+}
+
+static int idma64_terminate_all(struct dma_chan *chan)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&idma64c->vchan.lock, flags);
+       idma64_chan_deactivate(idma64c);
+       idma64_stop_transfer(idma64c);
+       if (idma64c->desc) {
+               idma64_vdesc_free(&idma64c->desc->vdesc);
+               idma64c->desc = NULL;
+       }
+       vchan_get_all_descriptors(&idma64c->vchan, &head);
+       spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+       vchan_dma_desc_free_list(&idma64c->vchan, &head);
+       return 0;
+}
+
+static int idma64_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+       /* Create a pool of consistent memory blocks for hardware descriptors */
+       idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
+                                       chan->device->dev,
+                                       sizeof(struct idma64_lli), 8, 0);
+       if (!idma64c->pool) {
+               dev_err(chan2dev(chan), "No memory for descriptors\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void idma64_free_chan_resources(struct dma_chan *chan)
+{
+       struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+       vchan_free_chan_resources(to_virt_chan(chan));
+       dma_pool_destroy(idma64c->pool);
+       idma64c->pool = NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define IDMA64_BUSWIDTHS                               \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          |       \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         |       \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+static int idma64_probe(struct idma64_chip *chip)
+{
+       struct idma64 *idma64;
+       unsigned short nr_chan = IDMA64_NR_CHAN;
+       unsigned short i;
+       int ret;
+
+       idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
+       if (!idma64)
+               return -ENOMEM;
+
+       idma64->regs = chip->regs;
+       chip->idma64 = idma64;
+
+       idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
+                                   GFP_KERNEL);
+       if (!idma64->chan)
+               return -ENOMEM;
+
+       idma64->all_chan_mask = (1 << nr_chan) - 1;
+
+       /* Turn off iDMA controller */
+       idma64_off(idma64);
+
+       ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
+                              dev_name(chip->dev), idma64);
+       if (ret)
+               return ret;
+
+       INIT_LIST_HEAD(&idma64->dma.channels);
+       for (i = 0; i < nr_chan; i++) {
+               struct idma64_chan *idma64c = &idma64->chan[i];
+
+               idma64c->vchan.desc_free = idma64_vdesc_free;
+               vchan_init(&idma64c->vchan, &idma64->dma);
+
+               idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
+               idma64c->mask = BIT(i);
+       }
+
+       dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
+       dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);
+
+       idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
+       idma64->dma.device_free_chan_resources = idma64_free_chan_resources;
+
+       idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;
+
+       idma64->dma.device_issue_pending = idma64_issue_pending;
+       idma64->dma.device_tx_status = idma64_tx_status;
+
+       idma64->dma.device_config = idma64_slave_config;
+       idma64->dma.device_pause = idma64_pause;
+       idma64->dma.device_resume = idma64_resume;
+       idma64->dma.device_terminate_all = idma64_terminate_all;
+
+       idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
+       idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
+       idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+       idma64->dma.dev = chip->dev;
+
+       ret = dma_async_device_register(&idma64->dma);
+       if (ret)
+               return ret;
+
+       dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
+       return 0;
+}
+
+static int idma64_remove(struct idma64_chip *chip)
+{
+       struct idma64 *idma64 = chip->idma64;
+       unsigned short i;
+
+       dma_async_device_unregister(&idma64->dma);
+
+       /*
+        * Explicitly free the IRQ here (rather than relying on devres) to
+        * avoid side effects from the scheduled tasklets.
+        */
+       devm_free_irq(chip->dev, chip->irq, idma64);
+
+       for (i = 0; i < idma64->dma.chancnt; i++) {
+               struct idma64_chan *idma64c = &idma64->chan[i];
+
+               tasklet_kill(&idma64c->vchan.task);
+       }
+
+       return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int idma64_platform_probe(struct platform_device *pdev)
+{
+       struct idma64_chip *chip;
+       struct device *dev = &pdev->dev;
+       struct resource *mem;
+       int ret;
+
+       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->irq = platform_get_irq(pdev, 0);
+       if (chip->irq < 0)
+               return chip->irq;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       chip->regs = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(chip->regs))
+               return PTR_ERR(chip->regs);
+
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
+
+       chip->dev = dev;
+
+       ret = idma64_probe(chip);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, chip);
+       return 0;
+}
+
+static int idma64_platform_remove(struct platform_device *pdev)
+{
+       struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+       return idma64_remove(chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int idma64_pm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+       idma64_off(chip->idma64);
+       return 0;
+}
+
+static int idma64_pm_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+       idma64_on(chip->idma64);
+       return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops idma64_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
+};
+
+static struct platform_driver idma64_platform_driver = {
+       .probe          = idma64_platform_probe,
+       .remove         = idma64_platform_remove,
+       .driver = {
+               .name   = DRV_NAME,
+               .pm     = &idma64_dev_pm_ops,
+       },
+};
+
+module_platform_driver(idma64_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("iDMA64 core driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
new file mode 100644 (file)
index 0000000..a4d9968
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ * Driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_IDMA64_H__
+#define __DMA_IDMA64_H__
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "virt-dma.h"
+
+/* Channel registers */
+
+#define IDMA64_CH_SAR          0x00    /* Source Address Register */
+#define IDMA64_CH_DAR          0x08    /* Destination Address Register */
+#define IDMA64_CH_LLP          0x10    /* Linked List Pointer */
+#define IDMA64_CH_CTL_LO       0x18    /* Control Register Low */
+#define IDMA64_CH_CTL_HI       0x1c    /* Control Register High */
+#define IDMA64_CH_SSTAT                0x20
+#define IDMA64_CH_DSTAT                0x28
+#define IDMA64_CH_SSTATAR      0x30
+#define IDMA64_CH_DSTATAR      0x38
+#define IDMA64_CH_CFG_LO       0x40    /* Configuration Register Low */
+#define IDMA64_CH_CFG_HI       0x44    /* Configuration Register High */
+#define IDMA64_CH_SGR          0x48
+#define IDMA64_CH_DSR          0x50
+
+#define IDMA64_CH_LENGTH       0x58
+
+/* Bitfields in CTL_LO */
+#define IDMA64C_CTLL_INT_EN            (1 << 0)        /* irqs enabled? */
+#define IDMA64C_CTLL_DST_WIDTH(x)      ((x) << 1)      /* bytes per element */
+#define IDMA64C_CTLL_SRC_WIDTH(x)      ((x) << 4)
+#define IDMA64C_CTLL_DST_INC           (0 << 8)        /* DAR update/not */
+#define IDMA64C_CTLL_DST_FIX           (1 << 8)
+#define IDMA64C_CTLL_SRC_INC           (0 << 10)       /* SAR update/not */
+#define IDMA64C_CTLL_SRC_FIX           (1 << 10)
+#define IDMA64C_CTLL_DST_MSIZE(x)      ((x) << 11)     /* burst, #elements */
+#define IDMA64C_CTLL_SRC_MSIZE(x)      ((x) << 14)
+#define IDMA64C_CTLL_FC_M2P            (1 << 20)       /* mem-to-periph */
+#define IDMA64C_CTLL_FC_P2M            (2 << 20)       /* periph-to-mem */
+#define IDMA64C_CTLL_LLP_D_EN          (1 << 27)       /* dest block chain */
+#define IDMA64C_CTLL_LLP_S_EN          (1 << 28)       /* src block chain */
+
+/* Bitfields in CTL_HI */
+#define IDMA64C_CTLH_BLOCK_TS(x)       ((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_DONE              (1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA64C_CFGL_DST_BURST_ALIGN   (1 << 0)        /* dst burst align */
+#define IDMA64C_CFGL_SRC_BURST_ALIGN   (1 << 1)        /* src burst align */
+#define IDMA64C_CFGL_CH_SUSP           (1 << 8)
+#define IDMA64C_CFGL_FIFO_EMPTY                (1 << 9)
+#define IDMA64C_CFGL_CH_DRAIN          (1 << 10)       /* drain FIFO */
+#define IDMA64C_CFGL_DST_OPT_BL                (1 << 20)       /* optimize dst burst length */
+#define IDMA64C_CFGL_SRC_OPT_BL                (1 << 21)       /* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA64C_CFGH_SRC_PER(x)                ((x) << 0)      /* src peripheral */
+#define IDMA64C_CFGH_DST_PER(x)                ((x) << 4)      /* dst peripheral */
+#define IDMA64C_CFGH_RD_ISSUE_THD(x)   ((x) << 8)
+#define IDMA64C_CFGH_RW_ISSUE_THD(x)   ((x) << 18)
+
+/* Interrupt registers */
+
+#define IDMA64_INT_XFER                0x00
+#define IDMA64_INT_BLOCK       0x08
+#define IDMA64_INT_SRC_TRAN    0x10
+#define IDMA64_INT_DST_TRAN    0x18
+#define IDMA64_INT_ERROR       0x20
+
+#define IDMA64_RAW(x)          (0x2c0 + IDMA64_INT_##x)        /* r */
+#define IDMA64_STATUS(x)       (0x2e8 + IDMA64_INT_##x)        /* r (raw & mask) */
+#define IDMA64_MASK(x)         (0x310 + IDMA64_INT_##x)        /* rw (set = irq enabled) */
+#define IDMA64_CLEAR(x)                (0x338 + IDMA64_INT_##x)        /* w (ack, affects "raw") */
+
+/* Common registers */
+
+#define IDMA64_STATUS_INT      0x360   /* r */
+#define IDMA64_CFG             0x398
+#define IDMA64_CH_EN           0x3a0
+
+/* Bitfields in CFG */
+#define IDMA64_CFG_DMA_EN              (1 << 0)
+
+/* Hardware descriptor for Linked List transfers */
+struct idma64_lli {
+       u64             sar;
+       u64             dar;
+       u64             llp;
+       u32             ctllo;
+       u32             ctlhi;
+       u32             sstat;
+       u32             dstat;
+};
+
+struct idma64_hw_desc {
+       struct idma64_lli *lli;
+       dma_addr_t llp;
+       dma_addr_t phys;
+       unsigned int len;
+};
+
+struct idma64_desc {
+       struct virt_dma_desc vdesc;
+       enum dma_transfer_direction direction;
+       struct idma64_hw_desc *hw;
+       unsigned int ndesc;
+       size_t length;
+       enum dma_status status;
+};
+
+static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
+{
+       return container_of(vdesc, struct idma64_desc, vdesc);
+}
+
+struct idma64_chan {
+       struct virt_dma_chan vchan;
+
+       void __iomem *regs;
+
+       /* hardware configuration */
+       enum dma_transfer_direction direction;
+       unsigned int mask;
+       struct dma_slave_config config;
+
+       void *pool;
+       struct idma64_desc *desc;
+};
+
+static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct idma64_chan, vchan.chan);
+}
+
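+/*
+ * The channel enable/mask registers take the affected bits in the upper byte
+ * as a write-enable mask, so a single write can set or clear one channel bit
+ * without a read-modify-write cycle.
+ */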
+#define channel_set_bit(idma64, reg, mask)     \
+       dma_writel(idma64, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(idma64, reg, mask)   \
+       dma_writel(idma64, reg, ((mask) << 8) | 0)
+
+static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
+{
+       return readl(idma64c->regs + offset);
+}
+
+static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
+                                 u32 value)
+{
+       writel(value, idma64c->regs + offset);
+}
+
+#define channel_readl(idma64c, reg)            \
+       idma64c_readl(idma64c, IDMA64_CH_##reg)
+#define channel_writel(idma64c, reg, value)    \
+       idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
+
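+/* 64-bit channel registers are accessed as two 32-bit halves, low dword first. */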
+static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
+{
+       u64 l, h;
+
+       l = idma64c_readl(idma64c, offset);
+       h = idma64c_readl(idma64c, offset + 4);
+
+       return l | (h << 32);
+}
+
+static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
+                                 u64 value)
+{
+       idma64c_writel(idma64c, offset, value);
+       idma64c_writel(idma64c, offset + 4, value >> 32);
+}
+
+#define channel_readq(idma64c, reg)            \
+       idma64c_readq(idma64c, IDMA64_CH_##reg)
+#define channel_writeq(idma64c, reg, value)    \
+       idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
+
+struct idma64 {
+       struct dma_device dma;
+
+       void __iomem *regs;
+
+       /* channels */
+       unsigned short all_chan_mask;
+       struct idma64_chan *chan;
+};
+
+static inline struct idma64 *to_idma64(struct dma_device *ddev)
+{
+       return container_of(ddev, struct idma64, dma);
+}
+
+static inline u32 idma64_readl(struct idma64 *idma64, int offset)
+{
+       return readl(idma64->regs + offset);
+}
+
+static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
+{
+       writel(value, idma64->regs + offset);
+}
+
+#define dma_readl(idma64, reg)                 \
+       idma64_readl(idma64, IDMA64_##reg)
+#define dma_writel(idma64, reg, value)         \
+       idma64_writel(idma64, IDMA64_##reg, (value))
+
+/**
+ * struct idma64_chip - representation of DesignWare DMA controller hardware
+ * @dev:               struct device of the DMA controller
+ * @irq:               irq line
+ * @regs:              memory mapped I/O space
+ * @idma64:            struct idma64 that is filled by idma64_probe()
+ */
+struct idma64_chip {
+       struct device   *dev;
+       int             irq;
+       void __iomem    *regs;
+       struct idma64   *idma64;
+};
+
+#endif /* __DMA_IDMA64_H__ */
index e269f08..bbec500 100644 (file)
@@ -46,6 +46,7 @@ config OMAP_MBOX_KFIFO_SIZE
 config PCC
        bool "Platform Communication Channel Driver"
        depends on ACPI
+       default n
        help
          ACPI 5.0+ spec defines a generic mode of communication
          between the OS and a platform such as the BMC. This medium
index 26d121d..68885a8 100644 (file)
@@ -352,4 +352,10 @@ static int __init pcc_init(void)
 
        return 0;
 }
-device_initcall(pcc_init);
+
+/*
+ * Make PCC init postcore so that users of this mailbox,
+ * such as the ACPI Processor driver, have it available
+ * by the time they initialize.
+ */
+postcore_initcall(pcc_init);
index 3f68dd2..076f593 100644 (file)
@@ -328,6 +328,29 @@ config INTEL_SOC_PMIC
          thermal, charger and related power management functions
          on these systems.
 
+config MFD_INTEL_LPSS
+       tristate
+       select COMMON_CLK
+       select MFD_CORE
+
+config MFD_INTEL_LPSS_ACPI
+       tristate "Intel Low Power Subsystem support in ACPI mode"
+       select MFD_INTEL_LPSS
+       depends on X86 && ACPI
+       help
+         This driver supports Intel Low Power Subsystem (LPSS) devices such as
+         I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
+         PCH) in ACPI mode.
+
+config MFD_INTEL_LPSS_PCI
+       tristate "Intel Low Power Subsystem support in PCI mode"
+       select MFD_INTEL_LPSS
+       depends on X86 && PCI
+       help
+         This driver supports Intel Low Power Subsystem (LPSS) devices such as
+         I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
+         PCH) in PCI mode.
+
 config MFD_INTEL_MSIC
        bool "Intel MSIC"
        depends on INTEL_SCU_IPC
index ea40e07..9d730a2 100644 (file)
@@ -161,6 +161,9 @@ obj-$(CONFIG_TPS65911_COMPARATOR)   += tps65911-comparator.o
 obj-$(CONFIG_MFD_TPS65090)     += tps65090.o
 obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
 obj-$(CONFIG_MFD_ATMEL_HLCDC)  += atmel-hlcdc.o
+obj-$(CONFIG_MFD_INTEL_LPSS)   += intel-lpss.o
+obj-$(CONFIG_MFD_INTEL_LPSS_PCI)       += intel-lpss-pci.o
+obj-$(CONFIG_MFD_INTEL_LPSS_ACPI)      += intel-lpss-acpi.o
 obj-$(CONFIG_MFD_INTEL_MSIC)   += intel_msic.o
 obj-$(CONFIG_MFD_PALMAS)       += palmas.o
 obj-$(CONFIG_MFD_VIPERBOARD)    += viperboard.o
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
new file mode 100644 (file)
index 0000000..0d92d73
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Intel LPSS ACPI support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#include "intel-lpss.h"
+
+static const struct intel_lpss_platform_info spt_info = {
+       .clk_rate = 120000000,
+};
+
+static const struct acpi_device_id intel_lpss_acpi_ids[] = {
+       /* SPT */
+       { "INT3446", (kernel_ulong_t)&spt_info },
+       { "INT3447", (kernel_ulong_t)&spt_info },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
+
+static int intel_lpss_acpi_probe(struct platform_device *pdev)
+{
+       struct intel_lpss_platform_info *info;
+       const struct acpi_device_id *id;
+
+       id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
+       if (!id)
+               return -ENODEV;
+
+       info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+                           GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       info->irq = platform_get_irq(pdev, 0);
+
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       return intel_lpss_probe(&pdev->dev, info);
+}
+
+static int intel_lpss_acpi_remove(struct platform_device *pdev)
+{
+       intel_lpss_remove(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
+
+static struct platform_driver intel_lpss_acpi_driver = {
+       .probe = intel_lpss_acpi_probe,
+       .remove = intel_lpss_acpi_remove,
+       .driver = {
+               .name = "intel-lpss",
+               .acpi_match_table = intel_lpss_acpi_ids,
+               .pm = &intel_lpss_acpi_pm_ops,
+       },
+};
+
+module_platform_driver(intel_lpss_acpi_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS ACPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
new file mode 100644 (file)
index 0000000..9236dff
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Intel LPSS PCI support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-lpss.h"
+
+static int intel_lpss_pci_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *id)
+{
+       struct intel_lpss_platform_info *info;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+                           GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->mem = &pdev->resource[0];
+       info->irq = pdev->irq;
+
+       /* Probably it is enough to set this for iDMA capable devices only */
+       pci_set_master(pdev);
+
+       ret = intel_lpss_probe(&pdev->dev, info);
+       if (ret)
+               return ret;
+
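+       /*
+        * Drop the probe-time usage count and allow runtime PM (PCI devices
+        * have it forbidden by default), so the LPSS device can suspend when
+        * idle.
+        */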
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_allow(&pdev->dev);
+
+       return 0;
+}
+
+static void intel_lpss_pci_remove(struct pci_dev *pdev)
+{
+       pm_runtime_forbid(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
+       intel_lpss_remove(&pdev->dev);
+}
+
+static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
+
+static const struct intel_lpss_platform_info spt_info = {
+       .clk_rate = 120000000,
+};
+
+static const struct intel_lpss_platform_info spt_uart_info = {
+       .clk_rate = 120000000,
+       .clk_con_id = "baudclk",
+};
+
+static const struct pci_device_id intel_lpss_pci_ids[] = {
+       /* SPT-LP */
+       { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info },
+       /* SPT-H */
+       { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
+       { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
+       { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info },
+       { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+
+static struct pci_driver intel_lpss_pci_driver = {
+       .name = "intel-lpss",
+       .id_table = intel_lpss_pci_ids,
+       .probe = intel_lpss_pci_probe,
+       .remove = intel_lpss_pci_remove,
+       .driver = {
+               .pm = &intel_lpss_pci_pm_ops,
+       },
+};
+
+module_pci_driver(intel_lpss_pci_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
new file mode 100644 (file)
index 0000000..fdf4d5c
--- /dev/null
@@ -0,0 +1,524 @@
+/*
+ * Intel Sunrisepoint LPSS core support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *          Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
+
+#include "intel-lpss.h"
+
+#define LPSS_DEV_OFFSET                0x000
+#define LPSS_DEV_SIZE          0x200
+#define LPSS_PRIV_OFFSET       0x200
+#define LPSS_PRIV_SIZE         0x100
+#define LPSS_IDMA64_OFFSET     0x800
+#define LPSS_IDMA64_SIZE       0x800
+
+/* Offsets from lpss->priv */
+#define LPSS_PRIV_RESETS               0x04
+#define LPSS_PRIV_RESETS_FUNC          BIT(2)
+#define LPSS_PRIV_RESETS_IDMA          0x3
+
+#define LPSS_PRIV_ACTIVELTR            0x10
+#define LPSS_PRIV_IDLELTR              0x14
+
+#define LPSS_PRIV_LTR_REQ              BIT(15)
+#define LPSS_PRIV_LTR_SCALE_MASK       0xc00
+#define LPSS_PRIV_LTR_SCALE_1US                0x800
+#define LPSS_PRIV_LTR_SCALE_32US       0xc00
+#define LPSS_PRIV_LTR_VALUE_MASK       0x3ff
+
+#define LPSS_PRIV_SSP_REG              0x20
+#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN  BIT(0)
+
+#define LPSS_PRIV_REMAP_ADDR_LO                0x40
+#define LPSS_PRIV_REMAP_ADDR_HI                0x44
+
+#define LPSS_PRIV_CAPS                 0xfc
+#define LPSS_PRIV_CAPS_NO_IDMA         BIT(8)
+#define LPSS_PRIV_CAPS_TYPE_SHIFT      4
+#define LPSS_PRIV_CAPS_TYPE_MASK       (0xf << LPSS_PRIV_CAPS_TYPE_SHIFT)
+
+/* This matches the type field in the CAPS register */
+enum intel_lpss_dev_type {
+       LPSS_DEV_I2C = 0,
+       LPSS_DEV_UART,
+       LPSS_DEV_SPI,
+};
+
+struct intel_lpss {
+       const struct intel_lpss_platform_info *info;
+       enum intel_lpss_dev_type type;
+       struct clk *clk;
+       struct clk_lookup *clock;
+       const struct mfd_cell *cell;
+       struct device *dev;
+       void __iomem *priv;
+       int devid;
+       u32 caps;
+       u32 active_ltr;
+       u32 idle_ltr;
+       struct dentry *debugfs;
+};
+
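+/*
+ * Each LPSS device exposes a single MMIO window split into a host controller
+ * region, a private register region and (when present) an iDMA region; the
+ * resources below describe those slices relative to the start of the window.
+ */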
+static const struct resource intel_lpss_dev_resources[] = {
+       DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
+       DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
+       DEFINE_RES_IRQ(0),
+};
+
+static const struct resource intel_lpss_idma64_resources[] = {
+       DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
+       DEFINE_RES_IRQ(0),
+};
+
+#define LPSS_IDMA64_DRIVER_NAME                "idma64"
+
+/*
+ * Cells need to be ordered so that the iDMA is created first. This is
+ * because we need to be sure the DMA is available when the host controller
+ * driver is probed.
+ */
+static const struct mfd_cell intel_lpss_idma64_cell = {
+       .name = LPSS_IDMA64_DRIVER_NAME,
+       .num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
+       .resources = intel_lpss_idma64_resources,
+};
+
+static const struct mfd_cell intel_lpss_i2c_cell = {
+       .name = "i2c_designware",
+       .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+       .resources = intel_lpss_dev_resources,
+};
+
+static const struct mfd_cell intel_lpss_uart_cell = {
+       .name = "dw-apb-uart",
+       .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+       .resources = intel_lpss_dev_resources,
+};
+
+static const struct mfd_cell intel_lpss_spi_cell = {
+       .name = "pxa2xx-spi",
+       .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+       .resources = intel_lpss_dev_resources,
+};
+
+static DEFINE_IDA(intel_lpss_devid_ida);
+static struct dentry *intel_lpss_debugfs;
+
+static int intel_lpss_request_dma_module(const char *name)
+{
+       static bool intel_lpss_dma_requested;
+
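+       /* Request the idma64 module only once, no matter how many LPSS devices probe. */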
+       if (intel_lpss_dma_requested)
+               return 0;
+
+       intel_lpss_dma_requested = true;
+       return request_module("%s", name);
+}
+
+static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
+{
+       lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
+       lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
+}
+
+static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
+{
+       struct dentry *dir;
+
+       dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
+       if (IS_ERR(dir))
+               return PTR_ERR(dir);
+
+       /* Cache the values into the lpss structure */
+       intel_lpss_cache_ltr(lpss);
+
+       debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
+       debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
+       debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);
+
+       lpss->debugfs = dir;
+       return 0;
+}
+
+static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
+{
+       debugfs_remove_recursive(lpss->debugfs);
+}
+
+static void intel_lpss_ltr_set(struct device *dev, s32 val)
+{
+       struct intel_lpss *lpss = dev_get_drvdata(dev);
+       u32 ltr;
+
+       /*
+        * Program the latency tolerance (LTR) according to what the PM QoS
+        * layer has asked for, or disable it in case we were passed a
+        * negative value or PM_QOS_LATENCY_ANY.
+        */
+       ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
+
+       if (val == PM_QOS_LATENCY_ANY || val < 0) {
+               ltr &= ~LPSS_PRIV_LTR_REQ;
+       } else {
+               ltr |= LPSS_PRIV_LTR_REQ;
+               ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
+               ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;
+
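+               /*
+                * Values that do not fit in the 10-bit field fall back to the
+                * 32us scale (hence the >> 5); smaller ones use the 1us scale.
+                */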
+               if (val > LPSS_PRIV_LTR_VALUE_MASK)
+                       ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
+               else
+                       ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
+       }
+
+       if (ltr == lpss->active_ltr)
+               return;
+
+       writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
+       writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);
+
+       /* Cache the values into the lpss structure */
+       intel_lpss_cache_ltr(lpss);
+}
+
+static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
+{
+       lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
+       dev_pm_qos_expose_latency_tolerance(lpss->dev);
+}
+
+static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
+{
+       dev_pm_qos_hide_latency_tolerance(lpss->dev);
+       lpss->dev->power.set_latency_tolerance = NULL;
+}
+
+static int intel_lpss_assign_devs(struct intel_lpss *lpss)
+{
+       unsigned int type;
+
+       type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
+       type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;
+
+       switch (type) {
+       case LPSS_DEV_I2C:
+               lpss->cell = &intel_lpss_i2c_cell;
+               break;
+       case LPSS_DEV_UART:
+               lpss->cell = &intel_lpss_uart_cell;
+               break;
+       case LPSS_DEV_SPI:
+               lpss->cell = &intel_lpss_spi_cell;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       lpss->type = type;
+
+       return 0;
+}
+
+static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
+{
+       return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
+}
+
+static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
+{
+       resource_size_t addr = lpss->info->mem->start;
+
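+       /*
+        * Program the device's MMIO base address into the private REMAP_ADDR
+        * register pair; the high dword is zero on 32-bit builds.
+        */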
+       writel(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR_LO);
+#if BITS_PER_LONG > 32
+       writel(addr >> 32, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
+#else
+       writel(0, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
+#endif
+}
+
+static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
+{
+       u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;
+
+       /* Bring out the device from reset */
+       writel(value, lpss->priv + LPSS_PRIV_RESETS);
+}
+
+static void intel_lpss_init_dev(const struct intel_lpss *lpss)
+{
+       u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+
+       intel_lpss_deassert_reset(lpss);
+
+       if (!intel_lpss_has_idma(lpss))
+               return;
+
+       intel_lpss_set_remap_addr(lpss);
+
+       /* Make sure that SPI multiblock DMA transfers are re-enabled */
+       if (lpss->type == LPSS_DEV_SPI)
+               writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
+}
+
+static void intel_lpss_unregister_clock_tree(struct clk *clk)
+{
+       struct clk *parent;
+
+       while (clk) {
+               parent = clk_get_parent(clk);
+               clk_unregister(clk);
+               clk = parent;
+       }
+}
+
+static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
+                                            const char *devname,
+                                            struct clk **clk)
+{
+       char name[32];
+       struct clk *tmp = *clk;
+
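+       /*
+        * Build the per-device clock chain on top of the root clock: a
+        * "<dev>-enable" gate, a "<dev>-div" fractional divider, and a
+        * "<dev>-update" gate registered with CLK_SET_RATE_PARENT so rate
+        * requests reach the divider.
+        */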
+       snprintf(name, sizeof(name), "%s-enable", devname);
+       tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
+                               lpss->priv, 0, 0, NULL);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
+
+       snprintf(name, sizeof(name), "%s-div", devname);
+       tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
+                                             0, lpss->priv, 1, 15, 16, 15, 0,
+                                             NULL);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
+       *clk = tmp;
+
+       snprintf(name, sizeof(name), "%s-update", devname);
+       tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
+                               CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
+       *clk = tmp;
+
+       return 0;
+}
+
+static int intel_lpss_register_clock(struct intel_lpss *lpss)
+{
+       const struct mfd_cell *cell = lpss->cell;
+       struct clk *clk;
+       char devname[24];
+       int ret;
+
+       if (!lpss->info->clk_rate)
+               return 0;
+
+       /* Root clock */
+       clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL,
+                                     CLK_IS_ROOT, lpss->info->clk_rate);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);
+
+       /*
+        * Register the clock divider chain only for device types that use it;
+        * for I2C we assume the divider is not used.
+        */
+       if (lpss->type != LPSS_DEV_I2C) {
+               ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
+               if (ret)
+                       goto err_clk_register;
+       }
+
+       ret = -ENOMEM;
+
+       /* Clock for the host controller */
+       lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
+       if (!lpss->clock)
+               goto err_clk_register;
+
+       lpss->clk = clk;
+
+       return 0;
+
+err_clk_register:
+       intel_lpss_unregister_clock_tree(clk);
+
+       return ret;
+}
+
+static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
+{
+       if (IS_ERR_OR_NULL(lpss->clk))
+               return;
+
+       clkdev_drop(lpss->clock);
+       intel_lpss_unregister_clock_tree(lpss->clk);
+}
+
+int intel_lpss_probe(struct device *dev,
+                    const struct intel_lpss_platform_info *info)
+{
+       struct intel_lpss *lpss;
+       int ret;
+
+       if (!info || !info->mem || info->irq <= 0)
+               return -EINVAL;
+
+       lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
+       if (!lpss)
+               return -ENOMEM;
+
+       lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+                                 LPSS_PRIV_SIZE);
+       if (!lpss->priv)
+               return -ENOMEM;
+
+       lpss->info = info;
+       lpss->dev = dev;
+       lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);
+
+       dev_set_drvdata(dev, lpss);
+
+       ret = intel_lpss_assign_devs(lpss);
+       if (ret)
+               return ret;
+
+       intel_lpss_init_dev(lpss);
+
+       lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
+       if (lpss->devid < 0)
+               return lpss->devid;
+
+       ret = intel_lpss_register_clock(lpss);
+       if (ret)
+               goto err_clk_register;
+
+       intel_lpss_ltr_expose(lpss);
+
+       ret = intel_lpss_debugfs_add(lpss);
+       if (ret)
+               dev_warn(dev, "Failed to create debugfs entries\n");
+
+       if (intel_lpss_has_idma(lpss)) {
+               /*
+                * Ensure the DMA driver is loaded before the host
+                * controller device appears, so that the host controller
+                * driver can request its DMA channels as early as
+                * possible.
+                *
+                * If the DMA module is not there that's OK as well.
+                */
+               intel_lpss_request_dma_module(LPSS_IDMA64_DRIVER_NAME);
+
+               ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
+                                     1, info->mem, info->irq, NULL);
+               if (ret)
+                       dev_warn(dev, "Failed to add %s, fallback to PIO\n",
+                                LPSS_IDMA64_DRIVER_NAME);
+       }
+
+       ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
+                             1, info->mem, info->irq, NULL);
+       if (ret)
+               goto err_remove_ltr;
+
+       return 0;
+
+err_remove_ltr:
+       intel_lpss_debugfs_remove(lpss);
+       intel_lpss_ltr_hide(lpss);
+
+err_clk_register:
+       ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_probe);
+
+void intel_lpss_remove(struct device *dev)
+{
+       struct intel_lpss *lpss = dev_get_drvdata(dev);
+
+       mfd_remove_devices(dev);
+       intel_lpss_debugfs_remove(lpss);
+       intel_lpss_ltr_hide(lpss);
+       intel_lpss_unregister_clock(lpss);
+       ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+}
+EXPORT_SYMBOL_GPL(intel_lpss_remove);
+
+static int resume_lpss_device(struct device *dev, void *data)
+{
+       pm_runtime_resume(dev);
+       return 0;
+}
+
+int intel_lpss_prepare(struct device *dev)
+{
+       /*
+        * Resume both child devices before entering system sleep. This
+        * ensures that they are in proper state before they get suspended.
+        */
+       device_for_each_child_reverse(dev, NULL, resume_lpss_device);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_prepare);
+
+int intel_lpss_suspend(struct device *dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+
+int intel_lpss_resume(struct device *dev)
+{
+       struct intel_lpss *lpss = dev_get_drvdata(dev);
+
+       intel_lpss_init_dev(lpss);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_resume);
+
+static int __init intel_lpss_init(void)
+{
+       intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
+       return 0;
+}
+module_init(intel_lpss_init);
+
+static void __exit intel_lpss_exit(void)
+{
+       debugfs_remove(intel_lpss_debugfs);
+}
+module_exit(intel_lpss_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
new file mode 100644 (file)
index 0000000..f28cb28
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Intel LPSS core support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_INTEL_LPSS_H
+#define __MFD_INTEL_LPSS_H
+
+struct device;
+struct resource;
+
+struct intel_lpss_platform_info {
+       struct resource *mem;
+       int irq;
+       unsigned long clk_rate;
+       const char *clk_con_id;
+};
+
+int intel_lpss_probe(struct device *dev,
+                    const struct intel_lpss_platform_info *info);
+void intel_lpss_remove(struct device *dev);
+
+#ifdef CONFIG_PM
+int intel_lpss_prepare(struct device *dev);
+int intel_lpss_suspend(struct device *dev);
+int intel_lpss_resume(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+#define INTEL_LPSS_SLEEP_PM_OPS                        \
+       .prepare = intel_lpss_prepare,          \
+       .suspend = intel_lpss_suspend,          \
+       .resume = intel_lpss_resume,            \
+       .freeze = intel_lpss_suspend,           \
+       .thaw = intel_lpss_resume,              \
+       .poweroff = intel_lpss_suspend,         \
+       .restore = intel_lpss_resume,
+#endif
+
+#define INTEL_LPSS_RUNTIME_PM_OPS              \
+       .runtime_suspend = intel_lpss_suspend,  \
+       .runtime_resume = intel_lpss_resume,
+
+#else /* !CONFIG_PM */
+#define INTEL_LPSS_SLEEP_PM_OPS
+#define INTEL_LPSS_RUNTIME_PM_OPS
+#endif /* CONFIG_PM */
+
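+/*
+ * Declares a dev_pm_ops combining the sleep and runtime callbacks above; the
+ * ACPI and PCI glue drivers instantiate it as their .pm table.
+ */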
+#define INTEL_LPSS_PM_OPS(name)                        \
+const struct dev_pm_ops name = {               \
+       INTEL_LPSS_SLEEP_PM_OPS                 \
+       INTEL_LPSS_RUNTIME_PM_OPS               \
+}
+
+#endif /* __MFD_INTEL_LPSS_H */
index 14fd5cb..c17635d 100644 (file)
@@ -302,7 +302,7 @@ void mfd_remove_devices(struct device *parent)
 {
        atomic_t *cnts = NULL;
 
-       device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
+       device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
        kfree(cnts);
 }
 EXPORT_SYMBOL(mfd_remove_devices);
index 6b94007..838545c 100644 (file)
@@ -854,6 +854,18 @@ static int pcan_usb_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB adapter
  */
+static const struct can_bittiming_const pcan_usb_const = {
+       .name = "pcan_usb",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 64,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb = {
        .name = "PCAN-USB",
        .device_id = PCAN_USB_PRODUCT_ID,
@@ -862,17 +874,7 @@ const struct peak_usb_adapter pcan_usb = {
        .clock = {
                .freq = PCAN_USB_CRYSTAL_HZ / 2 ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 64,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb),
index 7921cff..5a2e341 100644 (file)
@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
 
        dev->can.clock = peak_usb_adapter->clock;
-       dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+       dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
        dev->can.do_set_bittiming = peak_usb_set_bittiming;
-       dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+       dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
        dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
        dev->can.do_set_mode = peak_usb_set_mode;
        dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
index 9e624f0..506fe50 100644 (file)
@@ -48,8 +48,8 @@ struct peak_usb_adapter {
        u32 device_id;
        u32 ctrlmode_supported;
        struct can_clock clock;
-       const struct can_bittiming_const bittiming_const;
-       const struct can_bittiming_const data_bittiming_const;
+       const struct can_bittiming_const * const bittiming_const;
+       const struct can_bittiming_const * const data_bittiming_const;
        unsigned int ctrl_count;
 
        int (*intf_probe)(struct usb_interface *intf);
index 09d14e7..ce44a03 100644 (file)
@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
 }
 
 /* describes the PCAN-USB FD adapter */
+static const struct can_bittiming_const pcan_usb_fd_const = {
+       .name = "pcan_usb_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 64,
+       .tseg2_min = 1,
+       .tseg2_max = 16,
+       .sjw_max = 16,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_fd_data_const = {
+       .name = "pcan_usb_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_fd = {
        .name = "PCAN-USB FD",
        .device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
        .clock = {
                .freq = PCAN_UFD_CRYSTAL_HZ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 64,
-               .tseg2_min = 1,
-               .tseg2_max = 16,
-               .sjw_max = 16,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
-       .data_bittiming_const = {
-               .name = "pcan_usb_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_fd_const,
+       .data_bittiming_const = &pcan_usb_fd_data_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = {
 };
 
 /* describes the PCAN-USB Pro FD adapter */
+static const struct can_bittiming_const pcan_usb_pro_fd_const = {
+       .name = "pcan_usb_pro_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 64,
+       .tseg2_min = 1,
+       .tseg2_max = 16,
+       .sjw_max = 16,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
+       .name = "pcan_usb_pro_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro_fd = {
        .name = "PCAN-USB Pro FD",
        .device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
        .clock = {
                .freq = PCAN_UFD_CRYSTAL_HZ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb_pro_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 64,
-               .tseg2_min = 1,
-               .tseg2_max = 16,
-               .sjw_max = 16,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
-       .data_bittiming_const = {
-               .name = "pcan_usb_pro_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_pro_fd_const,
+       .data_bittiming_const = &pcan_usb_pro_fd_data_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
index 7d61b32..bbdd605 100644 (file)
@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB Pro adapter
  */
+static const struct can_bittiming_const pcan_usb_pro_const = {
+       .name = "pcan_usb_pro",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro = {
        .name = "PCAN-USB Pro",
        .device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = {
        .clock = {
                .freq = PCAN_USBPRO_CRYSTAL_HZ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb_pro",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_pro_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
index c51014b..b52e0f6 100644 (file)
@@ -65,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
 obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
 obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
 obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
 obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
index a626c43..cfa3704 100644 (file)
@@ -801,6 +801,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
 
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
 {
+       if (pdata->phy_dev)
+               phy_disconnect(pdata->phy_dev);
+
        mdiobus_unregister(pdata->mdio_bus);
        mdiobus_free(pdata->mdio_bus);
        pdata->mdio_bus = NULL;
index 299eb43..a02ea7f 100644 (file)
@@ -1277,9 +1277,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
        mac_ops->tx_disable(pdata);
 
        xgene_enet_napi_del(pdata);
-       xgene_enet_mdio_remove(pdata);
-       xgene_enet_delete_desc_rings(pdata);
+       if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+               xgene_enet_mdio_remove(pdata);
        unregister_netdev(ndev);
+       xgene_enet_delete_desc_rings(pdata);
        pdata->port_ops->shutdown(pdata);
        free_netdev(ndev);
 
index 64c1e9d..09ff09f 100644 (file)
@@ -2126,6 +2126,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        int ret = 0;
        int timeout = 0;
        u32 reg;
+       u32 dma_ctrl;
+       int i;
 
        /* Disable TDMA to stop add more frames in TX DMA */
        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2169,6 +2171,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
                ret = -ETIMEDOUT;
        }
 
+       dma_ctrl = 0;
+       for (i = 0; i < priv->hw_params->rx_queues; i++)
+               dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg &= ~dma_ctrl;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       dma_ctrl = 0;
+       for (i = 0; i < priv->hw_params->tx_queues; i++)
+               dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg &= ~dma_ctrl;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
        return ret;
 }
 
@@ -2820,8 +2836,6 @@ static void bcmgenet_timeout(struct net_device *dev)
 
        netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
 
-       bcmgenet_disable_tx_napi(priv);
-
        for (q = 0; q < priv->hw_params->tx_queues; q++)
                bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
        bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2837,8 +2851,6 @@ static void bcmgenet_timeout(struct net_device *dev)
        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
        bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-       bcmgenet_enable_tx_napi(priv);
-
        dev->trans_start = jiffies;
 
        dev->stats.tx_errors++;
index 271bb58..b349e6f 100644 (file)
@@ -1778,7 +1778,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
                return ret;
 
        fep->mii_timeout = 0;
-       init_completion(&fep->mdio_done);
+       reinit_completion(&fep->mdio_done);
 
        /* start a read op */
        writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1817,7 +1817,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                return ret;
 
        fep->mii_timeout = 0;
-       init_completion(&fep->mdio_done);
+       reinit_completion(&fep->mdio_done);
 
        /* start a write op */
        writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
index 605cc89..b1a4ea2 100644 (file)
@@ -1282,7 +1282,12 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
                }
        }
 
-       if (core_stats) {
+       if (!core_stats)
+               return stats_count;
+
+       if (nic_data->datapath_caps &
+                       1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
+               /* Use vadaptor stats. */
                core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
                                         stats[EF10_STAT_rx_multicast] +
                                         stats[EF10_STAT_rx_broadcast];
@@ -1302,6 +1307,26 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
                core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
                core_stats->rx_errors = core_stats->rx_crc_errors;
                core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+       } else {
+               /* Use port stats. */
+               core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+               core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+               core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+               core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+               core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
+               core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+               core_stats->rx_length_errors =
+                               stats[EF10_STAT_port_rx_gtjumbo] +
+                               stats[EF10_STAT_port_rx_length_error];
+               core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+               core_stats->rx_frame_errors =
+                               stats[EF10_STAT_port_rx_align_error];
+               core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+               core_stats->rx_errors = (core_stats->rx_length_errors +
+                                        core_stats->rx_crc_errors +
+                                        core_stats->rx_frame_errors);
        }
 
        return stats_count;
index 1960b46..d7a6524 100644 (file)
@@ -290,6 +290,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
                return ERR_PTR(-EINVAL);
        }
 
+       /* propagate the fixed link values to struct phy_device */
+       phy->link = status->link;
+       if (status->link) {
+               phy->speed = status->speed;
+               phy->duplex = status->duplex;
+               phy->pause = status->pause;
+               phy->asym_pause = status->asym_pause;
+       }
+
        of_node_get(np);
        phy->dev.of_node = np;
 
index 1e1fbb0..34fe339 100644 (file)
@@ -1038,10 +1038,14 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
        int value = -1;
 
        if (phydrv->read_mmd_indirect == NULL) {
-               mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+               struct mii_bus *bus = phydev->bus;
+
+               mutex_lock(&bus->mdio_lock);
+               mmd_phy_indirect(bus, prtad, devad, addr);
 
                /* Read the content of the MMD's selected register */
-               value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
+               value = bus->read(bus, addr, MII_MMD_DATA);
+               mutex_unlock(&bus->mdio_lock);
        } else {
                value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
        }
@@ -1071,10 +1075,14 @@ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
        struct phy_driver *phydrv = phydev->drv;
 
        if (phydrv->write_mmd_indirect == NULL) {
-               mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+               struct mii_bus *bus = phydev->bus;
+
+               mutex_lock(&bus->mdio_lock);
+               mmd_phy_indirect(bus, prtad, devad, addr);
 
                /* Write the data into MMD's selected register */
-               phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
+               bus->write(bus, addr, MII_MMD_DATA, data);
+               mutex_unlock(&bus->mdio_lock);
        } else {
                phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
        }
index 0302483..55f0178 100644 (file)
@@ -176,7 +176,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
        if (c45_ids)
                dev->c45_ids = *c45_ids;
        dev->bus = bus;
-       dev->dev.parent = bus->parent;
+       dev->dev.parent = &bus->dev;
        dev->dev.bus = &mdio_bus_type;
        dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
        dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
index 3c86b10..e049857 100644 (file)
@@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
        struct driver_info      *info = dev->driver_info;
-       int                     retval, pm;
+       int                     retval, pm, mpn;
 
        clear_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_stop_queue (net);
@@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net)
 
        usbnet_purge_paused_rxq(dev);
 
+       mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
        /* deferred work (task, timer, softirq) must also stop.
         * can't flush_scheduled_work() until we drop rtnl (later),
         * else workers could deadlock; so make workers a NOP.
@@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net)
        if (!pm)
                usb_autopm_put_interface(dev->intf);
 
-       if (info->manage_power &&
-           !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+       if (info->manage_power && mpn)
                info->manage_power(dev, 0);
        else
                usb_autopm_put_interface(dev->intf);
index 34c519e..5bc4b1e 100644 (file)
@@ -2216,6 +2216,8 @@ static int vxlan_open(struct net_device *dev)
 
        if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
                ret = vxlan_igmp_join(vxlan);
+               if (ret == -EADDRINUSE)
+                       ret = 0;
                if (ret) {
                        vxlan_sock_release(vs);
                        return ret;
index b978bbf..f6ae0d0 100644 (file)
@@ -1108,7 +1108,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
 
 #define LEGACY_IO_RESOURCE     (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
 
-static void pci_msi_setup_pci_dev(struct pci_dev *dev)
+void pci_msi_setup_pci_dev(struct pci_dev *dev)
 {
        /*
         * Disable the MSI hardware to avoid screaming interrupts
index 7006860..2516769 100644 (file)
@@ -258,8 +258,7 @@ static int allocate_power(struct thermal_zone_device *tz,
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
-       req_power = devm_kcalloc(&tz->device, num_actors * 5,
-                                sizeof(*req_power), GFP_KERNEL);
+       req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
        if (!req_power) {
                ret = -ENOMEM;
                goto unlock;
index 7245611..94813af 100644 (file)
@@ -1668,7 +1668,6 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
 
        switch (val) {
        case CPUFREQ_ADJUST:
-       case CPUFREQ_INCOMPATIBLE:
                pr_debug("min dma period: %d ps, "
                        "new clock %d kHz\n", pxafb_display_dma_period(var),
                        policy->max);
index 89dd7e0..dcf774c 100644 (file)
@@ -1042,7 +1042,6 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
 
        switch (val) {
        case CPUFREQ_ADJUST:
-       case CPUFREQ_INCOMPATIBLE:
                dev_dbg(fbi->dev, "min dma period: %d ps, "
                        "new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
                        policy->max);
index 59fc190..70fa438 100644 (file)
@@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void)
 
        return 0;
 err_unregister:
-       for_each_possible_cpu(i) {
-               struct acpi_processor_performance *perf;
-               perf = per_cpu_ptr(acpi_perf_data, i);
-               acpi_processor_unregister_performance(perf, i);
-       }
+       for_each_possible_cpu(i)
+               acpi_processor_unregister_performance(i);
+
 err_out:
        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
        free_acpi_perf_data();
@@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void)
        kfree(acpi_ids_done);
        kfree(acpi_id_present);
        kfree(acpi_id_cst_present);
-       for_each_possible_cpu(i) {
-               struct acpi_processor_performance *perf;
-               perf = per_cpu_ptr(acpi_perf_data, i);
-               acpi_processor_unregister_performance(perf, i);
-       }
+       for_each_possible_cpu(i)
+               acpi_processor_unregister_performance(i);
+
        free_acpi_perf_data();
 }
 
index 518c629..5fa588e 100644 (file)
@@ -844,14 +844,15 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
        struct wb_iter iter;
 
        might_sleep();
-
-       if (!bdi_has_dirty_io(bdi))
-               return;
 restart:
        rcu_read_lock();
        bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
-               if (!wb_has_dirty_io(wb) ||
-                   (skip_if_busy && writeback_in_progress(wb)))
+               /* SYNC_ALL writes out I_DIRTY_TIME too */
+               if (!wb_has_dirty_io(wb) &&
+                   (base_work->sync_mode == WB_SYNC_NONE ||
+                    list_empty(&wb->b_dirty_time)))
+                       continue;
+               if (skip_if_busy && writeback_in_progress(wb))
                        continue;
 
                base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
@@ -899,8 +900,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 {
        might_sleep();
 
-       if (bdi_has_dirty_io(bdi) &&
-           (!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
+       if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
                base_work->auto_free = 0;
                base_work->single_wait = 0;
                base_work->single_done = 0;
@@ -2275,8 +2275,12 @@ void sync_inodes_sb(struct super_block *sb)
        };
        struct backing_dev_info *bdi = sb->s_bdi;
 
-       /* Nothing to do? */
-       if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
+       /*
+        * Can't skip on !bdi_has_dirty() because we should wait for !dirty
+        * inodes under writeback and I_DIRTY_TIME inodes ignored by
+        * bdi_has_dirty() need to be written out too.
+        */
+       if (bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
index 6b040f4..fcf9080 100644 (file)
@@ -147,6 +147,7 @@ struct acpi_pld_info {
  *        (Intended for BIOS use only)
  */
 #define ACPI_PLD_REV1_BUFFER_SIZE               16     /* For Revision 1 of the buffer (From ACPI spec) */
+#define ACPI_PLD_REV2_BUFFER_SIZE               20     /* For Revision 2 of the buffer (From ACPI spec) */
 #define ACPI_PLD_BUFFER_SIZE                    20     /* For Revision 2 of the buffer (From ACPI spec) */
 
 /* First 32-bit dword, bits 0:32 */
index 03aacfb..e11611c 100644 (file)
 
 #define ACPI_ROOT_TABLE_SIZE_INCREMENT  4
 
-/* Maximum number of While() loop iterations before forced abort */
-
-#define ACPI_MAX_LOOP_ITERATIONS        0xFFFF
-
 /* Maximum sleep allowed via Sleep() operator */
 
 #define ACPI_MAX_SLEEP                  2000   /* 2000 millisec == two seconds */
index 11c3a01..9f20eb4 100644 (file)
@@ -192,8 +192,9 @@ struct acpi_exception_info {
 #define AE_AML_BAD_RESOURCE_LENGTH      EXCEP_AML (0x001F)
 #define AE_AML_ILLEGAL_ADDRESS          EXCEP_AML (0x0020)
 #define AE_AML_INFINITE_LOOP            EXCEP_AML (0x0021)
+#define AE_AML_UNINITIALIZED_NODE       EXCEP_AML (0x0022)
 
-#define AE_CODE_AML_MAX                 0x0021
+#define AE_CODE_AML_MAX                 0x0022
 
 /*
  * Internal exceptions used for control
@@ -355,7 +356,9 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
        EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
                  "A memory, I/O, or PCI configuration address is invalid"),
        EXCEP_TXT("AE_AML_INFINITE_LOOP",
-                 "An apparent infinite AML While loop, method was aborted")
+                 "An apparent infinite AML While loop, method was aborted"),
+       EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
+                 "A namespace node is uninitialized or unresolved")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
index f56de8c..908d4f9 100644 (file)
@@ -88,7 +88,8 @@
 #define ACPI_LV_DEBUG_OBJECT        0x00000002
 #define ACPI_LV_INFO                0x00000004
 #define ACPI_LV_REPAIR              0x00000008
-#define ACPI_LV_ALL_EXCEPTIONS      0x0000000F
+#define ACPI_LV_TRACE_POINT         0x00000010
+#define ACPI_LV_ALL_EXCEPTIONS      0x0000001F
 
 /* Trace verbosity level 1 [Standard Trace Level] */
 
 #define ACPI_DB_DEBUG_OBJECT        ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
 #define ACPI_DB_INFO                ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
 #define ACPI_DB_REPAIR              ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
+#define ACPI_DB_TRACE_POINT         ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT)
 #define ACPI_DB_ALL_EXCEPTIONS      ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
 
 /* Trace level -- also used in the global "DebugLevel" */
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
+/*
+ * Global trace flags
+ */
+#define ACPI_TRACE_ENABLED          ((u32) 4)
+#define ACPI_TRACE_ONESHOT          ((u32) 2)
+#define ACPI_TRACE_OPCODE           ((u32) 1)
+
+/* Defaults for trace debugging level/layer */
+
+#define ACPI_TRACE_LEVEL_ALL        ACPI_LV_ALL
+#define ACPI_TRACE_LAYER_ALL        0x000001FF
+#define ACPI_TRACE_LEVEL_DEFAULT    ACPI_LV_TRACE_POINT
+#define ACPI_TRACE_LAYER_DEFAULT    ACPI_EXECUTER
+
 #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
 /*
  * The module name is used primarily for error and debug messages.
 #define ACPI_DUMP_PATHNAME(a, b, c, d)  acpi_ns_dump_pathname(a, b, c, d)
 #define ACPI_DUMP_BUFFER(a, b)          acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
 
+#define ACPI_TRACE_POINT(a, b, c, d)    acpi_trace_point (a, b, c, d)
+
 #else                          /* ACPI_DEBUG_OUTPUT */
 /*
  * This is the non-debug case -- make everything go away,
 #define ACPI_DUMP_PATHNAME(a, b, c, d)
 #define ACPI_DUMP_BUFFER(a, b)
 #define ACPI_IS_DEBUG_ENABLED(level, component) 0
+#define ACPI_TRACE_POINT(a, b, c, d)
 
 /* Return macros must have a return statement at the minimum */
 
index 83061ca..5ba8fb6 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index ea6428b..29c6912 100644 (file)
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
index d02df0a..a54ad1c 100644 (file)
@@ -430,4 +430,10 @@ long acpi_os_get_file_offset(ACPI_FILE file);
 acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
 #endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
+void
+acpi_os_trace_point(acpi_trace_event_type type,
+                   u8 begin, u8 *aml, char *pathname);
+#endif
+
 #endif                         /* __ACPIOSXF_H__ */
index e8ec18a..c33eeab 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20150619
+#define ACPI_CA_VERSION                 0x20150818
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -251,7 +251,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
  * traced each time it is executed.
  */
 ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0);
-ACPI_INIT_GLOBAL(acpi_name, acpi_gbl_trace_method_name, 0);
+ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT);
 
 /*
  * Runtime configuration of debug output control masks. We want the debug
@@ -504,7 +506,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                                           acpi_object_handler handler,
                                           void **data))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-                            acpi_debug_trace(char *name, u32 debug_level,
+                            acpi_debug_trace(const char *name, u32 debug_level,
                                              u32 debug_layer, u32 flags))
 
 /*
@@ -907,9 +909,17 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6)
                                                     const char *module_name,
                                                     u32 component_id,
                                                     const char *format, ...))
+
+ACPI_DBG_DEPENDENT_RETURN_VOID(void
+                              acpi_trace_point(acpi_trace_event_type type,
+                                               u8 begin,
+                                               u8 *aml, char *pathname))
 ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
                                void ACPI_INTERNAL_VAR_XFACE
                                acpi_log_error(const char *format, ...))
+ acpi_status acpi_initialize_debugger(void);
+
+void acpi_terminate_debugger(void);
 
 /*
  * Divergences
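
The acpixf.h changes above make acpi_debug_trace() take a method pathname string and add per-trace debug level/layer globals. A minimal sketch of arming a one-shot method trace with the flags introduced in the earlier hunk, assuming an ACPI-enabled kernel; the method path and wrapper function are only examples:

#include <linux/acpi.h>

static void example_arm_method_trace(void)
{
	acpi_status status;

	status = acpi_debug_trace("\\_SB.PCI0._INI",
				  ACPI_TRACE_LEVEL_ALL, ACPI_TRACE_LAYER_ALL,
				  ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);
	if (ACPI_FAILURE(status))
		pr_warn("ACPI method trace could not be armed\n");
}
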
index a948fc5..6e28f54 100644 (file)
@@ -1186,20 +1186,29 @@ enum acpi_spmi_interface_types {
  * December 19, 2014
  *
  * NOTE: There are two versions of the table with the same signature --
- * the client version and the server version.
+ * the client version and the server version. The common platform_class
+ * field is used to differentiate the two types of tables.
  *
  ******************************************************************************/
 
-struct acpi_table_tcpa_client {
+struct acpi_table_tcpa_hdr {
        struct acpi_table_header header;        /* Common ACPI table header */
        u16 platform_class;
+};
+
+/*
+ * Values for platform_class above.
+ * This is how the client and server subtables are differentiated
+ */
+#define ACPI_TCPA_CLIENT_TABLE          0
+#define ACPI_TCPA_SERVER_TABLE          1
+
+struct acpi_table_tcpa_client {
        u32 minimum_log_length; /* Minimum length for the event log area */
        u64 log_address;        /* Address of the event log area */
 };
 
 struct acpi_table_tcpa_server {
-       struct acpi_table_header header;        /* Common ACPI table header */
-       u16 platform_class;
        u16 reserved;
        u64 minimum_log_length; /* Minimum length for the event log area */
        u64 log_address;        /* Address of the event log area */
index c2a41d2..f914958 100644 (file)
@@ -662,6 +662,7 @@ typedef u32 acpi_object_type;
 #define ACPI_TYPE_DEBUG_OBJECT          0x10
 
 #define ACPI_TYPE_EXTERNAL_MAX          0x10
+#define ACPI_NUM_TYPES                  (ACPI_TYPE_EXTERNAL_MAX + 1)
 
 /*
  * These are object types that do not map directly to the ACPI
@@ -683,6 +684,7 @@ typedef u32 acpi_object_type;
 #define ACPI_TYPE_LOCAL_SCOPE           0x1B   /* 1 Name, multiple object_list Nodes */
 
 #define ACPI_TYPE_NS_NODE_MAX           0x1B   /* Last typecode used within a NS Node */
+#define ACPI_TOTAL_TYPES                (ACPI_TYPE_NS_NODE_MAX + 1)
 
 /*
  * These are special object types that never appear in
@@ -985,7 +987,8 @@ struct acpi_buffer {
  */
 #define ACPI_FULL_PATHNAME              0
 #define ACPI_SINGLE_NAME                1
-#define ACPI_NAME_TYPE_MAX              1
+#define ACPI_FULL_PATHNAME_NO_TRAILING  2
+#define ACPI_NAME_TYPE_MAX              2
 
 /*
  * Predefined Namespace items
@@ -1246,6 +1249,14 @@ struct acpi_memory_list {
 #endif
 };
 
+/* Definitions of trace event types */
+
+typedef enum {
+       ACPI_TRACE_AML_METHOD,
+       ACPI_TRACE_AML_OPCODE,
+       ACPI_TRACE_AML_REGION
+} acpi_trace_event_type;
+
 /* Definitions of _OSI support */
 
 #define ACPI_VENDOR_STRINGS                 0x01
index 3cedd43..ec00e2b 100644 (file)
 
 #ifdef ACPI_ASL_COMPILER
 #define ACPI_APPLICATION
-#define ACPI_DISASSEMBLER
 #define ACPI_DEBUG_OUTPUT
 #define ACPI_CONSTANT_EVAL_ONLY
 #define ACPI_LARGE_NAMESPACE_NODE
 #define ACPI_DATA_TABLE_DISASSEMBLY
 #define ACPI_SINGLE_THREADED
 #define ACPI_32BIT_PHYSICAL_ADDRESS
+
+#define ACPI_DISASSEMBLER 1
 #endif
 
 /* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -89,8 +90,8 @@
 #endif
 
 /*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration.
- * All single threaded.
+ * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
+ * configuration. All single threaded.
  */
 #if (defined ACPI_BIN_APP)      || \
        (defined ACPI_DUMP_APP)     || \
 #define ACPI_USE_NATIVE_RSDP_POINTER
 #endif
 
-/* acpi_dump configuration. Native mapping used if provied by OSPMs */
+/* acpi_dump configuration. Native mapping used if provided by the host */
 
 #ifdef ACPI_DUMP_APP
 #define ACPI_USE_NATIVE_MEMORY_MAPPING
 #define ACPI_USE_LOCAL_CACHE
 #endif
 
-/* Common debug support */
+/* Common debug/disassembler support */
 
 #ifdef ACPI_FULL_DEBUG
-#define ACPI_DEBUGGER
 #define ACPI_DEBUG_OUTPUT
-#define ACPI_DISASSEMBLER
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
 #endif
 
 
  * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
  *      the standard header files may be used.
  *
- * The ACPICA subsystem only uses low level C library functions that do not call
- * operating system services and may therefore be inlined in the code.
+ * The ACPICA subsystem only uses low level C library functions that do not
+ * call operating system services and may therefore be inlined in the code.
  *
  * It may be necessary to tailor these include files to the target
  * generation environment.
index 0a7dc8e..2f296cb 100644 (file)
@@ -56,6 +56,9 @@
 #if defined(_LINUX) || defined(__linux__)
 #include <acpi/platform/aclinuxex.h>
 
+#elif defined(WIN32)
+#include "acwinex.h"
+
 #elif defined(_AED_EFI)
 #include "acefiex.h"
 
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
new file mode 100644 (file)
index 0000000..b647974
--- /dev/null
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Name: acmsvcex.h - Extra VC specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACMSVCEX_H__
+#define __ACMSVCEX_H__
+
+/* Debug support. */
+
+#ifdef _DEBUG
+#define _CRTDBG_MAP_ALLOC      /* Enables specific file/lineno for leaks */
+#include <crtdbg.h>
+#endif
+
+#endif                         /* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
new file mode 100644 (file)
index 0000000..6ed1d71
--- /dev/null
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Name: acwinex.h - Extra OS specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACWINEX_H__
+#define __ACWINEX_H__
+
+/* Windows uses VC */
+
+#endif                         /* __ACWINEX_H__ */
index 4188a4d..ff5f135 100644 (file)
@@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
 
 extern int acpi_processor_register_performance(struct acpi_processor_performance
                                               *performance, unsigned int cpu);
-extern void acpi_processor_unregister_performance(struct
-                                                 acpi_processor_performance
-                                                 *performance,
-                                                 unsigned int cpu);
+extern void acpi_processor_unregister_performance(unsigned int cpu);
 
 /* note: this locks both the calling module and the processor module
          if a _PPC object exists, rmmod is disallowed then */
@@ -318,6 +315,7 @@ int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 void acpi_processor_set_pdc(acpi_handle handle);
 
 /* in processor_throttling.c */
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr,
@@ -330,14 +328,59 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
                        unsigned long action);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
+#else
+static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+       return 0;
+}
+
+static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+{
+       return -ENODEV;
+}
+
+static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
+                                        int state, bool force)
+{
+       return -ENODEV;
+}
+
+static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+                       unsigned long action) {}
+
+static inline void acpi_processor_throttling_init(void) {}
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+
 /* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
 int acpi_processor_power_init(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-extern struct cpuidle_driver acpi_idle_driver;
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+       return -ENODEV;
+}
+
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+       return -ENODEV;
+}
+
+static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+       return -ENODEV;
+}
 
-#ifdef CONFIG_PM_SLEEP
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
+#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
 void acpi_processor_syscore_init(void);
 void acpi_processor_syscore_exit(void);
 #else
@@ -348,7 +391,7 @@ static inline void acpi_processor_syscore_exit(void) {}
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
 void acpi_thermal_cpufreq_init(void);
 void acpi_thermal_cpufreq_exit(void);
 #else
@@ -360,6 +403,6 @@ static inline void acpi_thermal_cpufreq_exit(void)
 {
        return;
 }
-#endif
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
 
 #endif
index d2445fa..7235c48 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -221,7 +217,7 @@ struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
-
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);
index bde1e56..430efcb 100644 (file)
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
        unsigned int            transition_latency;
 };
 
-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
        unsigned int            min;    /* in kHz */
        unsigned int            max;    /* in kHz */
-       unsigned int            policy; /* see above */
-       struct cpufreq_governor *governor; /* see below */
 };
 
 struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
        struct work_struct      update; /* if update_policy() needs to be
                                         * called, but you're in IRQ context */
 
-       struct cpufreq_real_policy      user_policy;
+       struct cpufreq_user_policy user_policy;
        struct cpufreq_frequency_table  *freq_table;
 
        struct list_head        policy_list;
@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
 
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST                 (0)
-#define CPUFREQ_INCOMPATIBLE           (1)
-#define CPUFREQ_NOTIFY                 (2)
-#define CPUFREQ_START                  (3)
-#define CPUFREQ_CREATE_POLICY          (4)
-#define CPUFREQ_REMOVE_POLICY          (5)
+#define CPUFREQ_NOTIFY                 (1)
+#define CPUFREQ_START                  (2)
+#define CPUFREQ_CREATE_POLICY          (3)
+#define CPUFREQ_REMOVE_POLICY          (4)
 
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +575,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
 int cpufreq_boost_trigger_state(int state);
 int cpufreq_boost_supported(void);
 int cpufreq_boost_enabled(void);
+int cpufreq_enable_boost_support(void);
+bool policy_has_boost_freq(struct cpufreq_policy *policy);
 #else
 static inline int cpufreq_boost_trigger_state(int state)
 {
@@ -591,12 +590,23 @@ static inline int cpufreq_boost_enabled(void)
 {
        return 0;
 }
+
+static inline int cpufreq_enable_boost_support(void)
+{
+       return -EINVAL;
+}
+
+static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+       return false;
+}
 #endif
 /* the following funtion is for cpufreq core use only */
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
 extern struct freq_attr *cpufreq_generic_attr[];
 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
                                      struct cpufreq_frequency_table *table);
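
The boost helpers declared above (policy_has_boost_freq(), cpufreq_enable_boost_support(), cpufreq_freq_attr_scaling_boost_freqs) are meant to be used from a cpufreq driver once its frequency table is known. A minimal sketch under that assumption; the table contents and the init callback are hypothetical, not from this patch:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table example_table[] = {
	{ .frequency = 1000000 },
	{ .frequency = 1200000, .flags = CPUFREQ_BOOST_FREQ },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret = cpufreq_table_validate_and_show(policy, example_table);

	if (ret)
		return ret;

	/* enable boost support only when the table actually carries boost entries */
	if (policy_has_boost_freq(policy))
		ret = cpufreq_enable_boost_support();

	return ret;
}
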
index a2b4ea7..9d212fe 100644 (file)
@@ -959,6 +959,8 @@ extern int __must_check device_add(struct device *dev);
 extern void device_del(struct device *dev);
 extern int device_for_each_child(struct device *dev, void *data,
                     int (*fn)(struct device *dev, void *data));
+extern int device_for_each_child_reverse(struct device *dev, void *data,
+                    int (*fn)(struct device *dev, void *data));
 extern struct device *device_find_child(struct device *dev, void *data,
                                int (*match)(struct device *dev, void *data));
 extern int device_rename(struct device *dev, const char *new_name);
index 61e5b72..953f283 100644 (file)
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
 extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
                                 struct klist_node *n);
 extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_prev(struct klist_iter *i);
 extern struct klist_node *klist_next(struct klist_iter *i);
 
 #endif
index edc068d..2194b8c 100644 (file)
@@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
 
 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
 {
-       return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+       return is_of_node(fwnode) ?
+               container_of(fwnode, struct device_node, fwnode) : NULL;
 }
 
 static inline bool of_have_populated_dt(void)
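
With the is_of_node() guard added above, to_of_node() now returns NULL for fwnodes that are not backed by a device tree node instead of converting an unrelated fwnode. A minimal caller sketch; the function name is illustrative:

#include <linux/of.h>

static int example_require_of_node(struct fwnode_handle *fwnode)
{
	struct device_node *np = to_of_node(fwnode);

	if (!np)
		return -ENODEV;	/* not an OF-backed fwnode */

	pr_info("resolved OF node %s\n", np->full_name);
	return 0;
}
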
index 8a0321a..860c751 100644 (file)
@@ -1202,6 +1202,7 @@ struct msix_entry {
        u16     entry;  /* driver uses to specify entry, OS writes */
 };
 
+void pci_msi_setup_pci_dev(struct pci_dev *dev);
 
 #ifdef CONFIG_PCI_MSI
 int pci_msi_vec_count(struct pci_dev *dev);
index cec2d45..cab7ba5 100644 (file)
@@ -30,7 +30,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
 
 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
 
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
+
 int dev_pm_opp_get_opp_count(struct device *dev);
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                              unsigned long freq,
@@ -62,11 +65,21 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
        return 0;
 }
 
+static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+       return false;
+}
+
 static inline int dev_pm_opp_get_opp_count(struct device *dev)
 {
        return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+       return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                        unsigned long freq, bool available)
 {
@@ -115,6 +128,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
 int of_init_opp_table(struct device *dev);
 void of_free_opp_table(struct device *dev);
+int of_cpumask_init_opp_table(cpumask_var_t cpumask);
+void of_cpumask_free_opp_table(cpumask_var_t cpumask);
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
 #else
 static inline int of_init_opp_table(struct device *dev)
 {
@@ -124,6 +141,25 @@ static inline int of_init_opp_table(struct device *dev)
 static inline void of_free_opp_table(struct device *dev)
 {
 }
+
+static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+       return -ENOSYS;
+}
+
+static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+}
+
+static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+       return -ENOSYS;
+}
+
+static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+       return -ENOSYS;
+}
 #endif
 
 #endif         /* __LINUX_OPP_H__ */
index 7b3ae0c..0f65d36 100644 (file)
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+int dev_pm_qos_expose_latency_tolerance(struct device *dev);
+void dev_pm_qos_hide_latency_tolerance(struct device *dev);
 
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
 {
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
                        { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
 static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
                        { return 0; }
+static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+                       { return 0; }
+static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
 
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
index 89b485a..d74cf7a 100644 (file)
@@ -323,6 +323,47 @@ static struct klist_node *to_klist_node(struct list_head *n)
        return container_of(n, struct klist_node, n_node);
 }
 
+/**
+ * klist_prev - Ante up prev node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the prev node, increment its reference
+ * count, drop the lock, and return that prev node.
+ */
+struct klist_node *klist_prev(struct klist_iter *i)
+{
+       void (*put)(struct klist_node *) = i->i_klist->put;
+       struct klist_node *last = i->i_cur;
+       struct klist_node *prev;
+
+       spin_lock(&i->i_klist->k_lock);
+
+       if (last) {
+               prev = to_klist_node(last->n_node.prev);
+               if (!klist_dec_and_del(last))
+                       put = NULL;
+       } else
+               prev = to_klist_node(i->i_klist->k_list.prev);
+
+       i->i_cur = NULL;
+       while (prev != to_klist_node(&i->i_klist->k_list)) {
+               if (likely(!knode_dead(prev))) {
+                       kref_get(&prev->n_ref);
+                       i->i_cur = prev;
+                       break;
+               }
+               prev = to_klist_node(prev->n_node.prev);
+       }
+
+       spin_unlock(&i->i_klist->k_lock);
+
+       if (put && last)
+               put(last);
+       return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_prev);
+
 /**
  * klist_next - Ante up next node in list.
  * @i: Iterator structure.
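
klist_prev() above mirrors klist_next() and backs the new device_for_each_child_reverse() declared in the device.h hunk earlier. A minimal backwards-iteration sketch; the item structure and field names are hypothetical:

#include <linux/kernel.h>
#include <linux/klist.h>

struct example_item {
	struct klist_node knode;
	int value;
};

static void example_walk_backwards(struct klist *list)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(list, &iter);
	while ((n = klist_prev(&iter))) {
		struct example_item *item =
			container_of(n, struct example_item, knode);

		pr_info("value: %d\n", item->value);
	}
	klist_iter_exit(&iter);
}
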
index a38d3ac..69f4f68 100644 (file)
@@ -361,6 +361,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
        struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 
        ip6gre_tunnel_unlink(ign, t);
+       ip6_tnl_dst_reset(t);
        dev_put(dev);
 }
 
index b397f0a..83a7068 100644 (file)
@@ -219,7 +219,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 #define BROADCAST_ONE          1
 #define BROADCAST_REGISTERED   2
 #define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
+static int pfkey_broadcast(struct sk_buff *skb,
                           int broadcast_flags, struct sock *one_sk,
                           struct net *net)
 {
@@ -244,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                 * socket.
                 */
                if (pfk->promisc)
-                       pfkey_broadcast_one(skb, &skb2, allocation, sk);
+                       pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
                /* the exact target will be processed later */
                if (sk == one_sk)
@@ -259,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                                continue;
                }
 
-               err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);
+               err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
                /* Error is cleare after succecful sending to at least one
                 * registered KM */
@@ -269,7 +269,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
        rcu_read_unlock();
 
        if (one_sk != NULL)
-               err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+               err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
 
        kfree_skb(skb2);
        kfree_skb(skb);
@@ -292,7 +292,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
                hdr = (struct sadb_msg *) pfk->dump.skb->data;
                hdr->sadb_msg_seq = 0;
                hdr->sadb_msg_errno = rc;
-               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
                pfk->dump.skb = NULL;
        }
@@ -333,7 +333,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
                             sizeof(uint64_t));
 
-       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
+       pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
 
        return 0;
 }
@@ -1365,7 +1365,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
        xfrm_state_put(x);
 
-       pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
+       pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
 
        return 0;
 }
@@ -1452,7 +1452,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
        hdr->sadb_msg_seq = c->seq;
        hdr->sadb_msg_pid = c->portid;
 
-       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
+       pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
 
        return 0;
 }
@@ -1565,7 +1565,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
        out_hdr->sadb_msg_reserved = 0;
        out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
        out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+       pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
 
        return 0;
 }
@@ -1670,7 +1670,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
                return -ENOBUFS;
        }
 
-       pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk));
+       pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
 
        return 0;
 }
@@ -1689,7 +1689,7 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 
-       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+       return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
 }
 
 static int key_notify_sa_flush(const struct km_event *c)
@@ -1710,7 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c)
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        hdr->sadb_msg_reserved = 0;
 
-       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+       pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
 
        return 0;
 }
@@ -1767,7 +1767,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
        out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
-               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
        pfk->dump.skb = out_skb;
 
@@ -1847,7 +1847,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
                new_hdr->sadb_msg_errno = 0;
        }
 
-       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+       pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
        return 0;
 }
 
@@ -2181,7 +2181,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = c->seq;
        out_hdr->sadb_msg_pid = c->portid;
-       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
+       pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
        return 0;
 
 }
@@ -2401,7 +2401,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
        out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
+       pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
        err = 0;
 
 out:
@@ -2655,7 +2655,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
        out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
-               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
        pfk->dump.skb = out_skb;
 
@@ -2708,7 +2708,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        hdr->sadb_msg_reserved = 0;
-       pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+       pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
        return 0;
 
 }
@@ -2770,7 +2770,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
        void *ext_hdrs[SADB_EXT_MAX];
        int err;
 
-       pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+       pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
                        BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
        memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -2992,7 +2992,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
        out_hdr->sadb_msg_seq = 0;
        out_hdr->sadb_msg_pid = 0;
 
-       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+       pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
        return 0;
 }
 
@@ -3182,7 +3182,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
                       xfrm_ctx->ctx_len);
        }
 
-       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+       return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
 }
 
 static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3380,7 +3380,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
        n_port->sadb_x_nat_t_port_port = sport;
        n_port->sadb_x_nat_t_port_reserved = 0;
 
-       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+       return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
 }
 
 #ifdef CONFIG_NET_KEY_MIGRATE
@@ -3572,7 +3572,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
        }
 
        /* broadcast migrate message to sockets */
-       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
+       pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
 
        return 0;
 
index 67d2104..a774985 100644 (file)
@@ -2401,7 +2401,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
         * sendmsg(), but that's what we've got...
         */
        if (netlink_tx_is_mmaped(sk) &&
-           msg->msg_iter.type == ITER_IOVEC &&
+           iter_is_iovec(&msg->msg_iter) &&
            msg->msg_iter.nr_segs == 1 &&
            msg->msg_iter.iov->iov_base == NULL) {
                err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
index cab9e9b..4fbb674 100644 (file)
@@ -490,6 +490,19 @@ static bool u32_destroy(struct tcf_proto *tp, bool force)
                                        return false;
                        }
                }
+
+               if (tp_c->refcnt > 1)
+                       return false;
+
+               if (tp_c->refcnt == 1) {
+                       struct tc_u_hnode *ht;
+
+                       for (ht = rtnl_dereference(tp_c->hlist);
+                            ht;
+                            ht = rtnl_dereference(ht->next))
+                               if (!ht_empty(ht))
+                                       return false;
+               }
        }
 
        if (root_ht && --root_ht->refcnt == 0)
index 06320c8..a655ddc 100644 (file)
@@ -3132,11 +3132,18 @@ bool sctp_verify_asconf(const struct sctp_association *asoc,
                case SCTP_PARAM_IPV4_ADDRESS:
                        if (length != sizeof(sctp_ipv4addr_param_t))
                                return false;
+                       /* ensure there is only one addr param and it's in the
+                        * beginning of addip_hdr params, or we reject it.
+                        */
+                       if (param.v != addip->addip_hdr.params)
+                               return false;
                        addr_param_seen = true;
                        break;
                case SCTP_PARAM_IPV6_ADDRESS:
                        if (length != sizeof(sctp_ipv6addr_param_t))
                                return false;
+                       if (param.v != addip->addip_hdr.params)
+                               return false;
                        addr_param_seen = true;
                        break;
                case SCTP_PARAM_ADD_IP:
index fef2acd..85e6f03 100644 (file)
@@ -702,7 +702,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
         * outstanding data and rely on the retransmission limit be reached
         * to shutdown the association.
         */
-       if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+       if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
                t->asoc->overall_error_count = 0;
 
        /* Clear the hb_sent flag to signal that we had a good
index 595fffa..9942836 100644 (file)
@@ -380,8 +380,8 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
                return 0;
 
        if (!initxattrs)
-               return call_int_hook(inode_init_security, 0, inode, dir, qstr,
-                                                        NULL, NULL, NULL);
+               return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
+                                    dir, qstr, NULL, NULL, NULL);
        memset(new_xattrs, 0, sizeof(new_xattrs));
        lsm_xattr = new_xattrs;
        ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
@@ -409,8 +409,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
 {
        if (unlikely(IS_PRIVATE(inode)))
                return -EOPNOTSUPP;
-       return call_int_hook(inode_init_security, 0, inode, dir, qstr,
-                               name, value, len);
+       return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
+                            qstr, name, value, len);
 }
 EXPORT_SYMBOL(security_old_inode_init_security);
 
@@ -1281,7 +1281,8 @@ int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
 
 int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
 {
-       return call_int_hook(socket_getpeersec_dgram, 0, sock, skb, secid);
+       return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
+                            skb, secid);
 }
 EXPORT_SYMBOL(security_socket_getpeersec_dgram);
 
index 5de3c5d..d1a2cb6 100644 (file)
@@ -3172,7 +3172,7 @@ static int add_std_chmaps(struct hda_codec *codec)
                        struct snd_pcm_chmap *chmap;
                        const struct snd_pcm_chmap_elem *elem;
 
-                       if (!pcm || pcm->own_chmap ||
+                       if (!pcm || !pcm->pcm || pcm->own_chmap ||
                            !hinfo->substreams)
                                continue;
                        elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
index b077bb6..24f9111 100644 (file)
@@ -671,7 +671,8 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
                }
                for (i = 0; i < path->depth; i++) {
                        if (path->path[i] == nid) {
-                               if (dir == HDA_OUTPUT || path->idx[i] == idx)
+                               if (dir == HDA_OUTPUT || idx == -1 ||
+                                   path->idx[i] == idx)
                                        return true;
                                break;
                        }
@@ -682,7 +683,7 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
 
 /* check whether the NID is referred by any active paths */
 #define is_active_nid_for_any(codec, nid) \
-       is_active_nid(codec, nid, HDA_OUTPUT, 0)
+       is_active_nid(codec, nid, HDA_OUTPUT, -1)
 
 /* get the default amp value for the target state */
 static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
@@ -883,8 +884,7 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
        struct hda_gen_spec *spec = codec->spec;
        int i;
 
-       if (!enable)
-               path->active = false;
+       path->active = enable;
 
        /* make sure the widget is powered up */
        if (enable && (spec->power_down_unused || codec->power_save_node))
@@ -902,9 +902,6 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
                if (has_amp_out(codec, path, i))
                        activate_amp_out(codec, path, i, enable);
        }
-
-       if (enable)
-               path->active = true;
 }
 EXPORT_SYMBOL_GPL(snd_hda_activate_path);
 
index f788a91..ca03c40 100644 (file)
@@ -200,12 +200,33 @@ static int cx_auto_init(struct hda_codec *codec)
        return 0;
 }
 
-#define cx_auto_free   snd_hda_gen_free
+static void cx_auto_reboot_notify(struct hda_codec *codec)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       if (codec->core.vendor_id != 0x14f150f2)
+               return;
+
+       /* Turn the CX20722 codec into D3 to avoid spurious noises
+          from the internal speaker during (and after) reboot */
+       cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+
+       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+       snd_hda_codec_write(codec, codec->core.afg, 0,
+                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+}
+
+static void cx_auto_free(struct hda_codec *codec)
+{
+       cx_auto_reboot_notify(codec);
+       snd_hda_gen_free(codec);
+}
 
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = cx_auto_init,
+       .reboot_notify = cx_auto_reboot_notify,
        .free = cx_auto_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
index 754e689..00ebc0c 100644 (file)
@@ -1268,6 +1268,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
 
+       case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
        case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
                if (fp->altsetting == 3)
index 3d1537b..e882c83 100644 (file)
 # as published by the Free Software Foundation; version 2
 # of the License.
 
-OUTPUT=./
-ifeq ("$(origin O)", "command line")
-       OUTPUT := $(O)/
-endif
-
-ifneq ($(OUTPUT),)
-# check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
-$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
-endif
-
-SUBDIRS = tools/ec
-
-# --- CONFIGURATION BEGIN ---
-
-# Set the following to `true' to make a unstripped, unoptimized
-# binary. Leave this set to `false' for production use.
-DEBUG ?=       true
-
-# make the build silent. Set this to something else to make it noisy again.
-V ?=           false
-
-# Prefix to the directories we're installing to
-DESTDIR ?=
-
-# --- CONFIGURATION END ---
-
-# Directory definitions. These are default and most probably
-# do not need to be changed. Please note that DESTDIR is
-# added in front of any of them
-
-bindir ?=      /usr/bin
-sbindir ?=     /usr/sbin
-mandir ?=      /usr/man
-
-# Toolchain: what tools do we use, and what options do they need:
-
-INSTALL = /usr/bin/install -c
-INSTALL_PROGRAM = ${INSTALL}
-INSTALL_DATA  = ${INSTALL} -m 644
-INSTALL_SCRIPT = ${INSTALL_PROGRAM}
-
-# If you are running a cross compiler, you may want to set this
-# to something more interesting, like "arm-linux-".  If you want
-# to compile vs uClibc, that can be done here as well.
-CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
-CC = $(CROSS)gcc
-LD = $(CROSS)gcc
-STRIP = $(CROSS)strip
-HOSTCC = gcc
-
-# check if compiler option is supported
-cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
-
-# use '-Os' optimization if available, else use -O2
-OPTIMIZATION := $(call cc-supports,-Os,-O2)
-
-WARNINGS := -Wall
-WARNINGS += $(call cc-supports,-Wstrict-prototypes)
-WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
-
-KERNEL_INCLUDE := ../../../include
-ACPICA_INCLUDE := ../../../drivers/acpi/acpica
-CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
-CFLAGS += $(WARNINGS)
-
-ifeq ($(strip $(V)),false)
-       QUIET=@
-       ECHO=@echo
-else
-       QUIET=
-       ECHO=@\#
-endif
-export QUIET ECHO
-
-# if DEBUG is enabled, then we do not strip or optimize
-ifeq ($(strip $(DEBUG)),true)
-       CFLAGS += -O1 -g -DDEBUG
-       STRIPCMD = /bin/true -Since_we_are_debugging
-else
-       CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
-       STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
-endif
-
-# --- ACPIDUMP BEGIN ---
-
-vpath %.c \
-       ../../../drivers/acpi/acpica\
-       tools/acpidump\
-       common\
-       os_specific/service_layers
-
-CFLAGS += -DACPI_DUMP_APP -Itools/acpidump
-
-DUMP_OBJS = \
-       apdump.o\
-       apfiles.o\
-       apmain.o\
-       osunixdir.o\
-       osunixmap.o\
-       osunixxf.o\
-       tbprint.o\
-       tbxfroot.o\
-       utbuffer.o\
-       utdebug.o\
-       utexcep.o\
-       utglobal.o\
-       utmath.o\
-       utprint.o\
-       utstring.o\
-       utxferror.o\
-       oslibcfs.o\
-       oslinuxtbl.o\
-       cmfsize.o\
-       getopt.o
-
-DUMP_OBJS := $(addprefix $(OUTPUT)tools/acpidump/,$(DUMP_OBJS))
-
-$(OUTPUT)acpidump: $(DUMP_OBJS)
-       $(ECHO) "  LD      " $@
-       $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(DUMP_OBJS) -L$(OUTPUT) -o $@
-       $(QUIET) $(STRIPCMD) $@
-
-$(OUTPUT)tools/acpidump/%.o: %.c
-       $(ECHO) "  CC      " $@
-       $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
-
-# --- ACPIDUMP END ---
-
-all: $(OUTPUT)acpidump
-       echo $(OUTPUT)
-
-clean:
-       -find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
-        | xargs rm -f
-       -rm -f $(OUTPUT)acpidump
-
-install-tools:
-       $(INSTALL) -d $(DESTDIR)${sbindir}
-       $(INSTALL_PROGRAM) $(OUTPUT)acpidump $(DESTDIR)${sbindir}
-
-install-man:
-       $(INSTALL_DATA) -D man/acpidump.8 $(DESTDIR)${mandir}/man8/acpidump.8
-
-install: all install-tools install-man
-
-uninstall:
-       - rm -f $(DESTDIR)${sbindir}/acpidump
-       - rm -f $(DESTDIR)${mandir}/man8/acpidump.8
-
-.PHONY: all utils install-tools install-man install uninstall clean
+include ../../scripts/Makefile.include
+
+all: acpidump ec
+clean: acpidump_clean ec_clean
+install: acpidump_install ec_install
+uninstall: acpidump_uninstall ec_uninstall
+
+acpidump ec: FORCE
+       $(call descend,tools/$@,all)
+acpidump_clean ec_clean:
+       $(call descend,tools/$(@:_clean=),clean)
+acpidump_install ec_install:
+       $(call descend,tools/$(@:_install=),install)
+acpidump_uninstall ec_uninstall:
+       $(call descend,tools/$(@:_uninstall=),uninstall)
+
+.PHONY: FORCE
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
new file mode 100644 (file)
index 0000000..552af68
--- /dev/null
@@ -0,0 +1,92 @@
+# tools/power/acpi/Makefile.config - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+include ../../../../scripts/Makefile.include
+
+OUTPUT=./
+ifeq ("$(origin O)", "command line")
+       OUTPUT := $(O)/
+endif
+
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
+
+# --- CONFIGURATION BEGIN ---
+
+# Set the following to `true' to make an unstripped, unoptimized
+# binary. Leave this set to `false' for production use.
+DEBUG ?=       true
+
+# make the build silent. Set this to something else to make it noisy again.
+V ?=           false
+
+# Prefix to the directories we're installing to
+DESTDIR ?=
+
+# --- CONFIGURATION END ---
+
+# Directory definitions. These are default and most probably
+# do not need to be changed. Please note that DESTDIR is
+# added in front of any of them
+
+bindir ?=      /usr/bin
+sbindir ?=     /usr/sbin
+mandir ?=      /usr/man
+
+# Toolchain: what tools do we use, and what options do they need:
+
+INSTALL = /usr/bin/install -c
+INSTALL_PROGRAM = ${INSTALL}
+INSTALL_DATA  = ${INSTALL} -m 644
+INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+
+# If you are running a cross compiler, you may want to set this
+# to something more interesting, like "arm-linux-".  If you want
+# to compile vs uClibc, that can be done here as well.
+CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+CC = $(CROSS)gcc
+LD = $(CROSS)gcc
+STRIP = $(CROSS)strip
+HOSTCC = gcc
+
+# check if compiler option is supported
+cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+
+# use '-Os' optimization if available, else use -O2
+OPTIMIZATION := $(call cc-supports,-Os,-O2)
+
+WARNINGS := -Wall
+WARNINGS += $(call cc-supports,-Wstrict-prototypes)
+WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
+
+KERNEL_INCLUDE := ../../../include
+ACPICA_INCLUDE := ../../../drivers/acpi/acpica
+CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
+CFLAGS += $(WARNINGS)
+
+ifeq ($(strip $(V)),false)
+       QUIET=@
+       ECHO=@echo
+else
+       QUIET=
+       ECHO=@\#
+endif
+
+# if DEBUG is enabled, then we do not strip or optimize
+ifeq ($(strip $(DEBUG)),true)
+       CFLAGS += -O1 -g -DDEBUG
+       STRIPCMD = /bin/true -Since_we_are_debugging
+else
+       CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
+       STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
+endif
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
new file mode 100644 (file)
index 0000000..ec87a9e
--- /dev/null
@@ -0,0 +1,37 @@
+# tools/power/acpi/Makefile.rules - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE
+       $(ECHO) "  LD      " $@
+       $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@
+       $(QUIET) $(STRIPCMD) $@
+
+$(OUTPUT)%.o: %.c
+       $(ECHO) "  CC      " $@
+       $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
+
+all: $(OUTPUT)$(TOOL)
+clean:
+       -find $(OUTPUT) \( -not -type d \) \
+       -and \( -name '*~' -o -name '*.[oas]' \) \
+       -type f -print \
+        | xargs rm -f
+       -rm -f $(OUTPUT)$(TOOL)
+
+install-tools:
+       $(INSTALL) -d $(DESTDIR)${sbindir}
+       $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir}
+uninstall-tools:
+       - rm -f $(DESTDIR)${sbindir}/$(TOOL)
+
+install: all install-tools $(EXTRA_INSTALL)
+uninstall: uninstall-tools $(EXTRA_UNINSTALL)
+
+.PHONY: FORCE
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
new file mode 100644 (file)
index 0000000..8d76157
--- /dev/null
@@ -0,0 +1,53 @@
+# tools/power/acpi/tools/acpidump/Makefile - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+include ../../Makefile.config
+
+TOOL = acpidump
+EXTRA_INSTALL = install-man
+EXTRA_UNINSTALL = uninstall-man
+
+vpath %.c \
+       ../../../../../drivers/acpi/acpica\
+       ./\
+       ../../common\
+       ../../os_specific/service_layers
+CFLAGS += -DACPI_DUMP_APP -I.\
+       -I../../../../../drivers/acpi/acpica\
+       -I../../../../../include
+TOOL_OBJS = \
+       apdump.o\
+       apfiles.o\
+       apmain.o\
+       osunixdir.o\
+       osunixmap.o\
+       osunixxf.o\
+       tbprint.o\
+       tbxfroot.o\
+       utbuffer.o\
+       utdebug.o\
+       utexcep.o\
+       utglobal.o\
+       utmath.o\
+       utnonansi.o\
+       utprint.o\
+       utstring.o\
+       utxferror.o\
+       oslibcfs.o\
+       oslinuxtbl.o\
+       cmfsize.o\
+       getopt.o
+
+include ../../Makefile.rules
+
+install-man: ../../man/acpidump.8
+       $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8
+uninstall-man:
+       - rm -f $(DESTDIR)${mandir}/man8/acpidump.8
index b7b0b92..75d8a12 100644 (file)
@@ -1,22 +1,17 @@
-ec_access: ec_access.o
-       $(ECHO) "  LD      " $@
-       $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $< -o $@
-       $(QUIET) $(STRIPCMD) $@
+# tools/power/acpi/tools/ec/Makefile - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
 
-%.o: %.c
-       $(ECHO) "  CC      " $@
-       $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
+include ../../Makefile.config
 
-all: ec_access
+TOOL = ec
+TOOL_OBJS = \
+       ec_access.o
 
-install:
-       $(INSTALL) -d $(DESTDIR)${sbindir}
-       $(INSTALL_PROGRAM) ec_access $(DESTDIR)${sbindir}
-
-uninstall:
-       - rm -f $(DESTDIR)${sbindir}/ec_access
-
-clean:
-       -rm -f $(OUTPUT)ec_access
-
-.PHONY: all install uninstall
+include ../../Makefile.rules