Merge branch 'x86/core' into tracing/ftrace
author		Ingo Molnar <mingo@elte.hu>
		Tue, 10 Mar 2009 09:16:17 +0000 (10:16 +0100)
committer	Ingo Molnar <mingo@elte.hu>
		Tue, 10 Mar 2009 09:17:48 +0000 (10:17 +0100)
Semantic merge:

  kernel/trace/trace_functions_graph.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
150 files changed:
Documentation/ABI/testing/debugfs-kmemtrace [new file with mode: 0644]
Documentation/ftrace.txt
Documentation/kernel-parameters.txt
Documentation/lockdep-design.txt
Documentation/sysrq.txt
Documentation/tracepoints.txt
Documentation/vm/kmemtrace.txt [new file with mode: 0644]
MAINTAINERS
arch/Kconfig
arch/alpha/include/asm/ftrace.h [new file with mode: 0644]
arch/alpha/include/asm/hardirq.h
arch/avr32/include/asm/ftrace.h [new file with mode: 0644]
arch/avr32/include/asm/hardirq.h
arch/blackfin/include/asm/ftrace.h [new file with mode: 0644]
arch/cris/include/asm/ftrace.h [new file with mode: 0644]
arch/h8300/include/asm/ftrace.h [new file with mode: 0644]
arch/ia64/Kconfig
arch/ia64/include/asm/ftrace.h [new file with mode: 0644]
arch/ia64/include/asm/hardirq.h
arch/ia64/kernel/Makefile
arch/ia64/kernel/entry.S
arch/ia64/kernel/ftrace.c [new file with mode: 0644]
arch/ia64/kernel/ia64_ksyms.c
arch/m68k/include/asm/ftrace.h [new file with mode: 0644]
arch/mips/include/asm/ftrace.h [new file with mode: 0644]
arch/parisc/include/asm/ftrace.h [new file with mode: 0644]
arch/um/include/asm/ftrace.h [new file with mode: 0644]
arch/x86/Kconfig
arch/x86/include/asm/cacheflush.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/ptrace-abi.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/process.c
arch/x86/kvm/Kconfig
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/xtensa/include/asm/ftrace.h [new file with mode: 0644]
block/Kconfig
block/Makefile
block/blktrace.c [deleted file]
drivers/char/sysrq.c
drivers/oprofile/cpu_buffer.c
fs/partitions/check.c
include/asm-frv/ftrace.h [new file with mode: 0644]
include/asm-generic/vmlinux.lds.h
include/asm-m32r/ftrace.h [new file with mode: 0644]
include/asm-mn10300/ftrace.h [new file with mode: 0644]
include/linux/blktrace_api.h
include/linux/ftrace.h
include/linux/ftrace_irq.h
include/linux/hardirq.h
include/linux/kernel.h
include/linux/lockdep.h
include/linux/memory.h
include/linux/module.h
include/linux/mutex.h
include/linux/ring_buffer.h
include/linux/sched.h
include/linux/slab_def.h
include/linux/slob_def.h
include/linux/slub_def.h
include/linux/string.h
include/linux/timer.h
include/linux/trace_clock.h [new file with mode: 0644]
include/linux/tracepoint.h
include/trace/block.h
include/trace/irq.h [new file with mode: 0644]
include/trace/irq_event_types.h [new file with mode: 0644]
include/trace/kmemtrace.h [new file with mode: 0644]
include/trace/lockdep.h [new file with mode: 0644]
include/trace/lockdep_event_types.h [new file with mode: 0644]
include/trace/power.h [new file with mode: 0644]
include/trace/sched.h
include/trace/sched_event_types.h [new file with mode: 0644]
include/trace/trace_event_types.h [new file with mode: 0644]
include/trace/trace_events.h [new file with mode: 0644]
include/trace/workqueue.h [new file with mode: 0644]
init/Kconfig
init/main.c
kernel/extable.c
kernel/irq/handle.c
kernel/kprobes.c
kernel/lockdep.c
kernel/lockdep_internals.h
kernel/lockdep_proc.c
kernel/lockdep_states.h [new file with mode: 0644]
kernel/module.c
kernel/mutex-debug.c
kernel/mutex-debug.h
kernel/mutex.c
kernel/mutex.h
kernel/relay.c
kernel/sched.c
kernel/sched_clock.c
kernel/sched_features.h
kernel/softirq.c
kernel/timer.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/blktrace.c [new file with mode: 0644]
kernel/trace/events.c [new file with mode: 0644]
kernel/trace/ftrace.c
kernel/trace/kmemtrace.c [new file with mode: 0644]
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c
kernel/trace/trace_branch.c
kernel/trace/trace_clock.c [new file with mode: 0644]
kernel/trace/trace_event_types.h [new file with mode: 0644]
kernel/trace/trace_events.c [new file with mode: 0644]
kernel/trace/trace_events_stage_1.h [new file with mode: 0644]
kernel/trace/trace_events_stage_2.h [new file with mode: 0644]
kernel/trace/trace_events_stage_3.h [new file with mode: 0644]
kernel/trace/trace_export.c [new file with mode: 0644]
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_hw_branches.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_mmiotrace.c
kernel/trace/trace_nop.c
kernel/trace/trace_output.c [new file with mode: 0644]
kernel/trace/trace_output.h [new file with mode: 0644]
kernel/trace/trace_power.c
kernel/trace/trace_printk.c [new file with mode: 0644]
kernel/trace/trace_sched_switch.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_stat.c [new file with mode: 0644]
kernel/trace/trace_stat.h [new file with mode: 0644]
kernel/trace/trace_sysprof.c
kernel/trace/trace_workqueue.c [new file with mode: 0644]
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/vsprintf.c
mm/memory.c
mm/page_alloc.c
mm/slab.c
mm/slob.c
mm/slub.c
mm/vmscan.c
samples/tracepoints/tp-samples-trace.h
scripts/Makefile.build
scripts/recordmcount.pl

diff --git a/Documentation/ABI/testing/debugfs-kmemtrace b/Documentation/ABI/testing/debugfs-kmemtrace
new file mode 100644
index 0000000..5e6a92a
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-kmemtrace
@@ -0,0 +1,71 @@
+What:          /sys/kernel/debug/kmemtrace/
+Date:          July 2008
+Contact:       Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
+Description:
+
+In kmemtrace-enabled kernels, the following files are created:
+
+/sys/kernel/debug/kmemtrace/
+       cpu<n>          (0400)  Per-CPU tracing data, see below. (binary)
+       total_overruns  (0400)  Total number of bytes which were dropped from
+                               cpu<n> files because of full buffer condition,
+                               non-binary. (text)
+       abi_version     (0400)  Kernel's kmemtrace ABI version. (text)
+
+Each per-CPU file should be read according to the relay interface. That is,
+the reader should set affinity to that specific CPU and, as currently done by
+the userspace application (though there are other methods), use poll() with
+an infinite timeout before every read(). Otherwise, erroneous data may be
+read. The binary data has the following _core_ format:
+
+       Event ID        (1 byte)        Unsigned integer, one of:
+               0 - represents an allocation (KMEMTRACE_EVENT_ALLOC)
+               1 - represents a freeing of previously allocated memory
+                   (KMEMTRACE_EVENT_FREE)
+       Type ID         (1 byte)        Unsigned integer, one of:
+               0 - this is a kmalloc() / kfree()
+               1 - this is a kmem_cache_alloc() / kmem_cache_free()
+               2 - this is a __get_free_pages() et al.
+       Event size      (2 bytes)       Unsigned integer representing the
+                                       size of this event. Used to extend
+                                       kmemtrace. Discard the bytes you
+                                       don't know about.
+       Sequence number (4 bytes)       Signed integer used to reorder data
+                                       logged on SMP machines. Wraparound
+                                       must be taken into account, although
+                                       it is unlikely.
+       Caller address  (8 bytes)       Return address to the caller.
+       Pointer to mem  (8 bytes)       Pointer to target memory area. Can be
+                                       NULL, but not all such calls might be
+                                       recorded.
+
+In case of KMEMTRACE_EVENT_ALLOC events, the next fields follow:
+
+       Requested bytes (8 bytes)       Total number of requested bytes,
+                                       unsigned, must not be zero.
+       Allocated bytes (8 bytes)       Total number of actually allocated
+                                       bytes, unsigned, must not be lower
+                                       than requested bytes.
+       Requested flags (4 bytes)       GFP flags supplied by the caller.
+       Target CPU      (4 bytes)       Signed integer, valid for event id 1.
+                                       If equal to -1, target CPU is the same
+                                       as origin CPU, but the reverse might
+                                       not be true.
+
+The data is made available in the same endianness the machine has.
+
+Other event ids and type ids may be defined and added. Other fields may be
+added by increasing event size, but see below for details.
+Every modification to the ABI, including new id definitions, is
+followed by bumping the ABI version by one.
+
+Adding new data to the packet (features) is done at the end of the mandatory
+data:
+       Feature size    (2 bytes)
+       Feature ID      (1 byte)
+       Feature data    (Feature size - 3 bytes)
+
+
+Users:
+       kmemtrace-user - git://repo.or.cz/kmemtrace-user.git
+
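
The core packet layout described above can be pictured as a packed C
struct on the decoder side. The following is only a rough sketch (it is
not part of this patch; the struct and field names are illustrative and
not exported by the kernel); all fields are in the machine's native
endianness:

#include <stdint.h>

struct kmemtrace_event_hdr {
	uint8_t  event_id;    /* 0 = KMEMTRACE_EVENT_ALLOC, 1 = KMEMTRACE_EVENT_FREE */
	uint8_t  type_id;     /* 0 = kmalloc/kfree, 1 = kmem_cache_*, 2 = __get_free_pages() et al. */
	uint16_t event_size;  /* total event size; skip trailing bytes you do not know */
	int32_t  seq;         /* sequence number, may wrap around */
	uint64_t call_site;   /* return address to the caller */
	uint64_t ptr;         /* pointer to target memory area, may be 0 */
} __attribute__((packed));

/* Follows the header for KMEMTRACE_EVENT_ALLOC events. */
struct kmemtrace_alloc_body {
	uint64_t bytes_req;   /* requested bytes, must not be zero */
	uint64_t bytes_alloc; /* allocated bytes, never lower than bytes_req */
	uint32_t gfp_flags;   /* GFP flags supplied by the caller */
	int32_t  target_cpu;  /* -1 means same as origin CPU */
} __attribute__((packed));

/* Optional features appended after the mandatory data. */
struct kmemtrace_feature_hdr {
	uint16_t size;        /* feature size */
	uint8_t  id;          /* feature ID, followed by (size - 3) bytes of data */
} __attribute__((packed));
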
diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
index 803b131..fd9a3e6 100644
--- a/Documentation/ftrace.txt
+++ b/Documentation/ftrace.txt
@@ -15,31 +15,31 @@ Introduction
 
 Ftrace is an internal tracer designed to help out developers and
 designers of systems to find what is going on inside the kernel.
-It can be used for debugging or analyzing latencies and performance
-issues that take place outside of user-space.
+It can be used for debugging or analyzing latencies and
+performance issues that take place outside of user-space.
 
 Although ftrace is the function tracer, it also includes an
-infrastructure that allows for other types of tracing. Some of the
-tracers that are currently in ftrace include a tracer to trace
-context switches, the time it takes for a high priority task to
-run after it was woken up, the time interrupts are disabled, and
-more (ftrace allows for tracer plugins, which means that the list of
-tracers can always grow).
+infrastructure that allows for other types of tracing. Some of
+the tracers that are currently in ftrace include a tracer to
+trace context switches, the time it takes for a high priority
+task to run after it was woken up, the time interrupts are
+disabled, and more (ftrace allows for tracer plugins, which
+means that the list of tracers can always grow).
 
 
 The File System
 ---------------
 
-Ftrace uses the debugfs file system to hold the control files as well
-as the files to display output.
+Ftrace uses the debugfs file system to hold the control files as
+well as the files to display output.
 
 To mount the debugfs system:
 
   # mkdir /debug
   # mount -t debugfs nodev /debug
 
-(Note: it is more common to mount at /sys/kernel/debug, but for simplicity
- this document will use /debug)
+( Note: it is more common to mount at /sys/kernel/debug, but for
+  simplicity this document will use /debug)
 
 That's it! (assuming that you have ftrace configured into your kernel)
 
@@ -50,90 +50,124 @@ of ftrace. Here is a list of some of the key files:
 
  Note: all time values are in microseconds.
 
-  current_tracer: This is used to set or display the current tracer
-               that is configured.
-
-  available_tracers: This holds the different types of tracers that
-               have been compiled into the kernel. The tracers
-               listed here can be configured by echoing their name
-               into current_tracer.
-
-  tracing_enabled: This sets or displays whether the current_tracer
-               is activated and tracing or not. Echo 0 into this
-               file to disable the tracer or 1 to enable it.
-
-  trace: This file holds the output of the trace in a human readable
-               format (described below).
-
-  latency_trace: This file shows the same trace but the information
-               is organized more to display possible latencies
-               in the system (described below).
-
-  trace_pipe: The output is the same as the "trace" file but this
-               file is meant to be streamed with live tracing.
-               Reads from this file will block until new data
-               is retrieved. Unlike the "trace" and "latency_trace"
-               files, this file is a consumer. This means reading
-               from this file causes sequential reads to display
-               more current data. Once data is read from this
-               file, it is consumed, and will not be read
-               again with a sequential read. The "trace" and
-               "latency_trace" files are static, and if the
-               tracer is not adding more data, they will display
-               the same information every time they are read.
-
-  trace_options: This file lets the user control the amount of data
-               that is displayed in one of the above output
-               files.
-
-  trace_max_latency: Some of the tracers record the max latency.
-               For example, the time interrupts are disabled.
-               This time is saved in this file. The max trace
-               will also be stored, and displayed by either
-               "trace" or "latency_trace".  A new max trace will
-               only be recorded if the latency is greater than
-               the value in this file. (in microseconds)
-
-  buffer_size_kb: This sets or displays the number of kilobytes each CPU
-               buffer can hold. The tracer buffers are the same size
-               for each CPU. The displayed number is the size of the
-               CPU buffer and not total size of all buffers. The
-               trace buffers are allocated in pages (blocks of memory
-               that the kernel uses for allocation, usually 4 KB in size).
-               If the last page allocated has room for more bytes
-               than requested, the rest of the page will be used,
-               making the actual allocation bigger than requested.
-               (Note, the size may not be a multiple of the page size due
-               to buffer managment overhead.)
-
-               This can only be updated when the current_tracer
-               is set to "nop".
-
-  tracing_cpumask: This is a mask that lets the user only trace
-               on specified CPUS. The format is a hex string
-               representing the CPUS.
-
-  set_ftrace_filter: When dynamic ftrace is configured in (see the
-               section below "dynamic ftrace"), the code is dynamically
-               modified (code text rewrite) to disable calling of the
-               function profiler (mcount). This lets tracing be configured
-               in with practically no overhead in performance.  This also
-               has a side effect of enabling or disabling specific functions
-               to be traced. Echoing names of functions into this file
-               will limit the trace to only those functions.
-
-  set_ftrace_notrace: This has an effect opposite to that of
-               set_ftrace_filter. Any function that is added here will not
-               be traced. If a function exists in both set_ftrace_filter
-               and set_ftrace_notrace, the function will _not_ be traced.
-
-  set_ftrace_pid: Have the function tracer only trace a single thread.
-
-  available_filter_functions: This lists the functions that ftrace
-               has processed and can trace. These are the function
-               names that you can pass to "set_ftrace_filter" or
-               "set_ftrace_notrace". (See the section "dynamic ftrace"
-               below for more details.)
+  current_tracer:
+
+       This is used to set or display the current tracer
+       that is configured.
+
+  available_tracers:
+
+       This holds the different types of tracers that
+       have been compiled into the kernel. The
+       tracers listed here can be configured by
+       echoing their name into current_tracer.
+
+  tracing_enabled:
+
+       This sets or displays whether the current_tracer
+       is activated and tracing or not. Echo 0 into this
+       file to disable the tracer or 1 to enable it.
+
+  trace:
+
+       This file holds the output of the trace in a human
+       readable format (described below).
+
+  latency_trace:
+
+       This file shows the same trace but the information
+       is organized more to display possible latencies
+       in the system (described below).
+
+  trace_pipe:
+
+       The output is the same as the "trace" file but this
+       file is meant to be streamed with live tracing.
+       Reads from this file will block until new data
+       is retrieved. Unlike the "trace" and "latency_trace"
+       files, this file is a consumer. This means reading
+       from this file causes sequential reads to display
+       more current data. Once data is read from this
+       file, it is consumed, and will not be read
+       again with a sequential read. The "trace" and
+       "latency_trace" files are static, and if the
+       tracer is not adding more data, they will display
+       the same information every time they are read.
+
+  trace_options:
+
+       This file lets the user control the amount of data
+       that is displayed in one of the above output
+       files.
+
+  tracing_max_latency:
+
+       Some of the tracers record the max latency.
+       For example, the time interrupts are disabled.
+       This time is saved in this file. The max trace
+       will also be stored, and displayed by either
+       "trace" or "latency_trace".  A new max trace will
+       only be recorded if the latency is greater than
+       the value in this file. (in microseconds)
+
+  buffer_size_kb:
+
+       This sets or displays the number of kilobytes each CPU
+       buffer can hold. The tracer buffers are the same size
+       for each CPU. The displayed number is the size of the
+       CPU buffer and not total size of all buffers. The
+       trace buffers are allocated in pages (blocks of memory
+       that the kernel uses for allocation, usually 4 KB in size).
+       If the last page allocated has room for more bytes
+       than requested, the rest of the page will be used,
+       making the actual allocation bigger than requested.
+       ( Note, the size may not be a multiple of the page size
+         due to buffer management overhead. )
+
+       This can only be updated when the current_tracer
+       is set to "nop".
+
+  tracing_cpumask:
+
+       This is a mask that lets the user only trace
+       on specified CPUS. The format is a hex string
+       representing the CPUS.
+
+  set_ftrace_filter:
+
+       When dynamic ftrace is configured in (see the
+       section below "dynamic ftrace"), the code is dynamically
+       modified (code text rewrite) to disable calling of the
+       function profiler (mcount). This lets tracing be configured
+       in with practically no overhead in performance.  This also
+       has a side effect of enabling or disabling specific functions
+       to be traced. Echoing names of functions into this file
+       will limit the trace to only those functions.
+
+  set_ftrace_notrace:
+
+       This has an effect opposite to that of
+       set_ftrace_filter. Any function that is added here will not
+       be traced. If a function exists in both set_ftrace_filter
+       and set_ftrace_notrace, the function will _not_ be traced.
+
+  set_ftrace_pid:
+
+       Have the function tracer only trace a single thread.
+
+  set_graph_function:
+
+       Set a "trigger" function where tracing should start
+       with the function graph tracer (See the section
+       "dynamic ftrace" for more details).
+
+  available_filter_functions:
+
+       This lists the functions that ftrace
+       has processed and can trace. These are the function
+       names that you can pass to "set_ftrace_filter" or
+       "set_ftrace_notrace". (See the section "dynamic ftrace"
+       below for more details.)
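
The consumer behaviour of "trace_pipe" described in the list above can
be exercised with a few lines of C. This is only a sketch (not part of
this patch), assuming debugfs is mounted at /debug as in the rest of
this document:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	/* read() blocks until the tracer produces new data; whatever is
	   read here is consumed and will not be shown again. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}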
 
 
 The Tracers
@@ -141,36 +175,66 @@ The Tracers
 
 Here is the list of current tracers that may be configured.
 
-  function - function tracer that uses mcount to trace all functions.
+  "function"
+
+       Function call tracer to trace all kernel functions.
+
+  "function_graph"
+
+       Similar to the function tracer except that the
+       function tracer probes the functions on their entry
+       whereas the function graph tracer traces on both entry
+       and exit of the functions. It then provides the ability
+       to draw a graph of function calls similar to C code
+       source.
 
-  sched_switch - traces the context switches between tasks.
+  "sched_switch"
 
-  irqsoff - traces the areas that disable interrupts and saves
-               the trace with the longest max latency.
-               See tracing_max_latency.  When a new max is recorded,
-               it replaces the old trace. It is best to view this
-               trace via the latency_trace file.
+       Traces the context switches and wakeups between tasks.
 
-  preemptoff - Similar to irqsoff but traces and records the amount of
-               time for which preemption is disabled.
+  "irqsoff"
 
-  preemptirqsoff - Similar to irqsoff and preemptoff, but traces and
-                records the largest time for which irqs and/or preemption
-                is disabled.
+       Traces the areas that disable interrupts and saves
+       the trace with the longest max latency.
+       See tracing_max_latency. When a new max is recorded,
+       it replaces the old trace. It is best to view this
+       trace via the latency_trace file.
 
-  wakeup - Traces and records the max latency that it takes for
-               the highest priority task to get scheduled after
-               it has been woken up.
+  "preemptoff"
 
-  nop - This is not a tracer. To remove all tracers from tracing
-               simply echo "nop" into current_tracer.
+       Similar to irqsoff but traces and records the amount of
+       time for which preemption is disabled.
+
+  "preemptirqsoff"
+
+       Similar to irqsoff and preemptoff, but traces and
+       records the largest time for which irqs and/or preemption
+       is disabled.
+
+  "wakeup"
+
+       Traces and records the max latency that it takes for
+       the highest priority task to get scheduled after
+       it has been woken up.
+
+  "hw-branch-tracer"
+
+       Uses the BTS CPU feature on x86 CPUs to trace all
+       branches executed.
+
+  "nop"
+
+       This is the "trace nothing" tracer. To remove all
+       tracers from tracing simply echo "nop" into
+       current_tracer.
 
 
 Examples of using the tracer
 ----------------------------
 
-Here are typical examples of using the tracers when controlling them only
-with the debugfs interface (without using any user-land utilities).
+Here are typical examples of using the tracers when controlling
+them only with the debugfs interface (without using any
+user-land utilities).
 
 Output format:
 --------------
@@ -187,16 +251,16 @@ Here is an example of the output format of the file "trace"
             bash-4251  [01] 10152.583855: _atomic_dec_and_lock <-dput
                              --------
 
-A header is printed with the tracer name that is represented by the trace.
-In this case the tracer is "function". Then a header showing the format. Task
-name "bash", the task PID "4251", the CPU that it was running on
-"01", the timestamp in <secs>.<usecs> format, the function name that was
-traced "path_put" and the parent function that called this function
-"path_walk". The timestamp is the time at which the function was
-entered.
+A header is printed with the tracer name that is represented by
+the trace. In this case the tracer is "function". Then a header
+showing the format. Task name "bash", the task PID "4251", the
+CPU that it was running on "01", the timestamp in <secs>.<usecs>
+format, the function name that was traced "path_put" and the
+parent function that called this function "path_walk". The
+timestamp is the time at which the function was entered.
 
-The sched_switch tracer also includes tracing of task wakeups and
-context switches.
+The sched_switch tracer also includes tracing of task wakeups
+and context switches.
 
      ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +  2916:115:S
      ksoftirqd/1-7     [01]  1453.070013:      7:115:R   +    10:115:S
@@ -205,8 +269,8 @@ context switches.
      kondemand/1-2916  [01]  1453.070013:   2916:115:S ==>     7:115:R
      ksoftirqd/1-7     [01]  1453.070013:      7:115:S ==>     0:140:R
 
-Wake ups are represented by a "+" and the context switches are shown as
-"==>".  The format is:
+Wake ups are represented by a "+" and the context switches are
+shown as "==>".  The format is:
 
  Context switches:
 
@@ -220,19 +284,20 @@ Wake ups are represented by a "+" and the context switches are shown as
 
   <pid>:<prio>:<state>    +  <pid>:<prio>:<state>
 
-The prio is the internal kernel priority, which is the inverse of the
-priority that is usually displayed by user-space tools. Zero represents
-the highest priority (99). Prio 100 starts the "nice" priorities with
-100 being equal to nice -20 and 139 being nice 19. The prio "140" is
-reserved for the idle task which is the lowest priority thread (pid 0).
+The prio is the internal kernel priority, which is the inverse
+of the priority that is usually displayed by user-space tools.
+Zero represents the highest priority (99). Prio 100 starts the
+"nice" priorities with 100 being equal to nice -20 and 139 being
+nice 19. The prio "140" is reserved for the idle task which is
+the lowest priority thread (pid 0).
 
 
 Latency trace format
 --------------------
 
-For traces that display latency times, the latency_trace file gives
-somewhat more information to see why a latency happened. Here is a typical
-trace.
+For traces that display latency times, the latency_trace file
+gives somewhat more information to see why a latency happened.
+Here is a typical trace.
 
 # tracer: irqsoff
 #
@@ -259,20 +324,20 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
   <idle>-0     0d.s1   98us : trace_hardirqs_on (do_softirq)
 
 
+This shows that the current tracer is "irqsoff" tracing the time
+for which interrupts were disabled. It gives the trace version
+and the version of the kernel upon which this was executed
+(2.6.26-rc8). Then it displays the max latency in microsecs (97
+us). The number of trace entries displayed and the total number
+recorded (both are three: #3/3). The type of preemption that was
+used (PREEMPT). VP, KP, SP, and HP are always zero and are
+reserved for later use. #P is the number of online CPUS (#P:2).
 
-This shows that the current tracer is "irqsoff" tracing the time for which
-interrupts were disabled. It gives the trace version and the version
-of the kernel upon which this was executed on (2.6.26-rc8). Then it displays
-the max latency in microsecs (97 us). The number of trace entries displayed
-and the total number recorded (both are three: #3/3). The type of
-preemption that was used (PREEMPT). VP, KP, SP, and HP are always zero
-and are reserved for later use. #P is the number of online CPUS (#P:2).
-
-The task is the process that was running when the latency occurred.
-(swapper pid: 0).
+The task is the process that was running when the latency
+occurred. (swapper pid: 0).
 
-The start and stop (the functions in which the interrupts were disabled and
-enabled respectively) that caused the latencies:
+The start and stop (the functions in which the interrupts were
+disabled and enabled respectively) that caused the latencies:
 
   apic_timer_interrupt is where the interrupts were disabled.
   do_softirq is where they were enabled again.
@@ -308,12 +373,12 @@ The above is mostly meaningful for kernel developers.
        latency_trace file is relative to the start of the trace.
 
   delay: This is just to help catch your eye a bit better. And
-       needs to be fixed to be only relative to the same CPU.
-       The marks are determined by the difference between this
-       current trace and the next trace.
-        '!' - greater than preempt_mark_thresh (default 100)
-        '+' - greater than 1 microsecond
-        ' ' - less than or equal to 1 microsecond.
+        needs to be fixed to be only relative to the same CPU.
+        The marks are determined by the difference between this
+        current trace and the next trace.
+         '!' - greater than preempt_mark_thresh (default 100)
+         '+' - greater than 1 microsecond
+         ' ' - less than or equal to 1 microsecond.
 
   The rest is the same as the 'trace' file.
 
@@ -321,14 +386,15 @@ The above is mostly meaningful for kernel developers.
 trace_options
 -------------
 
-The trace_options file is used to control what gets printed in the trace
-output. To see what is available, simply cat the file:
+The trace_options file is used to control what gets printed in
+the trace output. To see what is available, simply cat the file:
 
   cat /debug/tracing/trace_options
   print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
- noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
+noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
 
-To disable one of the options, echo in the option prepended with "no".
+To disable one of the options, echo in the option prepended with
+"no".
 
   echo noprint-parent > /debug/tracing/trace_options
 
@@ -338,8 +404,8 @@ To enable an option, leave off the "no".
 
 Here are the available options:
 
-  print-parent - On function traces, display the calling function
-               as well as the function being traced.
+  print-parent - On function traces, display the calling (parent)
+                function as well as the function being traced.
 
   print-parent:
    bash-4000  [01]  1477.606694: simple_strtoul <-strict_strtoul
@@ -348,15 +414,16 @@ Here are the available options:
    bash-4000  [01]  1477.606694: simple_strtoul
 
 
-  sym-offset - Display not only the function name, but also the offset
-               in the function. For example, instead of seeing just
-               "ktime_get", you will see "ktime_get+0xb/0x20".
+  sym-offset - Display not only the function name, but also the
+              offset in the function. For example, instead of
+              seeing just "ktime_get", you will see
+              "ktime_get+0xb/0x20".
 
   sym-offset:
    bash-4000  [01]  1477.606694: simple_strtoul+0x6/0xa0
 
-  sym-addr - this will also display the function address as well as
-               the function name.
+  sym-addr - this will also display the function address as well
+            as the function name.
 
   sym-addr:
    bash-4000  [01]  1477.606694: simple_strtoul <c0339346>
@@ -366,35 +433,41 @@ Here are the available options:
     bash  4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \
     (+0.000ms): simple_strtoul (strict_strtoul)
 
-  raw - This will display raw numbers. This option is best for use with
-       user applications that can translate the raw numbers better than
-       having it done in the kernel.
+  raw - This will display raw numbers. This option is best for
+       use with user applications that can translate the raw
+       numbers better than having it done in the kernel.
 
-  hex - Similar to raw, but the numbers will be in a hexadecimal format.
+  hex - Similar to raw, but the numbers will be in a hexadecimal
+       format.
 
   bin - This will print out the formats in raw binary.
 
   block - TBD (needs update)
 
-  stacktrace - This is one of the options that changes the trace itself.
-               When a trace is recorded, so is the stack of functions.
-               This allows for back traces of trace sites.
+  stacktrace - This is one of the options that changes the trace
+              itself. When a trace is recorded, so is the stack
+              of functions. This allows for back traces of
+              trace sites.
 
-  userstacktrace - This option changes the trace.
-                  It records a stacktrace of the current userspace thread.
+  userstacktrace - This option changes the trace. It records a
+                  stacktrace of the current userspace thread.
 
-  sym-userobj - when user stacktrace are enabled, look up which object the
-               address belongs to, and print a relative address
-               This is especially useful when ASLR is on, otherwise you don't
-               get a chance to resolve the address to object/file/line after the app is no
-               longer running
+  sym-userobj - when user stacktraces are enabled, look up which
+               object the address belongs to, and print a
+               relative address. This is especially useful when
+               ASLR is on, otherwise you don't get a chance to
+               resolve the address to object/file/line after
+               the app is no longer running
 
-               The lookup is performed when you read trace,trace_pipe,latency_trace. Example:
+               The lookup is performed when you read
+               trace,trace_pipe,latency_trace. Example:
 
                a.out-1623  [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0
 x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
 
-  sched-tree - TBD (any users??)
+  sched-tree - trace all tasks that are on the runqueue, at
+              every scheduling event. Will add overhead if
+              there are a lot of tasks running at once.
 
 
 sched_switch
@@ -431,18 +504,19 @@ of how to use it.
  [...]
 
 
-As we have discussed previously about this format, the header shows
-the name of the trace and points to the options. The "FUNCTION"
-is a misnomer since here it represents the wake ups and context
-switches.
+As we have discussed previously about this format, the header
+shows the name of the trace and points to the options. The
+"FUNCTION" is a misnomer since here it represents the wake ups
+and context switches.
 
-The sched_switch file only lists the wake ups (represented with '+')
-and context switches ('==>') with the previous task or current task
-first followed by the next task or task waking up. The format for both
-of these is PID:KERNEL-PRIO:TASK-STATE. Remember that the KERNEL-PRIO
-is the inverse of the actual priority with zero (0) being the highest
-priority and the nice values starting at 100 (nice -20). Below is
-a quick chart to map the kernel priority to user land priorities.
+The sched_switch file only lists the wake ups (represented with
+'+') and context switches ('==>') with the previous task or
+current task first followed by the next task or task waking up.
+The format for both of these is PID:KERNEL-PRIO:TASK-STATE.
+Remember that the KERNEL-PRIO is the inverse of the actual
+priority with zero (0) being the highest priority and the nice
+values starting at 100 (nice -20). Below is a quick chart to map
+the kernel priority to user land priorities.
 
   Kernel priority: 0 to 99    ==> user RT priority 99 to 0
   Kernel priority: 100 to 139 ==> user nice -20 to 19
@@ -463,10 +537,10 @@ The task states are:
 ftrace_enabled
 --------------
 
-The following tracers (listed below) give different output depending
-on whether or not the sysctl ftrace_enabled is set. To set ftrace_enabled,
-one can either use the sysctl function or set it via the proc
-file system interface.
+The following tracers (listed below) give different output
+depending on whether or not the sysctl ftrace_enabled is set. To
+set ftrace_enabled, one can either use the sysctl function or
+set it via the proc file system interface.
 
   sysctl kernel.ftrace_enabled=1
 
@@ -474,12 +548,12 @@ file system interface.
 
   echo 1 > /proc/sys/kernel/ftrace_enabled
 
-To disable ftrace_enabled simply replace the '1' with '0' in
-the above commands.
+To disable ftrace_enabled simply replace the '1' with '0' in the
+above commands.
 
-When ftrace_enabled is set the tracers will also record the functions
-that are within the trace. The descriptions of the tracers
-will also show an example with ftrace enabled.
+When ftrace_enabled is set the tracers will also record the
+functions that are within the trace. The descriptions of the
+tracers will also show an example with ftrace enabled.
 
 
 irqsoff
@@ -487,17 +561,18 @@ irqsoff
 
 When interrupts are disabled, the CPU can not react to any other
 external event (besides NMIs and SMIs). This prevents the timer
-interrupt from triggering or the mouse interrupt from letting the
-kernel know of a new mouse event. The result is a latency with the
-reaction time.
+interrupt from triggering or the mouse interrupt from letting
+the kernel know of a new mouse event. The result is a latency
+with the reaction time.
 
-The irqsoff tracer tracks the time for which interrupts are disabled.
-When a new maximum latency is hit, the tracer saves the trace leading up
-to that latency point so that every time a new maximum is reached, the old
-saved trace is discarded and the new trace is saved.
+The irqsoff tracer tracks the time for which interrupts are
+disabled. When a new maximum latency is hit, the tracer saves
+the trace leading up to that latency point so that every time a
+new maximum is reached, the old saved trace is discarded and the
+new trace is saved.
 
-To reset the maximum, echo 0 into tracing_max_latency. Here is an
-example:
+To reset the maximum, echo 0 into tracing_max_latency. Here is
+an example:
 
  # echo irqsoff > /debug/tracing/current_tracer
  # echo 0 > /debug/tracing/tracing_max_latency
@@ -532,10 +607,11 @@ irqsoff latency trace v1.1.5 on 2.6.26
 
 
 Here we see that we had a latency of 12 microsecs (which is
-very good). The _write_lock_irq in sys_setpgid disabled interrupts.
-The difference between the 12 and the displayed timestamp 14us occurred
-because the clock was incremented between the time of recording the max
-latency and the time of recording the function that had that latency.
+very good). The _write_lock_irq in sys_setpgid disabled
+interrupts. The difference between the 12 and the displayed
+timestamp 14us occurred because the clock was incremented
+between the time of recording the max latency and the time of
+recording the function that had that latency.
 
 Note the above example had ftrace_enabled not set. If we set the
 ftrace_enabled, we get a much larger output:
@@ -586,24 +662,24 @@ irqsoff latency trace v1.1.5 on 2.6.26-rc8
 
 
 Here we traced a 50 microsecond latency. But we also see all the
-functions that were called during that time. Note that by enabling
-function tracing, we incur an added overhead. This overhead may
-extend the latency times. But nevertheless, this trace has provided
-some very helpful debugging information.
+functions that were called during that time. Note that by
+enabling function tracing, we incur an added overhead. This
+overhead may extend the latency times. But nevertheless, this
+trace has provided some very helpful debugging information.
 
 
 preemptoff
 ----------
 
-When preemption is disabled, we may be able to receive interrupts but
-the task cannot be preempted and a higher priority task must wait
-for preemption to be enabled again before it can preempt a lower
-priority task.
+When preemption is disabled, we may be able to receive
+interrupts but the task cannot be preempted and a higher
+priority task must wait for preemption to be enabled again
+before it can preempt a lower priority task.
 
 The preemptoff tracer traces the places that disable preemption.
-Like the irqsoff tracer, it records the maximum latency for which preemption
-was disabled. The control of preemptoff tracer is much like the irqsoff
-tracer.
+Like the irqsoff tracer, it records the maximum latency for
+which preemption was disabled. The control of preemptoff tracer
+is much like the irqsoff tracer.
 
  # echo preemptoff > /debug/tracing/current_tracer
  # echo 0 > /debug/tracing/tracing_max_latency
@@ -637,11 +713,12 @@ preemptoff latency trace v1.1.5 on 2.6.26-rc8
     sshd-4261  0d.s1   30us : trace_preempt_on (__do_softirq)
 
 
-This has some more changes. Preemption was disabled when an interrupt
-came in (notice the 'h'), and was enabled while doing a softirq.
-(notice the 's'). But we also see that interrupts have been disabled
-when entering the preempt off section and leaving it (the 'd').
-We do not know if interrupts were enabled in the mean time.
+This has some more changes. Preemption was disabled when an
+interrupt came in (notice the 'h'), and was enabled while doing
+a softirq. (notice the 's'). But we also see that interrupts
+have been disabled when entering the preempt off section and
+leaving it (the 'd'). We do not know if interrupts were enabled
+in the mean time.
 
 # tracer: preemptoff
 #
@@ -700,28 +777,30 @@ preemptoff latency trace v1.1.5 on 2.6.26-rc8
     sshd-4261  0d.s1   64us : trace_preempt_on (__do_softirq)
 
 
-The above is an example of the preemptoff trace with ftrace_enabled
-set. Here we see that interrupts were disabled the entire time.
-The irq_enter code lets us know that we entered an interrupt 'h'.
-Before that, the functions being traced still show that it is not
-in an interrupt, but we can see from the functions themselves that
-this is not the case.
+The above is an example of the preemptoff trace with
+ftrace_enabled set. Here we see that interrupts were disabled
+the entire time. The irq_enter code lets us know that we entered
+an interrupt 'h'. Before that, the functions being traced still
+show that it is not in an interrupt, but we can see from the
+functions themselves that this is not the case.
 
-Notice that __do_softirq when called does not have a preempt_count.
-It may seem that we missed a preempt enabling. What really happened
-is that the preempt count is held on the thread's stack and we
-switched to the softirq stack (4K stacks in effect). The code
-does not copy the preempt count, but because interrupts are disabled,
-we do not need to worry about it. Having a tracer like this is good
-for letting people know what really happens inside the kernel.
+Notice that __do_softirq when called does not have a
+preempt_count. It may seem that we missed a preempt enabling.
+What really happened is that the preempt count is held on the
+thread's stack and we switched to the softirq stack (4K stacks
+in effect). The code does not copy the preempt count, but
+because interrupts are disabled, we do not need to worry about
+it. Having a tracer like this is good for letting people know
+what really happens inside the kernel.
 
 
 preemptirqsoff
 --------------
 
-Knowing the locations that have interrupts disabled or preemption
-disabled for the longest times is helpful. But sometimes we would
-like to know when either preemption and/or interrupts are disabled.
+Knowing the locations that have interrupts disabled or
+preemption disabled for the longest times is helpful. But
+sometimes we would like to know when either preemption and/or
+interrupts are disabled.
 
 Consider the following code:
 
@@ -741,11 +820,13 @@ The preemptoff tracer will record the total length of
 call_function_with_irqs_and_preemption_off() and
 call_function_with_preemption_off().
 
-But neither will trace the time that interrupts and/or preemption
-is disabled. This total time is the time that we can not schedule.
-To record this time, use the preemptirqsoff tracer.
+But neither will trace the time that interrupts and/or
+preemption is disabled. This total time is the time that we can
+not schedule. To record this time, use the preemptirqsoff
+tracer.
 
-Again, using this trace is much like the irqsoff and preemptoff tracers.
+Again, using this trace is much like the irqsoff and preemptoff
+tracers.
 
  # echo preemptirqsoff > /debug/tracing/current_tracer
  # echo 0 > /debug/tracing/tracing_max_latency
@@ -781,9 +862,10 @@ preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
 
 
 The trace_hardirqs_off_thunk is called from assembly on x86 when
-interrupts are disabled in the assembly code. Without the function
-tracing, we do not know if interrupts were enabled within the preemption
-points. We do see that it started with preemption enabled.
+interrupts are disabled in the assembly code. Without the
+function tracing, we do not know if interrupts were enabled
+within the preemption points. We do see that it started with
+preemption enabled.
 
 Here is a trace with ftrace_enabled set:
 
@@ -871,40 +953,42 @@ preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
     sshd-4261  0d.s1  105us : trace_preempt_on (__do_softirq)
 
 
-This is a very interesting trace. It started with the preemption of
-the ls task. We see that the task had the "need_resched" bit set
-via the 'N' in the trace.  Interrupts were disabled before the spin_lock
-at the beginning of the trace. We see that a schedule took place to run
-sshd.  When the interrupts were enabled, we took an interrupt.
-On return from the interrupt handler, the softirq ran. We took another
-interrupt while running the softirq as we see from the capital 'H'.
+This is a very interesting trace. It started with the preemption
+of the ls task. We see that the task had the "need_resched" bit
+set via the 'N' in the trace.  Interrupts were disabled before
+the spin_lock at the beginning of the trace. We see that a
+schedule took place to run sshd.  When the interrupts were
+enabled, we took an interrupt. On return from the interrupt
+handler, the softirq ran. We took another interrupt while
+running the softirq as we see from the capital 'H'.
 
 
 wakeup
 ------
 
-In a Real-Time environment it is very important to know the wakeup
-time it takes for the highest priority task that is woken up to the
-time that it executes. This is also known as "schedule latency".
-I stress the point that this is about RT tasks. It is also important
-to know the scheduling latency of non-RT tasks, but the average
-schedule latency is better for non-RT tasks. Tools like
-LatencyTop are more appropriate for such measurements.
+In a Real-Time environment it is very important to know the
+time it takes from when the highest priority task is woken up
+to when it executes. This is also known as "schedule
+latency". I stress the point that this is about RT tasks. It is
+also important to know the scheduling latency of non-RT tasks,
+but the average schedule latency is better for non-RT tasks.
+Tools like LatencyTop are more appropriate for such
+measurements.
 
 Real-Time environments are interested in the worst case latency.
-That is the longest latency it takes for something to happen, and
-not the average. We can have a very fast scheduler that may only
-have a large latency once in a while, but that would not work well
-with Real-Time tasks.  The wakeup tracer was designed to record
-the worst case wakeups of RT tasks. Non-RT tasks are not recorded
-because the tracer only records one worst case and tracing non-RT
-tasks that are unpredictable will overwrite the worst case latency
-of RT tasks.
-
-Since this tracer only deals with RT tasks, we will run this slightly
-differently than we did with the previous tracers. Instead of performing
-an 'ls', we will run 'sleep 1' under 'chrt' which changes the
-priority of the task.
+That is the longest latency it takes for something to happen,
+and not the average. We can have a very fast scheduler that may
+only have a large latency once in a while, but that would not
+work well with Real-Time tasks.  The wakeup tracer was designed
+to record the worst case wakeups of RT tasks. Non-RT tasks are
+not recorded because the tracer only records one worst case and
+tracing non-RT tasks that are unpredictable will overwrite the
+worst case latency of RT tasks.
+
+Since this tracer only deals with RT tasks, we will run this
+slightly differently than we did with the previous tracers.
+Instead of performing an 'ls', we will run 'sleep 1' under
+'chrt' which changes the priority of the task.
 
  # echo wakeup > /debug/tracing/current_tracer
  # echo 0 > /debug/tracing/tracing_max_latency
@@ -934,17 +1018,16 @@ wakeup latency trace v1.1.5 on 2.6.26-rc8
   <idle>-0     1d..4    4us : schedule (cpu_idle)
 
 
+Running this on an idle system, we see that it only took 4
+microseconds to perform the task switch.  Note, since the trace
+marker in the schedule is before the actual "switch", we stop
+the tracing when the recorded task is about to schedule in. This
+may change if we add a new marker at the end of the scheduler.
 
-Running this on an idle system, we see that it only took 4 microseconds
-to perform the task switch.  Note, since the trace marker in the
-schedule is before the actual "switch", we stop the tracing when
-the recorded task is about to schedule in. This may change if
-we add a new marker at the end of the scheduler.
-
-Notice that the recorded task is 'sleep' with the PID of 4901 and it
-has an rt_prio of 5. This priority is user-space priority and not
-the internal kernel priority. The policy is 1 for SCHED_FIFO and 2
-for SCHED_RR.
+Notice that the recorded task is 'sleep' with the PID of 4901
+and it has an rt_prio of 5. This priority is user-space priority
+and not the internal kernel priority. The policy is 1 for
+SCHED_FIFO and 2 for SCHED_RR.
 
 Doing the same with chrt -r 5 and ftrace_enabled set.
 
@@ -1001,24 +1084,25 @@ ksoftirq-7     1d..6   49us : _spin_unlock (tracing_record_cmdline)
 ksoftirq-7     1d..6   49us : sub_preempt_count (_spin_unlock)
 ksoftirq-7     1d..4   50us : schedule (__cond_resched)
 
-The interrupt went off while running ksoftirqd. This task runs at
-SCHED_OTHER. Why did not we see the 'N' set early? This may be
-a harmless bug with x86_32 and 4K stacks. On x86_32 with 4K stacks
-configured, the interrupt and softirq run with their own stack.
-Some information is held on the top of the task's stack (need_resched
-and preempt_count are both stored there). The setting of the NEED_RESCHED
-bit is done directly to the task's stack, but the reading of the
-NEED_RESCHED is done by looking at the current stack, which in this case
-is the stack for the hard interrupt. This hides the fact that NEED_RESCHED
-has been set. We do not see the 'N' until we switch back to the task's
+The interrupt went off while running ksoftirqd. This task runs
+at SCHED_OTHER. Why did we not see the 'N' set early? This may
+be a harmless bug with x86_32 and 4K stacks. On x86_32 with 4K
+stacks configured, the interrupt and softirq run with their own
+stack. Some information is held on the top of the task's stack
+(need_resched and preempt_count are both stored there). The
+setting of the NEED_RESCHED bit is done directly to the task's
+stack, but the reading of the NEED_RESCHED is done by looking at
+the current stack, which in this case is the stack for the hard
+interrupt. This hides the fact that NEED_RESCHED has been set.
+We do not see the 'N' until we switch back to the task's
 assigned stack.
 
 function
 --------
 
 This tracer is the function tracer. Enabling the function tracer
-can be done from the debug file system. Make sure the ftrace_enabled is
-set; otherwise this tracer is a nop.
+can be done from the debug file system. Make sure the
+ftrace_enabled is set; otherwise this tracer is a nop.
 
  # sysctl kernel.ftrace_enabled=1
  # echo function > /debug/tracing/current_tracer
@@ -1048,14 +1132,15 @@ set; otherwise this tracer is a nop.
 [...]
 
 
-Note: function tracer uses ring buffers to store the above entries.
-The newest data may overwrite the oldest data. Sometimes using echo to
-stop the trace is not sufficient because the tracing could have overwritten
-the data that you wanted to record. For this reason, it is sometimes better to
-disable tracing directly from a program. This allows you to stop the
-tracing at the point that you hit the part that you are interested in.
-To disable the tracing directly from a C program, something like following
-code snippet can be used:
+Note: function tracer uses ring buffers to store the above
+entries. The newest data may overwrite the oldest data.
+Sometimes using echo to stop the trace is not sufficient because
+the tracing could have overwritten the data that you wanted to
+record. For this reason, it is sometimes better to disable
+tracing directly from a program. This allows you to stop the
+tracing at the point that you hit the part that you are
+interested in. To disable the tracing directly from a C program,
+something like following code snippet can be used:
 
 int trace_fd;
 [...]
@@ -1070,10 +1155,10 @@ int main(int argc, char *argv[]) {
 }
 
 Note: Here we hard coded the path name. The debugfs mount is not
-guaranteed to be at /debug (and is more commonly at /sys/kernel/debug).
-For simple one time traces, the above is sufficent. For anything else,
-a search through /proc/mounts may be needed to find where the debugfs
-file-system is mounted.
+guaranteed to be at /debug (and is more commonly at
+/sys/kernel/debug). For simple one time traces, the above is
+sufficient. For anything else, a search through /proc/mounts may
+be needed to find where the debugfs file-system is mounted.
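
A self-contained variant of the snippet above might look like the
following. It is only a sketch (not part of this patch); the /debug
path is assumed as elsewhere in this document, and condition_hit() is a
hypothetical placeholder for whatever event should stop the trace:

#include <fcntl.h>
#include <unistd.h>

static int condition_hit(void)
{
	/* placeholder: return non-zero when the interesting event occurs */
	return 1;
}

int main(void)
{
	int trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);

	if (trace_fd < 0)
		return 1;

	/* ... run the workload being investigated ... */

	if (condition_hit())
		write(trace_fd, "0", 1);	/* stop tracing right here */

	close(trace_fd);
	return 0;
}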
 
 
 Single thread tracing
@@ -1152,49 +1237,297 @@ int main (int argc, char **argv)
         return 0;
 }
 
+
+hw-branch-tracer (x86 only)
+---------------------------
+
+This tracer uses the x86 last branch tracing hardware feature to
+collect a branch trace on all cpus with relatively low overhead.
+
+The tracer uses a fixed-size circular buffer per cpu and only
+traces ring 0 branches. The trace file dumps that buffer in the
+following format:
+
+# tracer: hw-branch-tracer
+#
+# CPU#        TO  <-  FROM
+   0  scheduler_tick+0xb5/0x1bf          <-  task_tick_idle+0x5/0x6
+   2  run_posix_cpu_timers+0x2b/0x72a    <-  run_posix_cpu_timers+0x25/0x72a
+   0  scheduler_tick+0x139/0x1bf         <-  scheduler_tick+0xed/0x1bf
+   0  scheduler_tick+0x17c/0x1bf         <-  scheduler_tick+0x148/0x1bf
+   2  run_posix_cpu_timers+0x9e/0x72a    <-  run_posix_cpu_timers+0x5e/0x72a
+   0  scheduler_tick+0x1b6/0x1bf         <-  scheduler_tick+0x1aa/0x1bf
+
+
+The tracer may be used to dump the trace for the oops'ing cpu on
+a kernel oops into the system log. To enable this,
+ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
+can either use the sysctl function or set it via the proc system
+interface.
+
+  sysctl kernel.ftrace_dump_on_oops=1
+
+or
+
+  echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
+
+
+Here's an example of such a dump after a null pointer
+dereference in a kernel module:
+
+[57848.105921] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+[57848.106019] IP: [<ffffffffa0000006>] open+0x6/0x14 [oops]
+[57848.106019] PGD 2354e9067 PUD 2375e7067 PMD 0
+[57848.106019] Oops: 0002 [#1] SMP
+[57848.106019] last sysfs file: /sys/devices/pci0000:00/0000:00:1e.0/0000:20:05.0/local_cpus
+[57848.106019] Dumping ftrace buffer:
+[57848.106019] ---------------------------------
+[...]
+[57848.106019]    0  chrdev_open+0xe6/0x165      <-  cdev_put+0x23/0x24
+[57848.106019]    0  chrdev_open+0x117/0x165     <-  chrdev_open+0xfa/0x165
+[57848.106019]    0  chrdev_open+0x120/0x165     <-  chrdev_open+0x11c/0x165
+[57848.106019]    0  chrdev_open+0x134/0x165     <-  chrdev_open+0x12b/0x165
+[57848.106019]    0  open+0x0/0x14 [oops]        <-  chrdev_open+0x144/0x165
+[57848.106019]    0  page_fault+0x0/0x30         <-  open+0x6/0x14 [oops]
+[57848.106019]    0  error_entry+0x0/0x5b        <-  page_fault+0x4/0x30
+[57848.106019]    0  error_kernelspace+0x0/0x31          <-  error_entry+0x59/0x5b
+[57848.106019]    0  error_sti+0x0/0x1   <-  error_kernelspace+0x2d/0x31
+[57848.106019]    0  page_fault+0x9/0x30         <-  error_sti+0x0/0x1
+[57848.106019]    0  do_page_fault+0x0/0x881     <-  page_fault+0x1a/0x30
+[...]
+[57848.106019]    0  do_page_fault+0x66b/0x881   <-  is_prefetch+0x1ee/0x1f2
+[57848.106019]    0  do_page_fault+0x6e0/0x881   <-  do_page_fault+0x67a/0x881
+[57848.106019]    0  oops_begin+0x0/0x96         <-  do_page_fault+0x6e0/0x881
+[57848.106019]    0  trace_hw_branch_oops+0x0/0x2d       <-  oops_begin+0x9/0x96
+[...]
+[57848.106019]    0  ds_suspend_bts+0x2a/0xe3    <-  ds_suspend_bts+0x1a/0xe3
+[57848.106019] ---------------------------------
+[57848.106019] CPU 0
+[57848.106019] Modules linked in: oops
+[57848.106019] Pid: 5542, comm: cat Tainted: G        W  2.6.28 #23
+[57848.106019] RIP: 0010:[<ffffffffa0000006>]  [<ffffffffa0000006>] open+0x6/0x14 [oops]
+[57848.106019] RSP: 0018:ffff880235457d48  EFLAGS: 00010246
+[...]
+
+
+function graph tracer
+---------------------------
+
+This tracer is similar to the function tracer except that it
+probes a function on both its entry and its exit. This is done
+by using a dynamically allocated stack of return addresses in
+each task_struct. On function entry the tracer overwrites the
+return address of each traced function to install a custom
+probe; the original return address is saved on the stack of
+return addresses in the task_struct.
+
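+As a rough, simplified sketch (illustrative only, not the
+kernel's actual implementation; the names, sizes and the single
+global array below are made up for clarity), the per-task return
+stack idea can be pictured like this:
+
+  #define RET_STACK_DEPTH 50
+
+  struct ret_stack_entry {
+          unsigned long ret;              /* original return address */
+          unsigned long func;             /* traced function address */
+          unsigned long long calltime;    /* timestamp taken at entry */
+  };
+
+  struct ret_stack_entry ret_stack[RET_STACK_DEPTH];
+  int curr_ret_stack = -1;
+
+  /* On function entry: save the real return address before the
+   * return address in the frame is replaced by the probe. */
+  int push_return_trace(unsigned long ret, unsigned long func,
+                        unsigned long long time)
+  {
+          if (curr_ret_stack >= RET_STACK_DEPTH - 1)
+                  return -1;      /* stack full: give up on this call */
+          curr_ret_stack++;
+          ret_stack[curr_ret_stack].ret = ret;
+          ret_stack[curr_ret_stack].func = func;
+          ret_stack[curr_ret_stack].calltime = time;
+          return 0;
+  }
+
+  /* On function exit: hand back the original return address so
+   * the traced function returns to its real caller. */
+  unsigned long pop_return_trace(void)
+  {
+          return ret_stack[curr_ret_stack--].ret;
+  }
+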
+Probing both ends of a function leads to special features such
+as:
+
+- measuring a function's execution time
+- having a reliable call stack from which to draw a graph of
+  function calls
+
+This tracer is useful in several situations:
+
+- you want to find the reason for a strange kernel behavior and
+  need to see what happens in detail in any area (or in specific
+  ones).
+
+- you are experiencing weird latencies but it's difficult to
+  find their origin.
+
+- you want to quickly find which path is taken by a specific
+  function.
+
+- you just want to peek inside a working kernel and see what
+  happens there.
+
+# tracer: function_graph
+#
+# CPU  DURATION                  FUNCTION CALLS
+# |     |   |                     |   |   |   |
+
+ 0)               |  sys_open() {
+ 0)               |    do_sys_open() {
+ 0)               |      getname() {
+ 0)               |        kmem_cache_alloc() {
+ 0)   1.382 us    |          __might_sleep();
+ 0)   2.478 us    |        }
+ 0)               |        strncpy_from_user() {
+ 0)               |          might_fault() {
+ 0)   1.389 us    |            __might_sleep();
+ 0)   2.553 us    |          }
+ 0)   3.807 us    |        }
+ 0)   7.876 us    |      }
+ 0)               |      alloc_fd() {
+ 0)   0.668 us    |        _spin_lock();
+ 0)   0.570 us    |        expand_files();
+ 0)   0.586 us    |        _spin_unlock();
+
+
+There are several columns that can be dynamically
+enabled/disabled. You can use any combination of options you
+want, depending on your needs.
+
+- The cpu number on which the function executed is enabled by
+  default.  It is sometimes better to trace only one cpu (see
+  the tracing_cpu_mask file), otherwise you may see seemingly
+  unordered function calls when the trace switches from one cpu
+  to another.
+
+       hide: echo nofuncgraph-cpu > /debug/tracing/trace_options
+       show: echo funcgraph-cpu > /debug/tracing/trace_options
+
+- The duration (the function's execution time) is displayed on
+  the closing bracket line of a function, or on the same line as
+  the function itself if it is a leaf. It is enabled by default.
+
+       hide: echo nofuncgraph-duration > /debug/tracing/trace_options
+       show: echo funcgraph-duration > /debug/tracing/trace_options
+
+- The overhead field precedes the duration field when the
+  duration exceeds certain thresholds.
+
+       hide: echo nofuncgraph-overhead > /debug/tracing/trace_options
+       show: echo funcgraph-overhead > /debug/tracing/trace_options
+       depends on: funcgraph-duration
+
+  ie:
+
+  0)               |    up_write() {
+  0)   0.646 us    |      _spin_lock_irqsave();
+  0)   0.684 us    |      _spin_unlock_irqrestore();
+  0)   3.123 us    |    }
+  0)   0.548 us    |    fput();
+  0) + 58.628 us   |  }
+
+  [...]
+
+  0)               |      putname() {
+  0)               |        kmem_cache_free() {
+  0)   0.518 us    |          __phys_addr();
+  0)   1.757 us    |        }
+  0)   2.861 us    |      }
+  0) ! 115.305 us  |    }
+  0) ! 116.402 us  |  }
+
+  + means that the function exceeded 10 usecs.
+  ! means that the function exceeded 100 usecs.
+
+
+- The task/pid field displays the command name and pid of the
+  thread that executed the function. It is disabled by default.
+
+       hide: echo nofuncgraph-proc > /debug/tracing/trace_options
+       show: echo funcgraph-proc > /debug/tracing/trace_options
+
+  ie:
+
+  # tracer: function_graph
+  #
+  # CPU  TASK/PID        DURATION                  FUNCTION CALLS
+  # |    |    |           |   |                     |   |   |   |
+  0)    sh-4802     |               |                  d_free() {
+  0)    sh-4802     |               |                    call_rcu() {
+  0)    sh-4802     |               |                      __call_rcu() {
+  0)    sh-4802     |   0.616 us    |                        rcu_process_gp_end();
+  0)    sh-4802     |   0.586 us    |                        check_for_new_grace_period();
+  0)    sh-4802     |   2.899 us    |                      }
+  0)    sh-4802     |   4.040 us    |                    }
+  0)    sh-4802     |   5.151 us    |                  }
+  0)    sh-4802     | + 49.370 us   |                }
+
+
+- The absolute time field is an absolute timestamp given by the
+  system clock since it started. A snapshot of this time is
+  taken on each entry and exit of a function.
+
+       hide: echo nofuncgraph-abstime > /debug/tracing/trace_options
+       show: echo funcgraph-abstime > /debug/tracing/trace_options
+
+  ie:
+
+  #
+  #      TIME       CPU  DURATION                  FUNCTION CALLS
+  #       |         |     |   |                     |   |   |   |
+  360.774522 |   1)   0.541 us    |                                          }
+  360.774522 |   1)   4.663 us    |                                        }
+  360.774523 |   1)   0.541 us    |                                        __wake_up_bit();
+  360.774524 |   1)   6.796 us    |                                      }
+  360.774524 |   1)   7.952 us    |                                    }
+  360.774525 |   1)   9.063 us    |                                  }
+  360.774525 |   1)   0.615 us    |                                  journal_mark_dirty();
+  360.774527 |   1)   0.578 us    |                                  __brelse();
+  360.774528 |   1)               |                                  reiserfs_prepare_for_journal() {
+  360.774528 |   1)               |                                    unlock_buffer() {
+  360.774529 |   1)               |                                      wake_up_bit() {
+  360.774529 |   1)               |                                        bit_waitqueue() {
+  360.774530 |   1)   0.594 us    |                                          __phys_addr();
+
+
+You can put comments on specific functions by using
+trace_printk(). For example, if you want to put a comment inside
+the __might_sleep() function, you just have to include
+<linux/ftrace.h> and call trace_printk() inside __might_sleep():
+
+trace_printk("I'm a comment!\n")
+
+will produce:
+
+ 1)               |             __might_sleep() {
+ 1)               |                /* I'm a comment! */
+ 1)   1.449 us    |             }
+
+
+You might find other useful features for this tracer in the
+following "dynamic ftrace" section, such as tracing only
+specific functions or tasks.
+
 dynamic ftrace
 --------------
 
 If CONFIG_DYNAMIC_FTRACE is set, the system will run with
 virtually no overhead when function tracing is disabled. The way
 this works is the mcount function call (placed at the start of
-every kernel function, produced by the -pg switch in gcc), starts
-of pointing to a simple return. (Enabling FTRACE will include the
--pg switch in the compiling of the kernel.)
+every kernel function, produced by the -pg switch in gcc),
+starts off pointing to a simple return. (Enabling FTRACE will
+include the -pg switch when compiling the kernel.)
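+
+As a rough illustration (sketch only; in a real build the call
+is emitted automatically by gcc and mcount is provided by the
+architecture code, not written by hand like this), a function
+compiled with -pg conceptually looks like:
+
+  /* Stub only so the example is self-contained. With dynamic
+   * ftrace this call site is later patched into a nop. */
+  void mcount(void)
+  {
+  }
+
+  void example_function(void)
+  {
+          mcount();       /* inserted at function entry by gcc -pg */
+          /* ... the function's normal body ... */
+  }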
 
 At compile time every C file object is run through the
 recordmcount.pl script (located in the scripts directory). This
 script will process the C object using objdump to find all the
-locations in the .text section that call mcount. (Note, only
-the .text section is processed, since processing other sections
-like .init.text may cause races due to those sections being freed).
+locations in the .text section that call mcount. (Note, only the
+.text section is processed, since processing other sections like
+.init.text may cause races due to those sections being freed).
 
-A new section called "__mcount_loc" is created that holds references
-to all the mcount call sites in the .text section. This section is
-compiled back into the original object. The final linker will add
-all these references into a single table.
+A new section called "__mcount_loc" is created that holds
+references to all the mcount call sites in the .text section.
+This section is compiled back into the original object. The
+final linker will add all these references into a single table.
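+
+Conceptually (sketch only; the exact section layout and the
+symbol names here are illustrative, not a reference), that table
+can be pictured as a plain array of call-site addresses bounded
+by two linker-provided symbols:
+
+  /* Illustrative only: __mcount_loc viewed as an array of
+   * addresses of mcount call sites, delimited by linker symbols. */
+  extern unsigned long __start_mcount_loc[];  /* first recorded site */
+  extern unsigned long __stop_mcount_loc[];   /* one past the last   */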
 
 On boot up, before SMP is initialized, the dynamic ftrace code
-scans this table and updates all the locations into nops. It also
-records the locations, which are added to the available_filter_functions
-list.  Modules are processed as they are loaded and before they are
-executed.  When a module is unloaded, it also removes its functions from
-the ftrace function list. This is automatic in the module unload
-code, and the module author does not need to worry about it.
-
-When tracing is enabled, kstop_machine is called to prevent races
-with the CPUS executing code being modified (which can cause the
-CPU to do undesireable things), and the nops are patched back
-to calls. But this time, they do not call mcount (which is just
-a function stub). They now call into the ftrace infrastructure.
+scans this table and updates all the locations into nops. It
+also records the locations, which are added to the
+available_filter_functions list.  Modules are processed as they
+are loaded and before they are executed.  When a module is
+unloaded, it also removes its functions from the ftrace function
+list. This is automatic in the module unload code, and the
+module author does not need to worry about it.
+
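+As a simplified sketch (reusing the illustrative symbols from
+above; the helper functions are assumptions standing in for the
+real per-arch patching code, and this is not the kernel's actual
+ftrace code), the boot-time pass looks roughly like this:
+
+  extern unsigned long __start_mcount_loc[];   /* illustrative symbols */
+  extern unsigned long __stop_mcount_loc[];
+
+  void arch_make_nop(unsigned long ip);             /* assumed helper */
+  void record_available_function(unsigned long ip); /* assumed helper */
+
+  void ftrace_init_sketch(void)
+  {
+          unsigned long *p;
+
+          for (p = __start_mcount_loc; p < __stop_mcount_loc; p++) {
+                  arch_make_nop(*p);              /* mcount call -> nop  */
+                  record_available_function(*p);  /* for the filter list */
+          }
+  }
+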
+When tracing is enabled, kstop_machine is called to prevent
+races with the CPUs executing the code being modified (which can
+cause the CPU to do undesirable things), and the nops are
+patched back to calls. But this time, they do not call mcount
+(which is just a function stub). They now call into the ftrace
+infrastructure.
 
 One special side-effect to the recording of the functions being
 traced is that we can now selectively choose which functions we
-wish to trace and which ones we want the mcount calls to remain as
-nops.
+wish to trace and which ones we want the mcount calls to remain
+as nops.
 
-Two files are used, one for enabling and one for disabling the tracing
-of specified functions. They are:
+Two files are used, one for enabling and one for disabling the
+tracing of specified functions. They are:
 
   set_ftrace_filter
 
@@ -1202,8 +1535,8 @@ and
 
   set_ftrace_notrace
 
-A list of available functions that you can add to these files is listed
-in:
+A list of available functions that you can add to these files is
+listed in:
 
    available_filter_functions
 
@@ -1240,8 +1573,8 @@ hrtimer_interrupt
 sys_nanosleep
 
 
-Perhaps this is not enough. The filters also allow simple wild cards.
-Only the following are currently available
+Perhaps this is not enough. The filters also allow simple wild
+cards. Only the following are currently available:
 
   <match>*  - will match functions that begin with <match>
   *<match>  - will match functions that end with <match>
@@ -1251,9 +1584,9 @@ These are the only wild cards which are supported.
 
   <match>*<match> will not work.
 
-Note: It is better to use quotes to enclose the wild cards, otherwise
-  the shell may expand the parameters into names of files in the local
-  directory.
+Note: It is better to use quotes to enclose the wild cards,
+      otherwise the shell may expand the parameters into names
+      of files in the local directory.
 
  # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter
 
@@ -1299,7 +1632,8 @@ This is because the '>' and '>>' act just like they do in bash.
 To rewrite the filters, use '>'
 To append to the filters, use '>>'
 
-To clear out a filter so that all functions will be recorded again:
+To clear out a filter so that all functions will be recorded
+again:
 
  # echo > /debug/tracing/set_ftrace_filter
  # cat /debug/tracing/set_ftrace_filter
@@ -1331,7 +1665,8 @@ hrtimer_get_res
 hrtimer_init_sleeper
 
 
-The set_ftrace_notrace prevents those functions from being traced.
+The set_ftrace_notrace prevents those functions from being
+traced.
 
  # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace
 
@@ -1353,13 +1688,75 @@ Produces:
 
 We can see that there's no more lock or preempt tracing.
 
+
+Dynamic ftrace with the function graph tracer
+---------------------------------------------
+
+Although what has been explained above concerns both the
+function tracer and the function-graph tracer, there are some
+special features only available in the function-graph tracer.
+
+If you want to trace only one function and all of its children,
+you just have to echo its name into set_graph_function:
+
+ echo __do_fault > set_graph_function
+
+will produce the following "expanded" trace of the __do_fault()
+function:
+
+ 0)               |  __do_fault() {
+ 0)               |    filemap_fault() {
+ 0)               |      find_lock_page() {
+ 0)   0.804 us    |        find_get_page();
+ 0)               |        __might_sleep() {
+ 0)   1.329 us    |        }
+ 0)   3.904 us    |      }
+ 0)   4.979 us    |    }
+ 0)   0.653 us    |    _spin_lock();
+ 0)   0.578 us    |    page_add_file_rmap();
+ 0)   0.525 us    |    native_set_pte_at();
+ 0)   0.585 us    |    _spin_unlock();
+ 0)               |    unlock_page() {
+ 0)   0.541 us    |      page_waitqueue();
+ 0)   0.639 us    |      __wake_up_bit();
+ 0)   2.786 us    |    }
+ 0) + 14.237 us   |  }
+ 0)               |  __do_fault() {
+ 0)               |    filemap_fault() {
+ 0)               |      find_lock_page() {
+ 0)   0.698 us    |        find_get_page();
+ 0)               |        __might_sleep() {
+ 0)   1.412 us    |        }
+ 0)   3.950 us    |      }
+ 0)   5.098 us    |    }
+ 0)   0.631 us    |    _spin_lock();
+ 0)   0.571 us    |    page_add_file_rmap();
+ 0)   0.526 us    |    native_set_pte_at();
+ 0)   0.586 us    |    _spin_unlock();
+ 0)               |    unlock_page() {
+ 0)   0.533 us    |      page_waitqueue();
+ 0)   0.638 us    |      __wake_up_bit();
+ 0)   2.793 us    |    }
+ 0) + 14.012 us   |  }
+
+You can also expand several functions at once:
+
+ echo sys_open > set_graph_function
+ echo sys_close >> set_graph_function
+
+Now if you want to go back to tracing all functions, you can
+clear this special filter via:
+
+ echo > set_graph_function
+
+
 trace_pipe
 ----------
 
-The trace_pipe outputs the same content as the trace file, but the effect
-on the tracing is different. Every read from trace_pipe is consumed.
-This means that subsequent reads will be different. The trace
-is live.
+The trace_pipe outputs the same content as the trace file, but
+the effect on the tracing is different. Every read from
+trace_pipe is consumed. This means that subsequent reads will be
+different. The trace is live.
 
  # echo function > /debug/tracing/current_tracer
  # cat /debug/tracing/trace_pipe > /tmp/trace.out &
@@ -1387,38 +1784,45 @@ is live.
             bash-4043  [00] 41.267111: select_task_rq_rt <-try_to_wake_up
 
 
-Note, reading the trace_pipe file will block until more input is added.
-By changing the tracer, trace_pipe will issue an EOF. We needed
-to set the function tracer _before_ we "cat" the trace_pipe file.
+Note, reading the trace_pipe file will block until more input is
+added. Changing the tracer will make trace_pipe issue an EOF. We
+needed to set the function tracer _before_ we "cat" the
+trace_pipe file.
 
 
 trace entries
 -------------
 
-Having too much or not enough data can be troublesome in diagnosing
-an issue in the kernel. The file buffer_size_kb is used to modify
-the size of the internal trace buffers. The number listed
-is the number of entries that can be recorded per CPU. To know
-the full size, multiply the number of possible CPUS with the
-number of entries.
+Having too much or not enough data can be troublesome in
+diagnosing an issue in the kernel. The file buffer_size_kb is
+used to modify the size of the internal trace buffers. The
+number listed is the number of kilobytes allocated for the
+buffer of each CPU. To know the full size, multiply the number
+of possible CPUs by the per-CPU size.
 
  # cat /debug/tracing/buffer_size_kb
 1408 (units kilobytes)
 
-Note, to modify this, you must have tracing completely disabled. To do that,
-echo "nop" into the current_tracer. If the current_tracer is not set
-to "nop", an EINVAL error will be returned.
+Note, to modify this, you must have tracing completely disabled.
+To do that, echo "nop" into the current_tracer. If the
+current_tracer is not set to "nop", an EINVAL error will be
+returned.
 
  # echo nop > /debug/tracing/current_tracer
  # echo 10000 > /debug/tracing/buffer_size_kb
  # cat /debug/tracing/buffer_size_kb
 10000 (units kilobytes)
 
-The number of pages which will be allocated is limited to a percentage
-of available memory. Allocating too much will produce an error.
+The number of pages which will be allocated is limited to a
+percentage of available memory. Allocating too much will produce
+an error.
 
  # echo 1000000000000 > /debug/tracing/buffer_size_kb
 -bash: echo: write error: Cannot allocate memory
  # cat /debug/tracing/buffer_size_kb
 85
 
+-----------
+
+More details can be found in the source code, in the
+kernel/trace/*.c files.
index 28de395..7643483 100644 (file)
@@ -49,6 +49,7 @@ parameter is applicable:
        ISAPNP  ISA PnP code is enabled.
        ISDN    Appropriate ISDN support is enabled.
        JOY     Appropriate joystick support is enabled.
+       KMEMTRACE kmemtrace is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
        LOOP    Loopback device support is enabled.
@@ -1047,6 +1048,15 @@ and is between 256 and 4096 characters. It is defined in the file
                        use the HighMem zone if it exists, and the Normal
                        zone if it does not.
 
+       kmemtrace.enable=       [KNL,KMEMTRACE] Format: { yes | no }
+                               Controls whether kmemtrace is enabled
+                               at boot-time.
+
+       kmemtrace.subbufs=n     [KNL,KMEMTRACE] Overrides the number of
+                       subbufs kmemtrace's relay channel has. Set this
+                       higher than default (KMEMTRACE_N_SUBBUFS in code) if
+                       you experience buffer overruns.
+
        movablecore=nn[KMG]     [KNL,X86-32,IA-64,PPC,X86-64] This parameter
                        is similar to kernelcore except it specifies the
                        amount of memory used for migratable allocations.
@@ -2334,6 +2344,8 @@ and is between 256 and 4096 characters. It is defined in the file
 
        tp720=          [HW,PS2]
 
+       trace_buf_size=nn[KMG] [ftrace] will set tracing buffer size.
+
        trix=           [HW,OSS] MediaTrix AudioTrix Pro
                        Format:
                        <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
index 4887730..938ea22 100644 (file)
@@ -27,33 +27,37 @@ lock-class.
 State
 -----
 
-The validator tracks lock-class usage history into 5 separate state bits:
+The validator tracks lock-class usage history into 4n + 1 separate state bits:
 
-- 'ever held in hardirq context'                    [ == hardirq-safe   ]
-- 'ever held in softirq context'                    [ == softirq-safe   ]
-- 'ever held with hardirqs enabled'                 [ == hardirq-unsafe ]
-- 'ever held with softirqs and hardirqs enabled'    [ == softirq-unsafe ]
+- 'ever held in STATE context'
+- 'ever held as readlock in STATE context'
+- 'ever held with STATE enabled'
+- 'ever held as readlock with STATE enabled'
+
+Where STATE can be one of the following (see kernel/lockdep_states.h):
+ - hardirq
+ - softirq
+ - reclaim_fs
 
 - 'ever used'                                       [ == !unused        ]
 
-When locking rules are violated, these state bits are presented in the
-locking error messages, inside curlies.  A contrived example:
+When locking rules are violated, these state bits are presented in the
+locking error messages, inside curlies. A contrived example:
 
    modprobe/2287 is trying to acquire lock:
-    (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24
+    (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
 
    but task is already holding lock:
-    (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24
+    (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
 
 
-The bit position indicates hardirq, softirq, hardirq-read,
-softirq-read respectively, and the character displayed in each
-indicates:
+The bit position indicates STATE, STATE-read, for each of the states listed
+above, and the character displayed in each indicates:
 
    '.'  acquired while irqs disabled
    '+'  acquired in irq context
    '-'  acquired with irqs enabled
-   '?' read acquired in irq context with irqs enabled.
+   '?'  acquired in irq context with irqs enabled.
 
 Unused mutexes cannot be part of the cause of an error.
 
index 9e592c7..535aeb9 100644 (file)
@@ -113,6 +113,8 @@ On all -  write a character to /proc/sysrq-trigger.  e.g.:
 
 'x'    - Used by xmon interface on ppc/powerpc platforms.
 
+'z'    - Dump the ftrace buffer
+
 '0'-'9' - Sets the console log level, controlling which kernel messages
           will be printed to your console. ('0', for example would make
           it so that only emergency messages like PANICs or OOPSes would
index 6f0a044..4ff43c6 100644 (file)
@@ -45,8 +45,8 @@ In include/trace/subsys.h :
 #include <linux/tracepoint.h>
 
 DECLARE_TRACE(subsys_eventname,
-       TPPROTO(int firstarg, struct task_struct *p),
-       TPARGS(firstarg, p));
+       TP_PROTO(int firstarg, struct task_struct *p),
+       TP_ARGS(firstarg, p));
 
 In subsys/file.c (where the tracing statement must be added) :
 
@@ -66,10 +66,10 @@ Where :
     - subsys is the name of your subsystem.
     - eventname is the name of the event to trace.
 
-- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the
+- TP_PROTO(int firstarg, struct task_struct *p) is the prototype of the
   function called by this tracepoint.
 
-- TPARGS(firstarg, p) are the parameters names, same as found in the
+- TP_ARGS(firstarg, p) are the parameters names, same as found in the
   prototype.
 
 Connecting a function (probe) to a tracepoint is done by providing a
diff --git a/Documentation/vm/kmemtrace.txt b/Documentation/vm/kmemtrace.txt
new file mode 100644 (file)
index 0000000..a956d9b
--- /dev/null
@@ -0,0 +1,126 @@
+                       kmemtrace - Kernel Memory Tracer
+
+                         by Eduard - Gabriel Munteanu
+                            <eduard.munteanu@linux360.ro>
+
+I. Introduction
+===============
+
+kmemtrace helps kernel developers figure out two things:
+1) how different allocators (SLAB, SLUB etc.) perform
+2) how kernel code allocates memory and how much
+
+To do this, we trace every allocation and export information to userspace
+through the relay interface. We export things such as the number of requested
+bytes, the number of bytes actually allocated (i.e. including internal
+fragmentation), whether this is a slab allocation or a plain kmalloc() and so
+on.
+
+The actual analysis is performed by a userspace tool (see section III for
+details on where to get it from). It logs the data exported by the kernel,
+processes it and (as of writing this) can provide the following information:
+- the total amount of memory allocated and fragmentation per call-site
+- the amount of memory allocated and fragmentation per allocation
+- total memory allocated and fragmentation in the collected dataset
+- number of cross-CPU allocations and frees (makes sense in NUMA environments)
+
+Moreover, it can potentially find inconsistent and erroneous behavior in
+kernel code, such as using slab free functions on kmalloc'ed memory or
+allocating less memory than requested (but not truly failed allocations).
+
+kmemtrace also makes provisions for tracing on one arch and analysing the
+data on another.
+
+II. Design and goals
+====================
+
+kmemtrace was designed to handle rather large amounts of data. Thus, it uses
+the relay interface to export whatever is logged to userspace, which then
+stores it. Analysis and reporting are done asynchronously, that is, after the
+data is collected and stored. By design, it allows one to log and analyse
+on different machines and different arches.
+
+As of writing this, the ABI is not considered stable, though it might not
+change much. However, no guarantees are made about compatibility yet. When
+deemed stable, the ABI should still allow easy extension while maintaining
+backward compatibility. This is described further in Documentation/ABI.
+
+Summary of design goals:
+       - allow logging and analysis to be done across different machines
+       - be fast and anticipate usage in high-load environments (*)
+       - be reasonably extensible
+       - make it possible for GNU/Linux distributions to have kmemtrace
+       included in their repositories
+
+(*) - one of the reasons Pekka Enberg's original userspace data analysis
+    tool's code was rewritten from Perl to C (although this is more than a
+    simple conversion)
+
+
+III. Quick usage guide
+======================
+
+1) Get a kernel that supports kmemtrace and build it accordingly (i.e. enable
+CONFIG_KMEMTRACE).
+
+2) Get the userspace tool and build it:
+$ git-clone git://repo.or.cz/kmemtrace-user.git                # current repository
+$ cd kmemtrace-user/
+$ ./autogen.sh
+$ ./configure
+$ make
+
+3) Boot the kmemtrace-enabled kernel if you haven't, preferably in the
+'single' runlevel (so that relay buffers don't fill up easily), and run
+kmemtrace:
+# '$' does not mean user, but root here.
+$ mount -t debugfs none /sys/kernel/debug
+$ mount -t proc none /proc
+$ cd path/to/kmemtrace-user/
+$ ./kmemtraced
+Wait a bit, then stop it with CTRL+C.
+$ cat /sys/kernel/debug/kmemtrace/total_overruns       # Check if we didn't
+                                                       # overrun, should
+                                                       # be zero.
+$ (Optionally) [Run kmemtrace_check separately on each cpu[0-9]*.out file to
+               check its correctness]
+$ ./kmemtrace-report
+
+Now you should have a nice and short summary of how the allocator performs.
+
+IV. FAQ and known issues
+========================
+
+Q: 'cat /sys/kernel/debug/kmemtrace/total_overruns' is non-zero, how do I fix
+this? Should I worry?
+A: If it's non-zero, this affects kmemtrace's accuracy, depending on how
+large the number is. You can fix it by supplying a higher
+'kmemtrace.subbufs=N' kernel parameter.
+---
+
+Q: kmemtrace_check reports errors, how do I fix this? Should I worry?
+A: This is a bug and should be reported. It can occur for a variety of
+reasons:
+       - possible bugs in relay code
+       - possible misuse of relay by kmemtrace
+       - timestamps being collected out of order
+Or you may fix it yourself and send us a patch.
+---
+
+Q: kmemtrace_report shows many errors, how do I fix this? Should I worry?
+A: This is a known issue and I'm working on it. These might be true errors
+in kernel code, which may have inconsistent behavior (e.g. allocating memory
+with kmem_cache_alloc() and freeing it with kfree()). Pekka Enberg pointed
+out this behavior may work with SLAB, but may fail with other allocators.
+
+It may also be due to lack of tracing in some unusual allocator functions.
+
+We don't want bug reports regarding this issue yet.
+---
+
+V. See also
+===========
+
+Documentation/kernel-parameters.txt
+Documentation/ABI/testing/debugfs-kmemtrace
+
index 1c2ca1d..3c3ce2b 100644 (file)
@@ -2623,6 +2623,12 @@ M:       jason.wessel@windriver.com
 L:     kgdb-bugreport@lists.sourceforge.net
 S:     Maintained
 
+KMEMTRACE
+P:     Eduard - Gabriel Munteanu
+M:     eduard.munteanu@linux360.ro
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 KPROBES
 P:     Ananth N Mavinakayanahalli
 M:     ananth@in.ibm.com
index 550dab2..a092dc7 100644 (file)
@@ -6,6 +6,7 @@ config OPROFILE
        tristate "OProfile system profiling (EXPERIMENTAL)"
        depends on PROFILING
        depends on HAVE_OPROFILE
+       depends on TRACING_SUPPORT
        select TRACING
        select RING_BUFFER
        help
diff --git a/arch/alpha/include/asm/ftrace.h b/arch/alpha/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index d953e23..8897146 100644 (file)
@@ -14,17 +14,4 @@ typedef struct {
 
 void ack_bad_irq(unsigned int irq);
 
-#define HARDIRQ_BITS   12
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially nestable IRQ sources in the system
- * to nest on a single CPU. On Alpha, interrupts are masked at the CPU
- * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode)
- * so we really only have 8 nestable IRQs, but allow some overhead
- */
-#if (1 << HARDIRQ_BITS) < 16
-#error HARDIRQ_BITS is too low!
-#endif
-
 #endif /* _ALPHA_HARDIRQ_H */
diff --git a/arch/avr32/include/asm/ftrace.h b/arch/avr32/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index 2673543..015bc75 100644 (file)
@@ -20,15 +20,4 @@ void ack_bad_irq(unsigned int irq);
 
 #endif /* __ASSEMBLY__ */
 
-#define HARDIRQ_BITS   12
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #endif /* __ASM_AVR32_HARDIRQ_H */
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/cris/include/asm/ftrace.h b/arch/cris/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/h8300/include/asm/ftrace.h b/arch/h8300/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index 153e727..294a3b1 100644 (file)
@@ -22,6 +22,9 @@ config IA64
        select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
+       select HAVE_FUNCTION_TRACER
        select HAVE_DMA_ATTRS
        select HAVE_KVM
        select HAVE_ARCH_TRACEHOOK
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..d20db3c
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_FTRACE_H
+#define _ASM_IA64_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_INSN_SIZE        32 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
+#define mcount _mcount
+
+#include <asm/kprobes.h>
+/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
+#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
+#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       /* second bundle, insn 2 */
+       return addr - 0x12;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _ASM_IA64_FTRACE_H */
index 140e495..d514cd9 100644 (file)
 
 #define local_softirq_pending()                (local_cpu_data->softirq_pending)
 
-#define HARDIRQ_BITS   14
-
-/*
- * The hardirq mask has to be large enough to have space for potentially all IRQ sources
- * in the system nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
index c381ea9..ab6e7ec 100644 (file)
@@ -2,6 +2,10 @@
 # Makefile for the linux kernel.
 #
 
+ifdef CONFIG_DYNAMIC_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
 extra-y        := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o      \
@@ -28,6 +32,7 @@ obj-$(CONFIG_IA64_CYCLONE)    += cyclone.o
 obj-$(CONFIG_CPU_FREQ)         += cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)        += mca_recovery.o
 obj-$(CONFIG_KPROBES)          += kprobes.o jprobes.o
+obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)  += uncached.o
index e5341e2..7e3382b 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
+#include <asm/ftrace.h>
 
 #include "minstate.h"
 
@@ -1404,6 +1405,105 @@ GLOBAL_ENTRY(unw_init_running)
        br.ret.sptk.many rp
 END(unw_init_running)
 
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+GLOBAL_ENTRY(_mcount)
+       br ftrace_stub
+END(_mcount)
+
+.here:
+       br.ret.sptk.many b0
+
+GLOBAL_ENTRY(ftrace_caller)
+       alloc out0 = ar.pfs, 8, 0, 4, 0
+       mov out3 = r0
+       ;;
+       mov out2 = b0
+       add r3 = 0x20, r3
+       mov out1 = r1;
+       br.call.sptk.many b0 = ftrace_patch_gp
+       //this might be called from module, so we must patch gp
+ftrace_patch_gp:
+       movl gp=__gp
+       mov b0 = r3
+       ;;
+.global ftrace_call;
+ftrace_call:
+{
+       .mlx
+       nop.m 0x0
+       movl r3 = .here;;
+}
+       alloc loc0 = ar.pfs, 4, 4, 2, 0
+       ;;
+       mov loc1 = b0
+       mov out0 = b0
+       mov loc2 = r8
+       mov loc3 = r15
+       ;;
+       adds out0 = -MCOUNT_INSN_SIZE, out0
+       mov out1 = in2
+       mov b6 = r3
+
+       br.call.sptk.many b0 = b6
+       ;;
+       mov ar.pfs = loc0
+       mov b0 = loc1
+       mov r8 = loc2
+       mov r15 = loc3
+       br ftrace_stub
+       ;;
+END(ftrace_caller)
+
+#else
+GLOBAL_ENTRY(_mcount)
+       movl r2 = ftrace_stub
+       movl r3 = ftrace_trace_function;;
+       ld8 r3 = [r3];;
+       ld8 r3 = [r3];;
+       cmp.eq p7,p0 = r2, r3
+(p7)   br.sptk.many ftrace_stub
+       ;;
+
+       alloc loc0 = ar.pfs, 4, 4, 2, 0
+       ;;
+       mov loc1 = b0
+       mov out0 = b0
+       mov loc2 = r8
+       mov loc3 = r15
+       ;;
+       adds out0 = -MCOUNT_INSN_SIZE, out0
+       mov out1 = in2
+       mov b6 = r3
+
+       br.call.sptk.many b0 = b6
+       ;;
+       mov ar.pfs = loc0
+       mov b0 = loc1
+       mov r8 = loc2
+       mov r15 = loc3
+       br ftrace_stub
+       ;;
+END(_mcount)
+#endif
+
+GLOBAL_ENTRY(ftrace_stub)
+       mov r3 = b0
+       movl r2 = _mcount_ret_helper
+       ;;
+       mov b6 = r2
+       mov b7 = r3
+       br.ret.sptk.many b6
+
+_mcount_ret_helper:
+       mov b0 = r42
+       mov r1 = r41
+       mov ar.pfs = r40
+       br b7
+END(ftrace_stub)
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
        .rodata
        .align 8
        .globl sys_call_table
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..7fc8c96
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
+ *
+ * For licencing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. When using dynamic ftrace, the
+ * mcount call-sites get patched lazily with NOP till they are
+ * enabled. All code mutation routines here take effect atomically.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/patch.h>
+
+/* In IA64, each function will be added below two bundles with -pg option */
+static unsigned char __attribute__((aligned(8)))
+ftrace_orig_code[MCOUNT_INSN_SIZE] = {
+       0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
+       0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
+       0x05, 0x00, 0xc4, 0x00,             /* mov r42=b0 */
+       0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
+       0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
+       0x08, 0x00, 0x00, 0x50              /* br.call.sptk.many b0 = _mcount;; */
+};
+
+struct ftrace_orig_insn {
+       u64 dummy1, dummy2, dummy3;
+       u64 dummy4:64-41+13;
+       u64 imm20:20;
+       u64 dummy5:3;
+       u64 sign:1;
+       u64 dummy6:4;
+};
+
+/* mcount stub will be converted below for nop */
+static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
+       0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
+       0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
+       0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
+       0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
+       0x00, 0x00, 0x04, 0x00
+};
+
+static unsigned char *ftrace_nop_replace(void)
+{
+       return ftrace_nop_code;
+}
+
+/*
+ * mcount stub will be converted below for call
+ * Note: Just the last instruction is changed against nop
+ * */
+static unsigned char __attribute__((aligned(8)))
+ftrace_call_code[MCOUNT_INSN_SIZE] = {
+       0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
+       0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
+       0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
+       0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
+       0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
+       0xf8, 0xff, 0xff, 0xc8
+};
+
+struct ftrace_call_insn {
+       u64 dummy1, dummy2;
+       u64 dummy3:48;
+       u64 imm39_l:16;
+       u64 imm39_h:23;
+       u64 dummy4:13;
+       u64 imm20:20;
+       u64 dummy5:3;
+       u64 i:1;
+       u64 dummy6:4;
+};
+
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+       struct ftrace_call_insn *code = (void *)ftrace_call_code;
+       unsigned long offset = addr - (ip + 0x10);
+
+       code->imm39_l = offset >> 24;
+       code->imm39_h = offset >> 40;
+       code->imm20 = offset >> 4;
+       code->i = offset >> 63;
+       return ftrace_call_code;
+}
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+                  unsigned char *new_code, int do_check)
+{
+       unsigned char replaced[MCOUNT_INSN_SIZE];
+
+       /*
+        * Note: Due to modules and __init, code can
+        *  disappear and change, we need to protect against faulting
+        *  as well as code changing. We do this by using the
+        *  probe_kernel_* functions.
+        *
+        * No real locking needed, this code is run through
+        * kstop_machine, or before SMP starts.
+        */
+
+       if (!do_check)
+               goto skip_check;
+
+       /* read the text we want to modify */
+       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* Make sure it is what we expect it to be */
+       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+               return -EINVAL;
+
+skip_check:
+       /* replace the text with the new text */
+       if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
+               return -EPERM;
+       flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+       return 0;
+}
+
+static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
+       unsigned long ip = rec->ip;
+
+       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+       if (rec->flags & FTRACE_FL_CONVERTED) {
+               struct ftrace_call_insn *call_insn, *tmp_call;
+
+               call_insn = (void *)ftrace_call_code;
+               tmp_call = (void *)replaced;
+               call_insn->imm39_l = tmp_call->imm39_l;
+               call_insn->imm39_h = tmp_call->imm39_h;
+               call_insn->imm20 = tmp_call->imm20;
+               call_insn->i = tmp_call->i;
+               if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
+                       return -EINVAL;
+               return 0;
+       } else {
+               struct ftrace_orig_insn *call_insn, *tmp_call;
+
+               call_insn = (void *)ftrace_orig_code;
+               tmp_call = (void *)replaced;
+               call_insn->sign = tmp_call->sign;
+               call_insn->imm20 = tmp_call->imm20;
+               if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
+                       return -EINVAL;
+               return 0;
+       }
+}
+
+int ftrace_make_nop(struct module *mod,
+                   struct dyn_ftrace *rec, unsigned long addr)
+{
+       int ret;
+       char *new;
+
+       ret = ftrace_make_nop_check(rec, addr);
+       if (ret)
+               return ret;
+       new = ftrace_nop_replace();
+       return ftrace_modify_code(rec->ip, NULL, new, 0);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long ip = rec->ip;
+       unsigned char *old, *new;
+
+       old=  ftrace_nop_replace();
+       new = ftrace_call_replace(ip, addr);
+       return ftrace_modify_code(ip, old, new, 1);
+}
+
+/* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip;
+       unsigned long addr = ((struct fnptr *)ftrace_call)->ip;
+
+       if (func == ftrace_stub)
+               return 0;
+       ip = ((struct fnptr *)func)->ip;
+
+       ia64_patch_imm64(addr + 2, ip);
+
+       flush_icache_range(addr, addr + 16);
+       return 0;
+}
+
+/* run from kstop_machine */
+int __init ftrace_dyn_arch_init(void *data)
+{
+       *(unsigned long *)data = 0;
+
+       return 0;
+}
index 6da1f20..2d31186 100644 (file)
@@ -112,3 +112,9 @@ EXPORT_SYMBOL_GPL(esi_call_phys);
 #endif
 extern char ia64_ivt[];
 EXPORT_SYMBOL(ia64_ivt);
+
+#include <asm/ftrace.h>
+#ifdef CONFIG_FUNCTION_TRACER
+/* mcount is defined in assembly */
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/m68k/include/asm/ftrace.h b/arch/m68k/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/um/include/asm/ftrace.h b/arch/um/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index 3175837..bdcee12 100644 (file)
@@ -34,6 +34,7 @@ config X86
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
        select HAVE_KVM
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
index 5b301b7..eb2221d 100644 (file)
@@ -123,6 +123,11 @@ void clflush_cache_range(void *addr, unsigned int size);
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
 #endif
 
 #ifdef CONFIG_DEBUG_RODATA_TEST
index 63a79c7..81937a5 100644 (file)
@@ -111,6 +111,8 @@ enum fixed_addresses {
 #ifdef CONFIG_PARAVIRT
        FIX_PARAVIRT_BOOTMAP,
 #endif
+       FIX_TEXT_POKE0, /* reserve 2 pages for text_poke() */
+       FIX_TEXT_POKE1,
        __end_of_permanent_fixed_addresses,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        FIX_OHCI1394_BASE,
index b55b4a7..db24c22 100644 (file)
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-       unsigned long ret;
-       unsigned long func;
-       unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
index 8e0f8d1..8672303 100644 (file)
@@ -80,8 +80,6 @@
 
 #define PTRACE_SINGLEBLOCK     33      /* resume execution until next branch */
 
-#ifdef CONFIG_X86_PTRACE_BTS
-
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 
@@ -140,6 +138,5 @@ struct ptrace_bts_config {
    BTS records are read from oldest to newest.
    Returns number of BTS records drained.
 */
-#endif /* CONFIG_X86_PTRACE_BTS */
 
 #endif /* _ASM_X86_PTRACE_ABI_H */
index 4c80f15..2d903b7 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/kprobes.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -12,7 +13,9 @@
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
 #include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 #include <asm/io.h>
+#include <asm/fixmap.h>
 
 #define MAX_PATCH_LEN (255-1)
 
@@ -226,6 +229,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
        u8 **ptr;
 
+       mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
@@ -234,6 +238,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
                /* turn DS segment override prefix into lock prefix */
                text_poke(*ptr, ((unsigned char []){0xf0}), 1);
        };
+       mutex_unlock(&text_mutex);
 }
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
@@ -243,6 +248,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
        if (noreplace_smp)
                return;
 
+       mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
@@ -251,6 +257,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
                /* turn lock prefix into DS segment override prefix */
                text_poke(*ptr, ((unsigned char []){0x3E}), 1);
        };
+       mutex_unlock(&text_mutex);
 }
 
 struct smp_alt_module {
@@ -500,15 +507,16 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  * It means the size must be writable atomically and the address must be aligned
  * in a way that permits an atomic write. It also makes sure we fit on a single
  * page.
+ *
+ * Note: Must be called under text_mutex.
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
+       unsigned long flags;
        char *vaddr;
-       int nr_pages = 2;
        struct page *pages[2];
        int i;
 
-       might_sleep();
        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -518,14 +526,17 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
-       if (!pages[1])
-               nr_pages = 1;
-       vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-       BUG_ON(!vaddr);
-       local_irq_disable();
+       set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+       if (pages[1])
+               set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+       vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+       local_irq_save(flags);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-       local_irq_enable();
-       vunmap(vaddr);
+       local_irq_restore(flags);
+       clear_fixmap(FIX_TEXT_POKE0);
+       if (pages[1])
+               clear_fixmap(FIX_TEXT_POKE1);
+       local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
index 22590cf..5e40f54 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+DEFINE_TRACE(power_mark);
+
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
index 191117f..c1c04bf 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 
        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-        * with P/T states and does not stop in deep C-states
+        * with P/T states and does not stop in deep C-states.
+        *
+        * It is also reliable across cores and sockets. (but not across
+        * cabinets - we turn it off in that case explicitly.)
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+               set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+               sched_clock_stable = 1;
        }
 
        /*
index 87d103d..95ea5fa 100644 (file)
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/ftrace.h>
 
 #include <asm/stacktrace.h>
 
@@ -195,6 +197,11 @@ unsigned __kprobes long oops_begin(void)
        int cpu;
        unsigned long flags;
 
+       /* notify the hw-branch tracer so it may disable tracing and
+          add the last trace to the trace buffer -
+          the earlier this happens, the more useful the trace. */
+       trace_hw_branch_oops();
+
        oops_enter();
 
        /* racy, but better than risking deadlock. */
index 231bdd3..a85da17 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <linux/ftrace.h>
 #include <asm/nops.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_kernel_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_kernel_text_ro();
+       return 0;
+}
+
 union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
@@ -82,7 +95,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;            /* holds return value of text write */
 static int mod_code_write;             /* set when NMI should do the write */
 static void *mod_code_ip;              /* holds the IP to write to */
@@ -111,12 +124,16 @@ static void ftrace_mod_code(void)
         */
        mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
                                             MCOUNT_INSN_SIZE);
+
+       /* if we fail, then kill any new writers */
+       if (mod_code_status)
+               mod_code_write = 0;
 }
 
 void ftrace_nmi_enter(void)
 {
-       atomic_inc(&in_nmi);
-       /* Must have in_nmi seen before reading write flag */
+       atomic_inc(&nmi_running);
+       /* Must have nmi_running seen before reading write flag */
        smp_mb();
        if (mod_code_write) {
                ftrace_mod_code();
@@ -126,22 +143,21 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-       /* Finish all executions before clearing in_nmi */
+       /* Finish all executions before clearing nmi_running */
        smp_wmb();
-       atomic_dec(&in_nmi);
+       atomic_dec(&nmi_running);
 }
 
 static void wait_for_nmi(void)
 {
-       int waited = 0;
+       if (!atomic_read(&nmi_running))
+               return;
 
-       while (atomic_read(&in_nmi)) {
-               waited = 1;
+       do {
                cpu_relax();
-       }
+       } while (atomic_read(&nmi_running));
 
-       if (waited)
-               nmi_wait_count++;
+       nmi_wait_count++;
 }
 
 static int
@@ -368,100 +384,8 @@ int ftrace_disable_ftrace_graph_caller(void)
        return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-       atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-       atomic_dec(&in_nmi);
-}
-
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func, int *depth)
-{
-       int index;
-
-       if (!current->ret_stack)
-               return -EBUSY;
-
-       /* The return trace stack is full */
-       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
-               return -EBUSY;
-       }
-
-       index = ++current->curr_ret_stack;
-       barrier();
-       current->ret_stack[index].ret = ret;
-       current->ret_stack[index].func = func;
-       current->ret_stack[index].calltime = time;
-       *depth = index;
-
-       return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-       int index;
-
-       index = current->curr_ret_stack;
-
-       if (unlikely(index < 0)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic, otherwise we have no where to go */
-               *ret = (unsigned long)panic;
-               return;
-       }
-
-       *ret = current->ret_stack[index].ret;
-       trace->func = current->ret_stack[index].func;
-       trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-       struct ftrace_graph_ret trace;
-       unsigned long ret;
-
-       pop_return_trace(&trace, &ret);
-       trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_graph_return(&trace);
-
-       if (unlikely(!ret)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic. What else to do? */
-               ret = (unsigned long)panic;
-       }
-
-       return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -476,7 +400,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                                &return_to_handler;
 
        /* Nmi's are currently unsupported */
-       if (unlikely(atomic_read(&in_nmi)))
+       if (unlikely(in_nmi()))
                return;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -512,16 +436,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                return;
        }
 
-       if (unlikely(!__kernel_text_address(old))) {
-               ftrace_graph_stop();
-               *parent = old;
-               WARN_ON(1);
-               return;
-       }
-
-       calltime = cpu_clock(raw_smp_processor_id());
+       calltime = trace_clock_local();
 
-       if (push_return_trace(old, calltime,
+       if (ftrace_push_return_trace(old, calltime,
                                self_addr, &trace.depth) == -EBUSY) {
                *parent = old;
                return;
index 6afa523..8c03705 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/idle.h>
@@ -22,6 +22,9 @@ EXPORT_SYMBOL(idle_nomwait);
 
 struct kmem_cache *task_xstate_cachep;
 
+DEFINE_TRACE(power_start);
+DEFINE_TRACE(power_end);
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        *dst = *src;
index b81125f..c7da368 100644 (file)
@@ -55,7 +55,8 @@ config KVM_AMD
 
 config KVM_TRACE
        bool "KVM trace support"
-       depends on KVM && MARKERS && SYSFS
+       depends on KVM && SYSFS
+       select MARKERS
        select RELAY
        select DEBUG_FS
        default n
index db81e9a..749559e 100644 (file)
@@ -1054,17 +1054,47 @@ static noinline int do_test_wp_bit(void)
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
+static int kernel_set_to_readonly;
+
+void set_kernel_text_rw(void)
+{
+       unsigned long start = PFN_ALIGN(_text);
+       unsigned long size = PFN_ALIGN(_etext) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx for read write\n",
+                start, start+size);
+
+       set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+       unsigned long start = PFN_ALIGN(_text);
+       unsigned long size = PFN_ALIGN(_etext) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx for read only\n",
+                start, start+size);
+
+       set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+}
+
 void mark_rodata_ro(void)
 {
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;
 
-#ifndef CONFIG_DYNAMIC_FTRACE
-       /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);
 
+       kernel_set_to_readonly = 1;
+
 #ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
@@ -1073,7 +1103,6 @@ void mark_rodata_ro(void)
        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
        start += size;
        size = (unsigned long)__end_rodata - start;
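
The kernel_set_to_readonly flag plus set_kernel_text_rw()/set_kernel_text_ro() added above (and mirrored for 64-bit in the next hunk) let code patchers temporarily lift the write protection that mark_rodata_ro() now applies unconditionally, presumably via the ftrace_arch_code_modify_prepare()/ftrace_arch_code_modify_post_process() hooks declared later in this diff. A grossly simplified, hypothetical pairing of the helpers; the function name, declaration header, and error handling are illustrative only:

#include <linux/types.h>
#include <linux/uaccess.h>      /* probe_kernel_write() */
#include <asm/cacheflush.h>     /* assumed home of the set_kernel_text_*() declarations */

static int example_modify_kernel_text(unsigned long ip, void *new_code, size_t len)
{
        long ret;

        set_kernel_text_rw();                           /* make the text mapping writable */
        ret = probe_kernel_write((void *)ip, new_code, len);
        set_kernel_text_ro();                           /* restore write protection */

        /*
         * Grossly simplified: real callers must also keep other CPUs away
         * from the instructions while they change (dynamic ftrace uses
         * stop_machine() for that).
         */
        return ret ? -EPERM : 0;
}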
index 54efa57..1753e80 100644 (file)
@@ -734,21 +734,48 @@ void __init mem_init(void)
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
+static int kernel_set_to_readonly;
+
+void set_kernel_text_rw(void)
+{
+       unsigned long start = PFN_ALIGN(_stext);
+       unsigned long end = PFN_ALIGN(__start_rodata);
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx for read write\n",
+                start, end);
+
+       set_memory_rw(start, (end - start) >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+       unsigned long start = PFN_ALIGN(_stext);
+       unsigned long end = PFN_ALIGN(__start_rodata);
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx for read only\n",
+                start, end);
+
+       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+}
+
 void mark_rodata_ro(void)
 {
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-       /* Dynamic tracing modifies the kernel text section */
-       start = rodata_start;
-#endif
-
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
+       kernel_set_to_readonly = 1;
+
        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index 0cbb3b8..e7d1278 100644 (file)
@@ -44,22 +44,6 @@ config LBD
 
          If unsure, say N.
 
-config BLK_DEV_IO_TRACE
-       bool "Support for tracing block io actions"
-       depends on SYSFS
-       select RELAY
-       select DEBUG_FS
-       select TRACEPOINTS
-       help
-         Say Y here if you want to be able to trace the block layer actions
-         on a given queue. Tracing allows you to see any traffic happening
-         on a block device queue. For more information (and the userspace
-         support tools needed), fetch the blktrace tools from:
-
-         git://git.kernel.dk/blktrace.git
-
-         If unsure, say N.
-
 config BLK_DEV_BSG
        bool "Block layer SG support v4 (EXPERIMENTAL)"
        depends on EXPERIMENTAL
index bfe7304..e9fa4dd 100644 (file)
@@ -13,6 +13,5 @@ obj-$(CONFIG_IOSCHED_AS)      += as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
 
-obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
 obj-$(CONFIG_BLOCK_COMPAT)     += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)        += blk-integrity.o
diff --git a/block/blktrace.c b/block/blktrace.c
deleted file mode 100644 (file)
index 028120a..0000000
+++ /dev/null
@@ -1,860 +0,0 @@
-/*
- * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/blktrace_api.h>
-#include <linux/percpu.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/debugfs.h>
-#include <linux/time.h>
-#include <trace/block.h>
-#include <asm/uaccess.h>
-
-static unsigned int blktrace_seq __read_mostly = 1;
-
-/* Global reference count of probes */
-static DEFINE_MUTEX(blk_probe_mutex);
-static atomic_t blk_probes_ref = ATOMIC_INIT(0);
-
-static int blk_register_tracepoints(void);
-static void blk_unregister_tracepoints(void);
-
-/*
- * Send out a notify message.
- */
-static void trace_note(struct blk_trace *bt, pid_t pid, int action,
-                      const void *data, size_t len)
-{
-       struct blk_io_trace *t;
-
-       t = relay_reserve(bt->rchan, sizeof(*t) + len);
-       if (t) {
-               const int cpu = smp_processor_id();
-
-               t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-               t->time = ktime_to_ns(ktime_get());
-               t->device = bt->dev;
-               t->action = action;
-               t->pid = pid;
-               t->cpu = cpu;
-               t->pdu_len = len;
-               memcpy((void *) t + sizeof(*t), data, len);
-       }
-}
-
-/*
- * Send out a notify for this process, if we haven't done so since a trace
- * started
- */
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
-{
-       tsk->btrace_seq = blktrace_seq;
-       trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
-}
-
-static void trace_note_time(struct blk_trace *bt)
-{
-       struct timespec now;
-       unsigned long flags;
-       u32 words[2];
-
-       getnstimeofday(&now);
-       words[0] = now.tv_sec;
-       words[1] = now.tv_nsec;
-
-       local_irq_save(flags);
-       trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
-       local_irq_restore(flags);
-}
-
-void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
-{
-       int n;
-       va_list args;
-       unsigned long flags;
-       char *buf;
-
-       local_irq_save(flags);
-       buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
-       va_start(args, fmt);
-       n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
-       va_end(args);
-
-       trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(__trace_note_message);
-
-static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
-                        pid_t pid)
-{
-       if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
-               return 1;
-       if (sector < bt->start_lba || sector > bt->end_lba)
-               return 1;
-       if (bt->pid && pid != bt->pid)
-               return 1;
-
-       return 0;
-}
-
-/*
- * Data direction bit lookup
- */
-static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
-
-/* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
-         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
-
-/*
- * The worker for the various blk_add_trace*() types. Fills out a
- * blk_io_trace structure and places it in a per-cpu subbuffer.
- */
-static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-                    int rw, u32 what, int error, int pdu_len, void *pdu_data)
-{
-       struct task_struct *tsk = current;
-       struct blk_io_trace *t;
-       unsigned long flags;
-       unsigned long *sequence;
-       pid_t pid;
-       int cpu;
-
-       if (unlikely(bt->trace_state != Blktrace_running))
-               return;
-
-       what |= ddir_act[rw & WRITE];
-       what |= MASK_TC_BIT(rw, BARRIER);
-       what |= MASK_TC_BIT(rw, SYNCIO);
-       what |= MASK_TC_BIT(rw, AHEAD);
-       what |= MASK_TC_BIT(rw, META);
-       what |= MASK_TC_BIT(rw, DISCARD);
-
-       pid = tsk->pid;
-       if (unlikely(act_log_check(bt, what, sector, pid)))
-               return;
-
-       /*
-        * A word about the locking here - we disable interrupts to reserve
-        * some space in the relay per-cpu buffer, to prevent an irq
-        * from coming in and stepping on our toes.
-        */
-       local_irq_save(flags);
-
-       if (unlikely(tsk->btrace_seq != blktrace_seq))
-               trace_note_tsk(bt, tsk);
-
-       t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
-       if (t) {
-               cpu = smp_processor_id();
-               sequence = per_cpu_ptr(bt->sequence, cpu);
-
-               t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-               t->sequence = ++(*sequence);
-               t->time = ktime_to_ns(ktime_get());
-               t->sector = sector;
-               t->bytes = bytes;
-               t->action = what;
-               t->pid = pid;
-               t->device = bt->dev;
-               t->cpu = cpu;
-               t->error = error;
-               t->pdu_len = pdu_len;
-
-               if (pdu_len)
-                       memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
-       }
-
-       local_irq_restore(flags);
-}
-
-static struct dentry *blk_tree_root;
-static DEFINE_MUTEX(blk_tree_mutex);
-
-static void blk_trace_cleanup(struct blk_trace *bt)
-{
-       debugfs_remove(bt->msg_file);
-       debugfs_remove(bt->dropped_file);
-       relay_close(bt->rchan);
-       free_percpu(bt->sequence);
-       free_percpu(bt->msg_data);
-       kfree(bt);
-       mutex_lock(&blk_probe_mutex);
-       if (atomic_dec_and_test(&blk_probes_ref))
-               blk_unregister_tracepoints();
-       mutex_unlock(&blk_probe_mutex);
-}
-
-int blk_trace_remove(struct request_queue *q)
-{
-       struct blk_trace *bt;
-
-       bt = xchg(&q->blk_trace, NULL);
-       if (!bt)
-               return -EINVAL;
-
-       if (bt->trace_state == Blktrace_setup ||
-           bt->trace_state == Blktrace_stopped)
-               blk_trace_cleanup(bt);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(blk_trace_remove);
-
-static int blk_dropped_open(struct inode *inode, struct file *filp)
-{
-       filp->private_data = inode->i_private;
-
-       return 0;
-}
-
-static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
-                               size_t count, loff_t *ppos)
-{
-       struct blk_trace *bt = filp->private_data;
-       char buf[16];
-
-       snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
-
-       return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
-}
-
-static const struct file_operations blk_dropped_fops = {
-       .owner =        THIS_MODULE,
-       .open =         blk_dropped_open,
-       .read =         blk_dropped_read,
-};
-
-static int blk_msg_open(struct inode *inode, struct file *filp)
-{
-       filp->private_data = inode->i_private;
-
-       return 0;
-}
-
-static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
-                               size_t count, loff_t *ppos)
-{
-       char *msg;
-       struct blk_trace *bt;
-
-       if (count > BLK_TN_MAX_MSG)
-               return -EINVAL;
-
-       msg = kmalloc(count, GFP_KERNEL);
-       if (msg == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(msg, buffer, count)) {
-               kfree(msg);
-               return -EFAULT;
-       }
-
-       bt = filp->private_data;
-       __trace_note_message(bt, "%s", msg);
-       kfree(msg);
-
-       return count;
-}
-
-static const struct file_operations blk_msg_fops = {
-       .owner =        THIS_MODULE,
-       .open =         blk_msg_open,
-       .write =        blk_msg_write,
-};
-
-/*
- * Keep track of how many times we encountered a full subbuffer, to aid
- * the user space app in telling how many lost events there were.
- */
-static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
-                                    void *prev_subbuf, size_t prev_padding)
-{
-       struct blk_trace *bt;
-
-       if (!relay_buf_full(buf))
-               return 1;
-
-       bt = buf->chan->private_data;
-       atomic_inc(&bt->dropped);
-       return 0;
-}
-
-static int blk_remove_buf_file_callback(struct dentry *dentry)
-{
-       struct dentry *parent = dentry->d_parent;
-       debugfs_remove(dentry);
-
-       /*
-       * this will fail for all but the last file, but that is ok. what we
-       * care about is the top level buts->name directory going away, when
-       * the last trace file is gone. Then we don't have to rmdir() that
-       * manually on trace stop, so it nicely solves the issue with
-       * force killing of running traces.
-       */
-
-       debugfs_remove(parent);
-       return 0;
-}
-
-static struct dentry *blk_create_buf_file_callback(const char *filename,
-                                                  struct dentry *parent,
-                                                  int mode,
-                                                  struct rchan_buf *buf,
-                                                  int *is_global)
-{
-       return debugfs_create_file(filename, mode, parent, buf,
-                                       &relay_file_operations);
-}
-
-static struct rchan_callbacks blk_relay_callbacks = {
-       .subbuf_start           = blk_subbuf_start_callback,
-       .create_buf_file        = blk_create_buf_file_callback,
-       .remove_buf_file        = blk_remove_buf_file_callback,
-};
-
-/*
- * Setup everything required to start tracing
- */
-int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-                       struct blk_user_trace_setup *buts)
-{
-       struct blk_trace *old_bt, *bt = NULL;
-       struct dentry *dir = NULL;
-       int ret, i;
-
-       if (!buts->buf_size || !buts->buf_nr)
-               return -EINVAL;
-
-       strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
-       buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
-
-       /*
-        * some device names have larger paths - convert the slashes
-        * to underscores for this to work as expected
-        */
-       for (i = 0; i < strlen(buts->name); i++)
-               if (buts->name[i] == '/')
-                       buts->name[i] = '_';
-
-       ret = -ENOMEM;
-       bt = kzalloc(sizeof(*bt), GFP_KERNEL);
-       if (!bt)
-               goto err;
-
-       bt->sequence = alloc_percpu(unsigned long);
-       if (!bt->sequence)
-               goto err;
-
-       bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
-       if (!bt->msg_data)
-               goto err;
-
-       ret = -ENOENT;
-
-       if (!blk_tree_root) {
-               blk_tree_root = debugfs_create_dir("block", NULL);
-               if (!blk_tree_root)
-                       return -ENOMEM;
-       }
-
-       dir = debugfs_create_dir(buts->name, blk_tree_root);
-
-       if (!dir)
-               goto err;
-
-       bt->dir = dir;
-       bt->dev = dev;
-       atomic_set(&bt->dropped, 0);
-
-       ret = -EIO;
-       bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
-       if (!bt->dropped_file)
-               goto err;
-
-       bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
-       if (!bt->msg_file)
-               goto err;
-
-       bt->rchan = relay_open("trace", dir, buts->buf_size,
-                               buts->buf_nr, &blk_relay_callbacks, bt);
-       if (!bt->rchan)
-               goto err;
-
-       bt->act_mask = buts->act_mask;
-       if (!bt->act_mask)
-               bt->act_mask = (u16) -1;
-
-       bt->start_lba = buts->start_lba;
-       bt->end_lba = buts->end_lba;
-       if (!bt->end_lba)
-               bt->end_lba = -1ULL;
-
-       bt->pid = buts->pid;
-       bt->trace_state = Blktrace_setup;
-
-       mutex_lock(&blk_probe_mutex);
-       if (atomic_add_return(1, &blk_probes_ref) == 1) {
-               ret = blk_register_tracepoints();
-               if (ret)
-                       goto probe_err;
-       }
-       mutex_unlock(&blk_probe_mutex);
-
-       ret = -EBUSY;
-       old_bt = xchg(&q->blk_trace, bt);
-       if (old_bt) {
-               (void) xchg(&q->blk_trace, old_bt);
-               goto err;
-       }
-
-       return 0;
-probe_err:
-       atomic_dec(&blk_probes_ref);
-       mutex_unlock(&blk_probe_mutex);
-err:
-       if (bt) {
-               if (bt->msg_file)
-                       debugfs_remove(bt->msg_file);
-               if (bt->dropped_file)
-                       debugfs_remove(bt->dropped_file);
-               free_percpu(bt->sequence);
-               free_percpu(bt->msg_data);
-               if (bt->rchan)
-                       relay_close(bt->rchan);
-               kfree(bt);
-       }
-       return ret;
-}
-
-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-                   char __user *arg)
-{
-       struct blk_user_trace_setup buts;
-       int ret;
-
-       ret = copy_from_user(&buts, arg, sizeof(buts));
-       if (ret)
-               return -EFAULT;
-
-       ret = do_blk_trace_setup(q, name, dev, &buts);
-       if (ret)
-               return ret;
-
-       if (copy_to_user(arg, &buts, sizeof(buts)))
-               return -EFAULT;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(blk_trace_setup);
-
-int blk_trace_startstop(struct request_queue *q, int start)
-{
-       struct blk_trace *bt;
-       int ret;
-
-       if ((bt = q->blk_trace) == NULL)
-               return -EINVAL;
-
-       /*
-        * For starting a trace, we can transition from a setup or stopped
-        * trace. For stopping a trace, the state must be running
-        */
-       ret = -EINVAL;
-       if (start) {
-               if (bt->trace_state == Blktrace_setup ||
-                   bt->trace_state == Blktrace_stopped) {
-                       blktrace_seq++;
-                       smp_mb();
-                       bt->trace_state = Blktrace_running;
-
-                       trace_note_time(bt);
-                       ret = 0;
-               }
-       } else {
-               if (bt->trace_state == Blktrace_running) {
-                       bt->trace_state = Blktrace_stopped;
-                       relay_flush(bt->rchan);
-                       ret = 0;
-               }
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(blk_trace_startstop);
-
-/**
- * blk_trace_ioctl: - handle the ioctls associated with tracing
- * @bdev:      the block device
- * @cmd:       the ioctl cmd
- * @arg:       the argument data, if any
- *
- **/
-int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
-{
-       struct request_queue *q;
-       int ret, start = 0;
-       char b[BDEVNAME_SIZE];
-
-       q = bdev_get_queue(bdev);
-       if (!q)
-               return -ENXIO;
-
-       mutex_lock(&bdev->bd_mutex);
-
-       switch (cmd) {
-       case BLKTRACESETUP:
-               bdevname(bdev, b);
-               ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
-               break;
-       case BLKTRACESTART:
-               start = 1;
-       case BLKTRACESTOP:
-               ret = blk_trace_startstop(q, start);
-               break;
-       case BLKTRACETEARDOWN:
-               ret = blk_trace_remove(q);
-               break;
-       default:
-               ret = -ENOTTY;
-               break;
-       }
-
-       mutex_unlock(&bdev->bd_mutex);
-       return ret;
-}
-
-/**
- * blk_trace_shutdown: - stop and cleanup trace structures
- * @q:    the request queue associated with the device
- *
- **/
-void blk_trace_shutdown(struct request_queue *q)
-{
-       if (q->blk_trace) {
-               blk_trace_startstop(q, 0);
-               blk_trace_remove(q);
-       }
-}
-
-/*
- * blktrace probes
- */
-
-/**
- * blk_add_trace_rq - Add a trace for a request oriented action
- * @q:         queue the io is for
- * @rq:                the source request
- * @what:      the action
- *
- * Description:
- *     Records an action against a request. Will log the bio offset + size.
- *
- **/
-static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-                                   u32 what)
-{
-       struct blk_trace *bt = q->blk_trace;
-       int rw = rq->cmd_flags & 0x03;
-
-       if (likely(!bt))
-               return;
-
-       if (blk_discard_rq(rq))
-               rw |= (1 << BIO_RW_DISCARD);
-
-       if (blk_pc_request(rq)) {
-               what |= BLK_TC_ACT(BLK_TC_PC);
-               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
-                               sizeof(rq->cmd), rq->cmd);
-       } else  {
-               what |= BLK_TC_ACT(BLK_TC_FS);
-               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-                               rw, what, rq->errors, 0, NULL);
-       }
-}
-
-static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
-{
-       blk_add_trace_rq(q, rq, BLK_TA_ABORT);
-}
-
-static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
-{
-       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
-}
-
-static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
-{
-       blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
-}
-
-static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
-{
-       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
-}
-
-static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
-{
-       blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
-}
-
-/**
- * blk_add_trace_bio - Add a trace for a bio oriented action
- * @q:         queue the io is for
- * @bio:       the source bio
- * @what:      the action
- *
- * Description:
- *     Records an action against a bio. Will log the bio offset + size.
- *
- **/
-static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                                    u32 what)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (likely(!bt))
-               return;
-
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-                       !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-}
-
-static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
-{
-       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
-}
-
-static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
-{
-       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
-}
-
-static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
-{
-       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-}
-
-static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
-{
-       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
-}
-
-static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
-{
-       blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
-}
-
-static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
-{
-       if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
-       else {
-               struct blk_trace *bt = q->blk_trace;
-
-               if (bt)
-                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
-       }
-}
-
-
-static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
-{
-       if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
-       else {
-               struct blk_trace *bt = q->blk_trace;
-
-               if (bt)
-                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
-       }
-}
-
-static void blk_add_trace_plug(struct request_queue *q)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (bt)
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
-}
-
-static void blk_add_trace_unplug_io(struct request_queue *q)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
-
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-                               sizeof(rpdu), &rpdu);
-       }
-}
-
-static void blk_add_trace_unplug_timer(struct request_queue *q)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
-
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
-                               sizeof(rpdu), &rpdu);
-       }
-}
-
-static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
-                               unsigned int pdu)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (bt) {
-               __be64 rpdu = cpu_to_be64(pdu);
-
-               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                               BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
-                               sizeof(rpdu), &rpdu);
-       }
-}
-
-/**
- * blk_add_trace_remap - Add a trace for a remap operation
- * @q:         queue the io is for
- * @bio:       the source bio
- * @dev:       target device
- * @from:      source sector
- * @to:                target sector
- *
- * Description:
- *     Device mapper or raid target sometimes need to split a bio because
- *     it spans a stripe (or similar). Add a trace for that action.
- *
- **/
-static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
-                                      dev_t dev, sector_t from, sector_t to)
-{
-       struct blk_trace *bt = q->blk_trace;
-       struct blk_io_trace_remap r;
-
-       if (likely(!bt))
-               return;
-
-       r.device = cpu_to_be32(dev);
-       r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
-       r.sector = cpu_to_be64(to);
-
-       __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
-                       !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-}
-
-/**
- * blk_add_driver_data - Add binary message with driver-specific data
- * @q:         queue the io is for
- * @rq:                io request
- * @data:      driver-specific data
- * @len:       length of driver-specific data
- *
- * Description:
- *     Some drivers might want to write driver-specific data per request.
- *
- **/
-void blk_add_driver_data(struct request_queue *q,
-                        struct request *rq,
-                        void *data, size_t len)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (likely(!bt))
-               return;
-
-       if (blk_pc_request(rq))
-               __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
-                               rq->errors, len, data);
-       else
-               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-                               0, BLK_TA_DRV_DATA, rq->errors, len, data);
-}
-EXPORT_SYMBOL_GPL(blk_add_driver_data);
-
-static int blk_register_tracepoints(void)
-{
-       int ret;
-
-       ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
-       WARN_ON(ret);
-       ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
-       WARN_ON(ret);
-       ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
-       WARN_ON(ret);
-       ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
-       WARN_ON(ret);
-       ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
-       WARN_ON(ret);
-       ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
-       WARN_ON(ret);
-       ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
-       WARN_ON(ret);
-       ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
-       WARN_ON(ret);
-       ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
-       WARN_ON(ret);
-       ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
-       WARN_ON(ret);
-       ret = register_trace_block_getrq(blk_add_trace_getrq);
-       WARN_ON(ret);
-       ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
-       WARN_ON(ret);
-       ret = register_trace_block_plug(blk_add_trace_plug);
-       WARN_ON(ret);
-       ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
-       WARN_ON(ret);
-       ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
-       WARN_ON(ret);
-       ret = register_trace_block_split(blk_add_trace_split);
-       WARN_ON(ret);
-       ret = register_trace_block_remap(blk_add_trace_remap);
-       WARN_ON(ret);
-       return 0;
-}
-
-static void blk_unregister_tracepoints(void)
-{
-       unregister_trace_block_remap(blk_add_trace_remap);
-       unregister_trace_block_split(blk_add_trace_split);
-       unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
-       unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
-       unregister_trace_block_plug(blk_add_trace_plug);
-       unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
-       unregister_trace_block_getrq(blk_add_trace_getrq);
-       unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
-       unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
-       unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
-       unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
-       unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
-       unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
-       unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
-       unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
-       unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
-       unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
-
-       tracepoint_synchronize_unregister();
-}
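
block/blktrace.c is deleted here, but the declarations kept in include/linux/blktrace_api.h further down (blk_trace_setup(), blk_trace_startstop(), blk_trace_remove(), plus the new blk_trace_attr_group) indicate the implementation moves rather than disappears, so the ioctl ABI served by blk_trace_ioctl() above is unchanged. As a reminder of that ABI, a hypothetical minimal userspace sketch; device path, buffer sizes, and error handling are illustrative, and in practice the blktrace(8) tools do all of this for you:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>            /* BLKTRACESETUP and friends */
#include <linux/blktrace_api.h>  /* struct blk_user_trace_setup */

int main(void)
{
        struct blk_user_trace_setup buts;
        int fd = open("/dev/sda", O_RDONLY);    /* illustrative device */

        if (fd < 0)
                return 1;

        memset(&buts, 0, sizeof(buts));
        buts.buf_size = 512 * 1024;     /* relay sub-buffer size */
        buts.buf_nr   = 4;              /* number of sub-buffers */
        buts.act_mask = 0;              /* 0 -> kernel traces all actions */

        if (ioctl(fd, BLKTRACESETUP, &buts) < 0)
                return 1;
        /* buts.name now names the debugfs directory: block/<name>/trace<cpu> */
        printf("tracing into debugfs block/%s\n", buts.name);

        ioctl(fd, BLKTRACESTART, 0);
        /* ... read the relay files, or let blktrace(8) do it ... */
        ioctl(fd, BLKTRACESTOP, 0);
        ioctl(fd, BLKTRACETEARDOWN, 0);
        return 0;
}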
index 33a9351..30659ce 100644 (file)
@@ -283,7 +283,7 @@ static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
 }
 static struct sysrq_key_op sysrq_ftrace_dump_op = {
        .handler        = sysrq_ftrace_dump,
-       .help_msg       = "dumpZ-ftrace-buffer",
+       .help_msg       = "dump-ftrace-buffer(Z)",
        .action_msg     = "Dump ftrace buffer",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
index e76d715..f0e99d4 100644 (file)
@@ -161,7 +161,7 @@ struct op_sample
 {
        entry->event = ring_buffer_lock_reserve
                (op_ring_buffer_write, sizeof(struct op_sample) +
-                size * sizeof(entry->sample->data[0]), &entry->irq_flags);
+                size * sizeof(entry->sample->data[0]));
        if (entry->event)
                entry->sample = ring_buffer_event_data(entry->event);
        else
@@ -178,8 +178,7 @@ struct op_sample
 
 int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-       return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
-                                        entry->irq_flags);
+       return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
 }
 
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
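
The two oprofile hunks above track a ring-buffer API change: ring_buffer_lock_reserve() no longer takes the caller's irq_flags, and ring_buffer_unlock_commit() drops that parameter as well. A condensed sketch of the resulting write path, with an assumed buffer handle and payload, purely for illustration:

#include <linux/errno.h>
#include <linux/ring_buffer.h>
#include <linux/string.h>

/* Reserve space, fill it, commit it -- the post-change calling convention. */
static int example_rb_write(struct ring_buffer *buffer,
                            const void *payload, size_t len)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (!event)
                return -ENOMEM;

        memcpy(ring_buffer_event_data(event), payload, len);

        return ring_buffer_unlock_commit(buffer, event);
}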
index 6d72024..8a17f7e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kmod.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
+#include <linux/blktrace_api.h>
 
 #include "check.h"
 
@@ -294,6 +295,9 @@ static struct attribute_group part_attr_group = {
 
 static struct attribute_group *part_attr_groups[] = {
        &part_attr_group,
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+       &blk_trace_attr_group,
+#endif
        NULL
 };
 
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index 5406e70..0e0f39b 100644 (file)
 #define BRANCH_PROFILE()
 #endif
 
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS()        VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
+                       *(_ftrace_events)                               \
+                       VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
+                        *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+                        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#else
+#define TRACE_PRINTKS()
+#endif
+
 /* .data section */
 #define DATA_DATA                                                      \
        *(.data)                                                        \
@@ -81,7 +97,9 @@
        *(__tracepoints)                                                \
        VMLINUX_SYMBOL(__stop___tracepoints) = .;                       \
        LIKELY_PROFILE()                                                \
-       BRANCH_PROFILE()
+       BRANCH_PROFILE()                                                \
+       TRACE_PRINTKS()                                                 \
+       FTRACE_EVENTS()
 
 #define RO_DATA(align)                                                 \
        . = ALIGN((align));                                             \
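
The new FTRACE_EVENTS() and TRACE_PRINTKS() macros above use the usual linker-section pattern: each section is bracketed by __start_*/__stop_* symbols so a consumer can walk every entry that object files contribute (trace_printk(), added to kernel.h later in this diff, places its format-string pointers in __trace_printk_fmt). A generic, hypothetical consumer follows; only the symbol and section names come from the macros above, the walker itself is not the tracer's actual code:

#include <linux/kernel.h>

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

/* Walk every format string collected into the __trace_printk_fmt section. */
static void example_dump_trace_printk_fmts(void)
{
        const char **fmt;

        for (fmt = __start___trace_bprintk_fmt;
             fmt < __stop___trace_bprintk_fmt; fmt++)
                printk(KERN_INFO "trace_printk format: %s\n", *fmt);
}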
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
index 6e91587..d960889 100644 (file)
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
 
 #ifdef __KERNEL__
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
 struct blk_trace {
        int trace_state;
        struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
 
+extern struct attribute_group blk_trace_attr_group;
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)                (-ENOTTY)
 #define blk_trace_shutdown(q)                  do { } while (0)
index 677432b..e1583f2 100644 (file)
@@ -1,15 +1,18 @@
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
 
-#include <linux/linkage.h>
-#include <linux/fs.h>
-#include <linux/ktime.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/trace_clock.h>
 #include <linux/kallsyms.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
 #include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/ftrace.h>
 
 #ifdef CONFIG_FUNCTION_TRACER
 
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
                   loff_t *ppos);
 #endif
 
+struct ftrace_func_command {
+       struct list_head        list;
+       char                    *name;
+       int                     (*func)(char *func, char *cmd,
+                                       char *params, int enable);
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
-#include <asm/ftrace.h>
+
+int ftrace_arch_code_modify_prepare(void);
+int ftrace_arch_code_modify_post_process(void);
+
+struct seq_file;
+
+struct ftrace_probe_ops {
+       void                    (*func)(unsigned long ip,
+                                       unsigned long parent_ip,
+                                       void **data);
+       int                     (*callback)(unsigned long ip, void **data);
+       void                    (*free)(void **data);
+       int                     (*print)(struct seq_file *m,
+                                        unsigned long ip,
+                                        struct ftrace_probe_ops *ops,
+                                        void *data);
+};
+
+extern int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                             void *data);
+extern void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                               void *data);
+extern void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
 
 enum {
        FTRACE_FL_FREE          = (1 << 0),
@@ -119,6 +154,9 @@ struct dyn_ftrace {
 int ftrace_force_update(void);
 void ftrace_set_filter(unsigned char *buf, int len, int reset);
 
+int register_ftrace_command(struct ftrace_func_command *cmd);
+int unregister_ftrace_command(struct ftrace_func_command *cmd);
+
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
 extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +164,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+
+#ifndef FTRACE_ADDR
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +178,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 #endif
 
 /**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
  * @mod: module structure if called by module load initialization
  * @rec: the mcount call site record
  * @addr: the address that the call site should be calling
@@ -181,7 +223,6 @@ extern int ftrace_make_nop(struct module *mod,
  */
 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 
-
 /* May be defined in arch */
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
@@ -198,6 +239,14 @@ extern void ftrace_enable_daemon(void);
 # define ftrace_disable_daemon()               do { } while (0)
 # define ftrace_enable_daemon()                        do { } while (0)
 static inline void ftrace_release(void *start, unsigned long size) { }
+static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+       return -EINVAL;
+}
+static inline int unregister_ftrace_command(char *cmd_name)
+{
+       return -EINVAL;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* totally disable ftrace - can not re-enable after this */
@@ -233,24 +282,25 @@ static inline void __ftrace_enabled_restore(int enabled)
 #endif
 }
 
-#ifdef CONFIG_FRAME_POINTER
-/* TODO: need to fix this for ARM */
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
-#else
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 0UL
-# define CALLER_ADDR2 0UL
-# define CALLER_ADDR3 0UL
-# define CALLER_ADDR4 0UL
-# define CALLER_ADDR5 0UL
-# define CALLER_ADDR6 0UL
-#endif
+#ifndef HAVE_ARCH_CALLER_ADDR
+# ifdef CONFIG_FRAME_POINTER
+#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+# else
+#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#  define CALLER_ADDR1 0UL
+#  define CALLER_ADDR2 0UL
+#  define CALLER_ADDR3 0UL
+#  define CALLER_ADDR4 0UL
+#  define CALLER_ADDR5 0UL
+#  define CALLER_ADDR6 0UL
+# endif
+#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
 
 #ifdef CONFIG_IRQSOFF_TRACER
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +318,6 @@ static inline void __ftrace_enabled_restore(int enabled)
 # define trace_preempt_off(a0, a1)             do { } while (0)
 #endif
 
-#ifdef CONFIG_TRACING
-extern int ftrace_dump_on_oops;
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
-
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
-/**
- * ftrace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __ftrace_printk is an internal function for ftrace_printk and
- *       the @ip is passed in via the ftrace_printk macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving ftrace_printks scattered around in
- * your code.
- */
-# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
-extern int
-__ftrace_printk(unsigned long ip, const char *fmt, ...)
-       __attribute__ ((format (printf, 2, 3)));
-extern void ftrace_dump(void);
-#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
-static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
-
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
-static inline int
-ftrace_printk(const char *fmt, ...)
-{
-       return 0;
-}
-static inline void ftrace_dump(void) { }
-#endif
-
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
 extern void ftrace_init_module(struct module *mod,
@@ -327,36 +329,6 @@ ftrace_init_module(struct module *mod,
                   unsigned long *start, unsigned long *end) { }
 #endif
 
-enum {
-       POWER_NONE = 0,
-       POWER_CSTATE = 1,
-       POWER_PSTATE = 2,
-};
-
-struct power_trace {
-#ifdef CONFIG_POWER_TRACER
-       ktime_t                 stamp;
-       ktime_t                 end;
-       int                     type;
-       int                     state;
-#endif
-};
-
-#ifdef CONFIG_POWER_TRACER
-extern void trace_power_start(struct power_trace *it, unsigned int type,
-                                       unsigned int state);
-extern void trace_power_mark(struct power_trace *it, unsigned int type,
-                                       unsigned int state);
-extern void trace_power_end(struct power_trace *it);
-#else
-static inline void trace_power_start(struct power_trace *it, unsigned int type,
-                                       unsigned int state) { }
-static inline void trace_power_mark(struct power_trace *it, unsigned int type,
-                                       unsigned int state) { }
-static inline void trace_power_end(struct power_trace *it) { }
-#endif
-
-
 /*
  * Structure that defines an entry function trace.
  */
@@ -379,6 +351,30 @@ struct ftrace_graph_ret {
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+       unsigned long ret;
+       unsigned long func;
+       unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler().
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                        unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
 /*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want them to keep traced by the usual function
@@ -490,6 +486,21 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
        return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
+extern int ftrace_dump_on_oops;
+
 #endif /* CONFIG_TRACING */
 
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
 #endif /* _LINUX_FTRACE_H */
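
Among the additions to this header are struct ftrace_func_command and register_ftrace_command()/unregister_ftrace_command(), which let code hook extra commands into the function-filter parsing. A hypothetical registration, with made-up command, function, and message names, might look as follows; only the structure layout and the registration calls come from the hunks above, and the filter syntax in the comment is an assumption:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Invoked when the command appears in the filter file, e.g. something like
 * "echo 'do_fork:example:5' > set_ftrace_filter" (illustrative syntax). */
static int example_cmd_func(char *func, char *cmd, char *params, int enable)
{
        pr_info("ftrace command %s: func=%s params=%s enable=%d\n",
                cmd, func, params ? params : "(none)", enable);
        return 0;
}

static struct ftrace_func_command example_cmd = {
        .name   = "example",
        .func   = example_cmd_func,
};

static int __init example_cmd_init(void)
{
        return register_ftrace_command(&example_cmd);
}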
index 366a054..dca7bf8 100644 (file)
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+#ifdef CONFIG_FTRACE_NMI_ENTER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
index f832883..faa1cf8 100644 (file)
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
  *
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
  *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
  */
 #define PREEMPT_BITS   8
 #define SOFTIRQ_BITS   8
+#define NMI_BITS       1
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS   12
+#define MAX_HARDIRQ_BITS 10
 
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS  MAX_HARDIRQ_BITS
 #endif
 
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
 #endif
 
 #define PREEMPT_SHIFT  0
 #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)
 
 #define __IRQ_MASK(x)  ((1UL << (x))-1)
 
 #define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
 #define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK       (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
 
 #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET     (1UL << NMI_SHIFT)
 
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
 
 #define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
 #define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
-#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+                                | NMI_MASK))
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
 #define in_softirq()           (softirq_count())
 #define in_interrupt()         (irq_count())
 
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()       (preempt_count() & NMI_MASK)
+
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
 # define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()                            \
-       do {                                    \
-               ftrace_nmi_enter();             \
-               lockdep_off();                  \
-               rcu_nmi_enter();                \
-               __irq_enter();                  \
+#define nmi_enter()                                            \
+       do {                                                    \
+               ftrace_nmi_enter();                             \
+               BUG_ON(in_nmi());                               \
+               add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+               lockdep_off();                                  \
+               rcu_nmi_enter();                                \
+               trace_hardirq_enter();                          \
        } while (0)
 
-#define nmi_exit()                             \
-       do {                                    \
-               __irq_exit();                   \
-               rcu_nmi_exit();                 \
-               lockdep_on();                   \
-               ftrace_nmi_exit();              \
+#define nmi_exit()                                             \
+       do {                                                    \
+               trace_hardirq_exit();                           \
+               rcu_nmi_exit();                                 \
+               lockdep_on();                                   \
+               BUG_ON(!in_nmi());                              \
+               sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+               ftrace_nmi_exit();                              \
        } while (0)
 
 #endif /* LINUX_HARDIRQ_H */
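
With the NMI bit now carried in preempt_count(), code that must stay NMI-safe can test in_nmi() directly instead of maintaining a private counter; that is exactly the simplification applied to prepare_ftrace_return() near the top of this diff. A small hypothetical use, with the lock and function names invented for illustration:

#include <linux/hardirq.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_events;

static void example_record_event(void)
{
        /*
         * Taking example_lock from NMI context could deadlock against the
         * interrupted lock holder, so bail out early.
         */
        if (in_nmi())
                return;

        spin_lock(&example_lock);
        example_events++;
        spin_unlock(&example_lock);
}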
index 7fa3718..7742798 100644 (file)
@@ -242,6 +242,19 @@ extern struct ratelimit_state printk_ratelimit_state;
 extern int printk_ratelimit(void);
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
                                   unsigned int interval_msec);
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define printk_once(x...) ({                   \
+       static int __print_once = 1;            \
+                                               \
+       if (__print_once) {                     \
+               __print_once = 0;               \
+               printk(x);                      \
+       }                                       \
+})
+
 #else
 static inline int vprintk(const char *s, va_list args)
        __attribute__ ((format (printf, 1, 0)));
@@ -253,6 +266,10 @@ static inline int printk_ratelimit(void) { return 0; }
 static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
                                          unsigned int interval_msec)   \
                { return false; }
+
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_once(x...) printk(x)
+
 #endif
 
 extern int printk_needs_cpu(int cpu);
@@ -367,6 +384,125 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
        ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
 #endif
 
+/*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_on/tracing_off and tracing_start()/tracing_stop
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space debugfs/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop/tracing_start has slightly more overhead. It is used
+ * by things like suspend to ram where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
+#ifdef CONFIG_RING_BUFFER
+void tracing_on(void);
+void tracing_off(void);
+/* tracing_off_permanent stops recording with no way to bring it back */
+void tracing_off_permanent(void);
+int tracing_is_on(void);
+#else
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline void tracing_off_permanent(void) { }
+static inline int tracing_is_on(void) { return 0; }
+#endif
+#ifdef CONFIG_TRACING
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+static inline void __attribute__ ((format (printf, 1, 2)))
+____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...)                      \
+do {                                                                   \
+       if (0)                                                          \
+               ____trace_printk_check_format(fmt, ##args);             \
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ *       the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code.
+ */
+
+#define trace_printk(fmt, args...)                                     \
+do {                                                                   \
+       static const char *trace_printk_fmt                             \
+       __attribute__((section("__trace_printk_fmt")));                 \
+                                                                       \
+       if (!trace_printk_fmt)                                          \
+               trace_printk_fmt = fmt;                                 \
+                                                                       \
+       __trace_printk_check_format(fmt, ##args);                       \
+       __trace_printk(_THIS_IP_, trace_printk_fmt, ##args);            \
+} while (0)
+
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+       __attribute__ ((format (printf, 2, 3)));
+
+#define ftrace_vprintk(fmt, vargs)                                     \
+do {                                                                   \
+       static const char *trace_printk_fmt                             \
+       __attribute__((section("__trace_printk_fmt")));                 \
+                                                                       \
+       if (!trace_printk_fmt)                                          \
+               trace_printk_fmt = fmt;                                 \
+                                                                       \
+       __ftrace_vprintk(_THIS_IP_, trace_printk_fmt, vargs);           \
+} while (0)
+
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(void);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+trace_printk(const char *fmt, ...)
+{
+       return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+       return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif /* CONFIG_TRACING */
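
As a minimal usage sketch of how the helpers above combine in practice (the function and the latency threshold below are hypothetical, not part of this patch):

	#include <linux/kernel.h>

	/* Record a value in the ftrace buffer on a hot path, and freeze the
	 * ring buffer once the interesting condition has been seen. */
	static void example_check_latency(int cpu, u64 latency_ns)
	{
		trace_printk("cpu=%d latency=%llu ns\n",
			     cpu, (unsigned long long)latency_ns);

		if (latency_ns > 1000000) {
			/* Keep the events leading up to the bad case; resume
			 * from user space: echo 1 > debugfs/tracing/tracing_on */
			tracing_off();
		}
	}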
+
 /*
  *      Display an IP address in readable format.
  */
index 23bf02f..5a58ea3 100644 (file)
@@ -20,43 +20,10 @@ struct lockdep_map;
 #include <linux/stacktrace.h>
 
 /*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
  */
-enum lock_usage_bit
-{
-       LOCK_USED = 0,
-       LOCK_USED_IN_HARDIRQ,
-       LOCK_USED_IN_SOFTIRQ,
-       LOCK_ENABLED_SOFTIRQS,
-       LOCK_ENABLED_HARDIRQS,
-       LOCK_USED_IN_HARDIRQ_READ,
-       LOCK_USED_IN_SOFTIRQ_READ,
-       LOCK_ENABLED_SOFTIRQS_READ,
-       LOCK_ENABLED_HARDIRQS_READ,
-       LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED                     (1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ          (1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ          (1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_ENABLED_HARDIRQS         (1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS         (1 << LOCK_ENABLED_SOFTIRQS)
-
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ     (1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ     (1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ    (1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ    (1 << LOCK_ENABLED_SOFTIRQS_READ)
-
-#define LOCKF_ENABLED_IRQS_READ \
-               (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-               (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES          (1+3*4)
 
 #define MAX_LOCKDEP_SUBCLASSES         8UL
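
For reference, the count works out as follows, assuming the per-state bit layout that kernel/lockdep_states.h generates (the exact state names are an assumption here):

	/*
	 * 1 bit:  LOCK_USED
	 * 4 bits per state: USED_IN_<STATE>, USED_IN_<STATE>_READ,
	 *                   ENABLED_<STATE>, ENABLED_<STATE>_READ
	 * 3 states (e.g. HARDIRQ, SOFTIRQ, RECLAIM_FS)
	 *
	 *   1 + 3 * 4 = 13 == XXX_LOCK_USAGE_STATES
	 */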
 
@@ -97,7 +64,7 @@ struct lock_class {
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
-       struct stack_trace              usage_traces[LOCK_USAGE_STATES];
+       struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];
 
        /*
         * These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP                          .lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP                          .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)     (debug_locks ? (tsk)->lockdep_depth : 0)
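
A minimal sketch of the intended call pattern for the new reclaim-state hooks (the function below is hypothetical; the real caller is the direct-reclaim path):

	#include <linux/lockdep.h>
	#include <linux/gfp.h>

	static void example_direct_reclaim(gfp_t gfp_mask)
	{
		/* Tell lockdep that everything below runs in reclaim context. */
		lockdep_set_current_reclaim_state(gfp_mask);

		/* ... shrink caches, write back dirty pages ... */

		lockdep_clear_current_reclaim_state();
	}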
 
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)                 do { } while (0)
 # define lock_set_class(l, n, k, s, i)         do { } while (0)
 # define lock_set_subclass(l, s, i)            do { } while (0)
+# define lockdep_set_current_reclaim_state(g)  do { } while (0)
+# define lockdep_clear_current_reclaim_state() do { } while (0)
+# define lockdep_trace_alloc(g)                        do { } while (0)
 # define lockdep_init()                                do { } while (0)
 # define lockdep_info()                                do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
index 3fdc108..86a6c0f 100644 (file)
@@ -99,4 +99,10 @@ enum mem_add_context { BOOT, HOTPLUG };
 #define hotplug_memory_notifier(fn, pri) do { } while (0)
 #endif
 
+/*
+ * Kernel text modification mutex, used for code patching. Users of this lock
+ * can sleep.
+ */
+extern struct mutex text_mutex;
+
 #endif /* _LINUX_MEMORY_H_ */
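
A minimal sketch of the locking pattern text_mutex is meant for (the helper below is hypothetical; the users wired up in this series are kprobes and the x86 text-poking code, see the kernel/kprobes.c hunks further down):

	#include <linux/memory.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	static void example_patch_text(void *addr, const void *opcode, size_t len)
	{
		/* Serialize with all other writers of kernel text; may sleep. */
		mutex_lock(&text_mutex);

		/* ... rewrite the instruction bytes at addr ... */

		mutex_unlock(&text_mutex);
	}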
index 145a755..22d9878 100644 (file)
@@ -329,6 +329,11 @@ struct module
        unsigned int num_tracepoints;
 #endif
 
+#ifdef CONFIG_TRACING
+       const char **trace_bprintk_fmt_start;
+       unsigned int num_trace_bprintk_fmt;
+#endif
+
 #ifdef CONFIG_MODULE_UNLOAD
        /* What modules depend on me? */
        struct list_head modules_which_use_me;
index 7a0e5c4..3069ec7 100644 (file)
@@ -50,8 +50,10 @@ struct mutex {
        atomic_t                count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
        struct thread_info      *owner;
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
        const char              *name;
        void                    *magic;
 #endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
        struct list_head        list;
        struct task_struct      *task;
 #ifdef CONFIG_DEBUG_MUTEXES
-       struct mutex            *lock;
        void                    *magic;
 #endif
 };
index b3b3596..b1a0068 100644 (file)
@@ -8,7 +8,7 @@ struct ring_buffer;
 struct ring_buffer_iter;
 
 /*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly; use the functions below.
  */
 struct ring_buffer_event {
        u32             type:2, len:3, time_delta:27;
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-                        unsigned long length,
-                        unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+                                                  unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-                             struct ring_buffer_event *event,
-                             unsigned long flags);
+                             struct ring_buffer_event *event);
 int ring_buffer_write(struct ring_buffer *buffer,
                      unsigned long length, void *data);
 
@@ -124,14 +121,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 u64 ring_buffer_time_stamp(int cpu);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
 
-void tracing_on(void);
-void tracing_off(void);
-void tracing_off_permanent(void);
+size_t ring_buffer_page_len(void *page);
+
 
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer,
-                         void **data_page, int cpu, int full);
+int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+                         size_t len, int cpu, int full);
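
To illustrate the updated reserve/commit API (note the irq-flags parameter is gone from both calls), a minimal writer might look like this; the u64 payload is made up, and ring_buffer_event_data() is the existing accessor for the event body:

	#include <linux/ring_buffer.h>
	#include <linux/errno.h>
	#include <linux/string.h>

	static int example_write_u64(struct ring_buffer *buffer, u64 val)
	{
		struct ring_buffer_event *event;

		event = ring_buffer_lock_reserve(buffer, sizeof(val));
		if (!event)
			return -EBUSY;

		memcpy(ring_buffer_event_data(event), &val, sizeof(val));

		return ring_buffer_unlock_commit(buffer, event);
	}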
 
 enum ring_buffer_flags {
        RB_FL_OVERWRITE         = 1 << 0,
index a7c7698..5b9424e 100644 (file)
@@ -137,6 +137,8 @@ extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
+extern unsigned long get_parent_ip(unsigned long addr);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -331,7 +333,9 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -1327,6 +1331,7 @@ struct task_struct {
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
+       gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
@@ -1669,6 +1674,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
        return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures that select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their
+ * arch Kconfig can set this to 1 if, during bootup, sched_clock()
+ * turns out to be reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
index 6ca6a7b..f452365 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/page.h>          /* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>         /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <trace/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
+       return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+       return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+       struct kmem_cache *cachep;
+       void *ret;
+
        if (__builtin_constant_p(size)) {
                int i = 0;
 
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
-                       return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-                                               flags);
+                       cachep = malloc_sizes[i].cs_dmacachep;
+               else
 #endif
-               return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+                       cachep = malloc_sizes[i].cs_cachep;
+
+               ret = kmem_cache_alloc_notrace(cachep, flags);
+
+               kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+                                    size, slab_buffer_size(cachep), flags);
+
+               return ret;
        }
        return __kmalloc(size, flags);
 }
@@ -59,8 +85,25 @@ found:
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                                          gfp_t flags,
+                                          int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                             gfp_t flags,
+                             int nodeid)
+{
+       return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+       struct kmem_cache *cachep;
+       void *ret;
+
        if (__builtin_constant_p(size)) {
                int i = 0;
 
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
-                       return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-                                               flags, node);
+                       cachep = malloc_sizes[i].cs_dmacachep;
+               else
 #endif
-               return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-                                               flags, node);
+                       cachep = malloc_sizes[i].cs_cachep;
+
+               ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+                                         ret, size, slab_buffer_size(cachep),
+                                         flags, node);
+
+               return ret;
        }
        return __kmalloc_node(size, flags, node);
 }
index 59a3fa4..0ec00b3 100644 (file)
@@ -3,14 +3,15 @@
 
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+                                             gfp_t flags)
 {
        return kmem_cache_alloc_node(cachep, flags, -1);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return __kmalloc_node(size, flags, node);
 }
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * kmalloc is the normal method of allocating memory
  * in the kernel.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
        return __kmalloc_node(size, flags, -1);
 }
 
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 {
        return kmalloc(size, flags);
 }
index 2f5c16b..9e3a575 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <trace/kmemtrace.h>
 
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
@@ -120,11 +121,24 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow, so this value must be high enough that
+ * performance-critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -204,15 +218,33 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+       return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-       return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+       unsigned int order = get_order(size);
+       void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+                            size, PAGE_SIZE << order, flags);
+
+       return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+       void *ret;
+
        if (__builtin_constant_p(size)) {
-               if (size > PAGE_SIZE)
+               if (size > SLUB_MAX_SIZE)
                        return kmalloc_large(size, flags);
 
                if (!(flags & SLUB_DMA)) {
@@ -221,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
                        if (!s)
                                return ZERO_SIZE_PTR;
 
-                       return kmem_cache_alloc(s, flags);
+                       ret = kmem_cache_alloc_notrace(s, flags);
+
+                       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                                            _THIS_IP_, ret,
+                                            size, s->size, flags);
+
+                       return ret;
                }
        }
        return __kmalloc(size, flags);
@@ -231,16 +269,38 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                          gfp_t gfpflags,
+                                          int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                             gfp_t gfpflags,
+                             int node)
+{
+       return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+       void *ret;
+
        if (__builtin_constant_p(size) &&
-               size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+               size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);
 
                if (!s)
                        return ZERO_SIZE_PTR;
 
-               return kmem_cache_alloc_node(s, flags, node);
+               ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _THIS_IP_, ret,
+                                         size, s->size, flags, node);
+
+               return ret;
        }
        return __kmalloc_node(size, flags, node);
 }
index d18fc19..27ac317 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>    /* for inline */
 #include <linux/types.h>       /* for size_t */
 #include <linux/stddef.h>      /* for NULL */
+#include <stdarg.h>
 
 extern char *strndup_user(const char __user *, long);
 
@@ -111,6 +112,12 @@ extern void argv_free(char **argv);
 
 extern bool sysfs_streq(const char *s1, const char *s2);
 
+#ifdef CONFIG_BINARY_PRINTF
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+#endif
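
A minimal round-trip sketch of the binary printf pair (requires CONFIG_BINARY_PRINTF; this assumes the lib/vsprintf.c convention that the binary buffer size is given in 32-bit words and the text buffer size in bytes):

	#include <linux/string.h>
	#include <linux/kernel.h>

	static void example_binary_printf(int cpu, unsigned long ip)
	{
		u32 bin[32];
		char text[128];

		/* Pack the arguments into a compact binary buffer now... */
		bprintf(bin, ARRAY_SIZE(bin), "cpu=%d ip=%08lx\n", cpu, ip);

		/* ...and expand them into text later, e.g. at read time. */
		bstr_printf(text, sizeof(text), "cpu=%d ip=%08lx\n", bin);

		printk(KERN_DEBUG "%s", text);
	}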
+
 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
                        const void *from, size_t available);
 
index daf9685..51774eb 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/ktime.h>
 #include <linux/stddef.h>
 #include <linux/debugobjects.h>
+#include <linux/stringify.h>
 
 struct tvec_base;
 
@@ -21,52 +22,126 @@ struct timer_list {
        char start_comm[16];
        int start_pid;
 #endif
+#ifdef CONFIG_LOCKDEP
+       struct lockdep_map lockdep_map;
+#endif
 };
 
 extern struct tvec_base boot_tvec_bases;
 
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting the lockdep_map key
+ * (second argument) here is required, otherwise it could be initialised to
+ * the copy of the lockdep_map later! We use the address of the
+ * "<file>:<line>" string as the key, and the string itself as the name,
+ * of the lockdep_map.
+ */
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)                           \
+       .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
+#else
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
+#endif
+
 #define TIMER_INITIALIZER(_function, _expires, _data) {                \
                .entry = { .prev = TIMER_ENTRY_STATIC },        \
                .function = (_function),                        \
                .expires = (_expires),                          \
                .data = (_data),                                \
                .base = &boot_tvec_bases,                       \
+               __TIMER_LOCKDEP_MAP_INITIALIZER(                \
+                       __FILE__ ":" __stringify(__LINE__))     \
        }
 
 #define DEFINE_TIMER(_name, _function, _expires, _data)                \
        struct timer_list _name =                               \
                TIMER_INITIALIZER(_function, _expires, _data)
 
-void init_timer(struct timer_list *timer);
-void init_timer_deferrable(struct timer_list *timer);
+void init_timer_key(struct timer_list *timer,
+                   const char *name,
+                   struct lock_class_key *key);
+void init_timer_deferrable_key(struct timer_list *timer,
+                              const char *name,
+                              struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define init_timer(timer)                                              \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               init_timer_key((timer), #timer, &__key);                \
+       } while (0)
+
+#define init_timer_deferrable(timer)                                   \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               init_timer_deferrable_key((timer), #timer, &__key);     \
+       } while (0)
+
+#define init_timer_on_stack(timer)                                     \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               init_timer_on_stack_key((timer), #timer, &__key);       \
+       } while (0)
+
+#define setup_timer(timer, fn, data)                                   \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               setup_timer_key((timer), #timer, &__key, (fn), (data));\
+       } while (0)
+
+#define setup_timer_on_stack(timer, fn, data)                          \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               setup_timer_on_stack_key((timer), #timer, &__key,       \
+                                        (fn), (data));                 \
+       } while (0)
+#else
+#define init_timer(timer)\
+       init_timer_key((timer), NULL, NULL)
+#define init_timer_deferrable(timer)\
+       init_timer_deferrable_key((timer), NULL, NULL)
+#define init_timer_on_stack(timer)\
+       init_timer_on_stack_key((timer), NULL, NULL)
+#define setup_timer(timer, fn, data)\
+       setup_timer_key((timer), NULL, NULL, (fn), (data))
+#define setup_timer_on_stack(timer, fn, data)\
+       setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
+#endif
 
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack(struct timer_list *timer);
+extern void init_timer_on_stack_key(struct timer_list *timer,
+                                   const char *name,
+                                   struct lock_class_key *key);
 extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
 static inline void destroy_timer_on_stack(struct timer_list *timer) { }
-static inline void init_timer_on_stack(struct timer_list *timer)
+static inline void init_timer_on_stack_key(struct timer_list *timer,
+                                          const char *name,
+                                          struct lock_class_key *key)
 {
-       init_timer(timer);
+       init_timer_key(timer, name, key);
 }
 #endif
 
-static inline void setup_timer(struct timer_list * timer,
+static inline void setup_timer_key(struct timer_list * timer,
+                               const char *name,
+                               struct lock_class_key *key,
                                void (*function)(unsigned long),
                                unsigned long data)
 {
        timer->function = function;
        timer->data = data;
-       init_timer(timer);
+       init_timer_key(timer, name, key);
 }
 
-static inline void setup_timer_on_stack(struct timer_list *timer,
+static inline void setup_timer_on_stack_key(struct timer_list *timer,
+                                       const char *name,
+                                       struct lock_class_key *key,
                                        void (*function)(unsigned long),
                                        unsigned long data)
 {
        timer->function = function;
        timer->data = data;
-       init_timer_on_stack(timer);
+       init_timer_on_stack_key(timer, name, key);
 }
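
Callers are unchanged by this conversion; under CONFIG_LOCKDEP each init/setup site silently gets its own lock class. A minimal sketch (the timer and callback names are made up):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list example_timer;

	static void example_timeout(unsigned long data)
	{
		/* timer callback body */
	}

	static void example_arm_timer(void)
	{
		/* With CONFIG_LOCKDEP this expands to setup_timer_key() with a
		 * static struct lock_class_key unique to this call site, so
		 * lockdep can tell timers initialised here apart from others. */
		setup_timer(&example_timer, example_timeout, 0);
		mod_timer(&example_timer, jiffies + HZ);
	}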
 
 /**
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644 (file)
index 0000000..7a81303
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _LINUX_TRACE_CLOCK_H
+#define _LINUX_TRACE_CLOCK_H
+
+/*
+ * 3 trace clock variants, with differing scalability/precision
+ * tradeoffs:
+ *
+ *  -   local: CPU-local trace clock
+ *  -  medium: scalable global clock with some jitter
+ *  -  global: globally monotonic, serialized clock
+ */
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_local(void);
+extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_global(void);
+
+#endif /* _LINUX_TRACE_CLOCK_H */
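
A minimal sketch of picking a variant: trace_clock_local() for cheap per-CPU timestamps, trace_clock_global() only when events from different CPUs must be totally ordered (the measuring helper is hypothetical):

	#include <linux/trace_clock.h>

	static u64 example_time_region(void (*fn)(void))
	{
		u64 t0 = trace_clock_local();

		fn();

		return trace_clock_local() - t0;
	}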
index 7570054..69b5698 100644 (file)
@@ -31,8 +31,8 @@ struct tracepoint {
                                         * Keep in sync with vmlinux.lds.h.
                                         */
 
-#define TPPROTO(args...)       args
-#define TPARGS(args...)                args
+#define TP_PROTO(args...)      args
+#define TP_ARGS(args...)               args
 
 #ifdef CONFIG_TRACEPOINTS
 
@@ -65,7 +65,7 @@ struct tracepoint {
        {                                                               \
                if (unlikely(__tracepoint_##name.state))                \
                        __DO_TRACE(&__tracepoint_##name,                \
-                               TPPROTO(proto), TPARGS(args));          \
+                               TP_PROTO(proto), TP_ARGS(args));        \
        }                                                               \
        static inline int register_trace_##name(void (*probe)(proto))   \
        {                                                               \
@@ -153,4 +153,11 @@ static inline void tracepoint_synchronize_unregister(void)
        synchronize_sched();
 }
 
+#define PARAMS(args...) args
+#define TRACE_FORMAT(name, proto, args, fmt)           \
+       DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+
+#define TRACE_EVENT(name, proto, args, struct, print, assign)  \
+       DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+
 #endif
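
A minimal sketch of declaring, defining and probing a tracepoint with the renamed TP_PROTO()/TP_ARGS() wrappers; the event name, probe and initcall below are made up:

	#include <linux/tracepoint.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	/* Normally placed in a header under include/trace/: */
	DECLARE_TRACE(example_event,
		TP_PROTO(int value),
		TP_ARGS(value));

	/* Exactly one .c file provides the definition: */
	DEFINE_TRACE(example_event);

	static void example_probe(int value)
	{
		trace_printk("example_event: value=%d\n", value);
	}

	static int __init example_trace_init(void)
	{
		/* A call site would fire the event with: trace_example_event(42); */
		return register_trace_example_event(example_probe);
	}
	device_initcall(example_trace_init);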
index 25c6a1f..25b7068 100644 (file)
@@ -5,72 +5,72 @@
 #include <linux/tracepoint.h>
 
 DECLARE_TRACE(block_rq_abort,
-       TPPROTO(struct request_queue *q, struct request *rq),
-               TPARGS(q, rq));
+       TP_PROTO(struct request_queue *q, struct request *rq),
+             TP_ARGS(q, rq));
 
 DECLARE_TRACE(block_rq_insert,
-       TPPROTO(struct request_queue *q, struct request *rq),
-               TPARGS(q, rq));
+       TP_PROTO(struct request_queue *q, struct request *rq),
+             TP_ARGS(q, rq));
 
 DECLARE_TRACE(block_rq_issue,
-       TPPROTO(struct request_queue *q, struct request *rq),
-               TPARGS(q, rq));
+       TP_PROTO(struct request_queue *q, struct request *rq),
+             TP_ARGS(q, rq));
 
 DECLARE_TRACE(block_rq_requeue,
-       TPPROTO(struct request_queue *q, struct request *rq),
-               TPARGS(q, rq));
+       TP_PROTO(struct request_queue *q, struct request *rq),
+             TP_ARGS(q, rq));
 
 DECLARE_TRACE(block_rq_complete,
-       TPPROTO(struct request_queue *q, struct request *rq),
-               TPARGS(q, rq));
+       TP_PROTO(struct request_queue *q, struct request *rq),
+             TP_ARGS(q, rq));
 
 DECLARE_TRACE(block_bio_bounce,
-       TPPROTO(struct request_queue *q, struct bio *bio),
-               TPARGS(q, bio));
+       TP_PROTO(struct request_queue *q, struct bio *bio),
+             TP_ARGS(q, bio));
 
 DECLARE_TRACE(block_bio_complete,
-       TPPROTO(struct request_queue *q, struct bio *bio),
-               TPARGS(q, bio));
+       TP_PROTO(struct request_queue *q, struct bio *bio),
+             TP_ARGS(q, bio));
 
 DECLARE_TRACE(block_bio_backmerge,
-       TPPROTO(struct request_queue *q, struct bio *bio),
-               TPARGS(q, bio));
+       TP_PROTO(struct request_queue *q, struct bio *bio),
+             TP_ARGS(q, bio));
 
 DECLARE_TRACE(block_bio_frontmerge,
-       TPPROTO(struct request_queue *q, struct bio *bio),
-               TPARGS(q, bio));
+       TP_PROTO(struct request_queue *q, struct bio *bio),
+             TP_ARGS(q, bio));
 
 DECLARE_TRACE(block_bio_queue,
-       TPPROTO(struct request_queue *q, struct bio *bio),
-               TPARGS(q, bio));
+       TP_PROTO(struct request_queue *q, struct bio *bio),
+             TP_ARGS(q, bio));
 
 DECLARE_TRACE(block_getrq,
-       TPPROTO(struct request_queue *q, struct bio *bio, int rw),
-               TPARGS(q, bio, rw));
+       TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+             TP_ARGS(q, bio, rw));
 
 DECLARE_TRACE(block_sleeprq,
-       TPPROTO(struct request_queue *q, struct bio *bio, int rw),
-               TPARGS(q, bio, rw));
+       TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+             TP_ARGS(q, bio, rw));
 
 DECLARE_TRACE(block_plug,
-       TPPROTO(struct request_queue *q),
-               TPARGS(q));
+       TP_PROTO(struct request_queue *q),
+             TP_ARGS(q));
 
 DECLARE_TRACE(block_unplug_timer,
-       TPPROTO(struct request_queue *q),
-               TPARGS(q));
+       TP_PROTO(struct request_queue *q),
+             TP_ARGS(q));
 
 DECLARE_TRACE(block_unplug_io,
-       TPPROTO(struct request_queue *q),
-               TPARGS(q));
+       TP_PROTO(struct request_queue *q),
+             TP_ARGS(q));
 
 DECLARE_TRACE(block_split,
-       TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
-               TPARGS(q, bio, pdu));
+       TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
+             TP_ARGS(q, bio, pdu));
 
 DECLARE_TRACE(block_remap,
-       TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev,
-               sector_t from, sector_t to),
-               TPARGS(q, bio, dev, from, to));
+       TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+                sector_t from, sector_t to),
+             TP_ARGS(q, bio, dev, from, to));
 
 #endif
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644 (file)
index 0000000..ff5d449
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _TRACE_IRQ_H
+#define _TRACE_IRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/tracepoint.h>
+
+#include <trace/irq_event_types.h>
+
+#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644 (file)
index 0000000..43bcb74
--- /dev/null
@@ -0,0 +1,43 @@
+
+/* use <trace/irq.h> instead */
+#ifndef TRACE_FORMAT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq
+
+/*
+ * Tracepoint for entry of interrupt handler:
+ */
+TRACE_FORMAT(irq_handler_entry,
+       TP_PROTO(int irq, struct irqaction *action),
+       TP_ARGS(irq, action),
+       TP_FMT("irq=%d handler=%s", irq, action->name)
+       );
+
+/*
+ * Tracepoint for return of an interrupt handler:
+ */
+TRACE_EVENT(irq_handler_exit,
+
+       TP_PROTO(int irq, struct irqaction *action, int ret),
+
+       TP_ARGS(irq, action, ret),
+
+       TP_STRUCT__entry(
+               __field(        int,    irq     )
+               __field(        int,    ret     )
+       ),
+
+       TP_printk("irq=%d return=%s",
+                 __entry->irq, __entry->ret ? "handled" : "unhandled"),
+
+       TP_fast_assign(
+               __entry->irq    = irq;
+               __entry->ret    = ret;
+       )
+);
+
+#undef TRACE_SYSTEM
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644 (file)
index 0000000..ad8b785
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/marker.h>
+
+enum kmemtrace_type_id {
+       KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
+       KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
+       KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
+};
+
+#ifdef CONFIG_KMEMTRACE
+
+extern void kmemtrace_init(void);
+
+extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                                            unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node);
+
+extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                                      unsigned long call_site,
+                                      const void *ptr);
+
+#else /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_init(void)
+{
+}
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                                            unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node)
+{
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                                      unsigned long call_site,
+                                      const void *ptr)
+{
+}
+
+#endif /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
+                                       unsigned long call_site,
+                                       const void *ptr,
+                                       size_t bytes_req,
+                                       size_t bytes_alloc,
+                                       gfp_t gfp_flags)
+{
+       kmemtrace_mark_alloc_node(type_id, call_site, ptr,
+                                 bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
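
A minimal sketch of how an allocator reports events through this API; the page-based path below is hypothetical, while the in-tree users added by this series are the slab/slub kmalloc fast paths:

	#include <trace/kmemtrace.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/kernel.h>

	static unsigned long example_get_pages(size_t size, gfp_t flags)
	{
		unsigned int order = get_order(size);
		unsigned long addr = __get_free_pages(flags, order);

		kmemtrace_mark_alloc(KMEMTRACE_TYPE_PAGES, _RET_IP_, (void *)addr,
				     size, PAGE_SIZE << order, flags);
		return addr;
	}

	static void example_put_pages(unsigned long addr, size_t size)
	{
		kmemtrace_mark_free(KMEMTRACE_TYPE_PAGES, _RET_IP_, (void *)addr);
		free_pages(addr, get_order(size));
	}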
+
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644 (file)
index 0000000..5ca67df
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _TRACE_LOCKDEP_H
+#define _TRACE_LOCKDEP_H
+
+#include <linux/lockdep.h>
+#include <linux/tracepoint.h>
+
+#include <trace/lockdep_event_types.h>
+
+#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644 (file)
index 0000000..adccfcd
--- /dev/null
@@ -0,0 +1,44 @@
+
+#ifndef TRACE_FORMAT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lock
+
+#ifdef CONFIG_LOCKDEP
+
+TRACE_FORMAT(lock_acquire,
+       TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
+               int trylock, int read, int check,
+               struct lockdep_map *next_lock, unsigned long ip),
+       TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
+       TP_FMT("%s%s%s", trylock ? "try " : "",
+               read ? "read " : "", lock->name)
+       );
+
+TRACE_FORMAT(lock_release,
+       TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+       TP_ARGS(lock, nested, ip),
+       TP_FMT("%s", lock->name)
+       );
+
+#ifdef CONFIG_LOCK_STAT
+
+TRACE_FORMAT(lock_contended,
+       TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+       TP_ARGS(lock, ip),
+       TP_FMT("%s", lock->name)
+       );
+
+TRACE_FORMAT(lock_acquired,
+       TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+       TP_ARGS(lock, ip),
+       TP_FMT("%s", lock->name)
+       );
+
+#endif
+#endif
+
+#undef TRACE_SYSTEM
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644 (file)
index 0000000..ef20466
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _TRACE_POWER_H
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+enum {
+       POWER_NONE = 0,
+       POWER_CSTATE = 1,
+       POWER_PSTATE = 2,
+};
+
+struct power_trace {
+       ktime_t                 stamp;
+       ktime_t                 end;
+       int                     type;
+       int                     state;
+};
+
+DECLARE_TRACE(power_start,
+       TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
+             TP_ARGS(it, type, state));
+
+DECLARE_TRACE(power_mark,
+       TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
+             TP_ARGS(it, type, state));
+
+DECLARE_TRACE(power_end,
+       TP_PROTO(struct power_trace *it),
+             TP_ARGS(it));
+
+#endif /* _TRACE_POWER_H */
index 0d81098..4e372a1 100644 (file)
@@ -4,53 +4,6 @@
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
 
-DECLARE_TRACE(sched_kthread_stop,
-       TPPROTO(struct task_struct *t),
-               TPARGS(t));
-
-DECLARE_TRACE(sched_kthread_stop_ret,
-       TPPROTO(int ret),
-               TPARGS(ret));
-
-DECLARE_TRACE(sched_wait_task,
-       TPPROTO(struct rq *rq, struct task_struct *p),
-               TPARGS(rq, p));
-
-DECLARE_TRACE(sched_wakeup,
-       TPPROTO(struct rq *rq, struct task_struct *p, int success),
-               TPARGS(rq, p, success));
-
-DECLARE_TRACE(sched_wakeup_new,
-       TPPROTO(struct rq *rq, struct task_struct *p, int success),
-               TPARGS(rq, p, success));
-
-DECLARE_TRACE(sched_switch,
-       TPPROTO(struct rq *rq, struct task_struct *prev,
-               struct task_struct *next),
-               TPARGS(rq, prev, next));
-
-DECLARE_TRACE(sched_migrate_task,
-       TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
-               TPARGS(p, orig_cpu, dest_cpu));
-
-DECLARE_TRACE(sched_process_free,
-       TPPROTO(struct task_struct *p),
-               TPARGS(p));
-
-DECLARE_TRACE(sched_process_exit,
-       TPPROTO(struct task_struct *p),
-               TPARGS(p));
-
-DECLARE_TRACE(sched_process_wait,
-       TPPROTO(struct pid *pid),
-               TPARGS(pid));
-
-DECLARE_TRACE(sched_process_fork,
-       TPPROTO(struct task_struct *parent, struct task_struct *child),
-               TPARGS(parent, child));
-
-DECLARE_TRACE(sched_signal_send,
-       TPPROTO(int sig, struct task_struct *p),
-               TPARGS(sig, p));
+#include <trace/sched_event_types.h>
 
 #endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644 (file)
index 0000000..fb37af6
--- /dev/null
@@ -0,0 +1,337 @@
+
+/* use <trace/sched.h> instead */
+#ifndef TRACE_EVENT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+
+/*
+ * Tracepoint for calling kthread_stop, performed to end a kthread:
+ */
+TRACE_EVENT(sched_kthread_stop,
+
+       TP_PROTO(struct task_struct *t),
+
+       TP_ARGS(t),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+       ),
+
+       TP_printk("task %s:%d", __entry->comm, __entry->pid),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+               __entry->pid    = t->pid;
+       )
+);
+
+/*
+ * Tracepoint for the return value of the kthread stopping:
+ */
+TRACE_EVENT(sched_kthread_stop_ret,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret),
+
+       TP_STRUCT__entry(
+               __field(        int,    ret     )
+       ),
+
+       TP_printk("ret %d", __entry->ret),
+
+       TP_fast_assign(
+               __entry->ret    = ret;
+       )
+);
+
+/*
+ * Tracepoint for waiting on task to unschedule:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ *        but used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_wait_task,
+
+       TP_PROTO(struct rq *rq, struct task_struct *p),
+
+       TP_ARGS(rq, p),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+       ),
+
+       TP_printk("task %s:%d [%d]",
+                 __entry->comm, __entry->pid, __entry->prio),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid    = p->pid;
+               __entry->prio   = p->prio;
+       )
+);
+
+/*
+ * Tracepoint for waking up a task:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ *        but used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_wakeup,
+
+       TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+
+       TP_ARGS(rq, p, success),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+               __field(        int,    success                 )
+       ),
+
+       TP_printk("task %s:%d [%d] success=%d",
+                 __entry->comm, __entry->pid, __entry->prio,
+                 __entry->success),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->prio           = p->prio;
+               __entry->success        = success;
+       )
+);
+
+/*
+ * Tracepoint for waking up a new task:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ *        but used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_wakeup_new,
+
+       TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+
+       TP_ARGS(rq, p, success),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+               __field(        int,    success                 )
+       ),
+
+       TP_printk("task %s:%d [%d] success=%d",
+                 __entry->comm, __entry->pid, __entry->prio,
+                 __entry->success),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->prio           = p->prio;
+               __entry->success        = success;
+       )
+);
+
+/*
+ * Tracepoint for task switches, performed by the scheduler:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ *        but used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_switch,
+
+       TP_PROTO(struct rq *rq, struct task_struct *prev,
+                struct task_struct *next),
+
+       TP_ARGS(rq, prev, next),
+
+       TP_STRUCT__entry(
+               __array(        char,   prev_comm,      TASK_COMM_LEN   )
+               __field(        pid_t,  prev_pid                        )
+               __field(        int,    prev_prio                       )
+               __array(        char,   next_comm,      TASK_COMM_LEN   )
+               __field(        pid_t,  next_pid                        )
+               __field(        int,    next_prio                       )
+       ),
+
+       TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+               __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+               __entry->next_comm, __entry->next_pid, __entry->next_prio),
+
+       TP_fast_assign(
+               memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+               __entry->prev_pid       = prev->pid;
+               __entry->prev_prio      = prev->prio;
+               memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+               __entry->next_pid       = next->pid;
+               __entry->next_prio      = next->prio;
+       )
+);
+
+/*
+ * Tracepoint for a task being migrated:
+ */
+TRACE_EVENT(sched_migrate_task,
+
+       TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
+
+       TP_ARGS(p, orig_cpu, dest_cpu),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+               __field(        int,    orig_cpu                )
+               __field(        int,    dest_cpu                )
+       ),
+
+       TP_printk("task %s:%d [%d] from: %d  to: %d",
+                 __entry->comm, __entry->pid, __entry->prio,
+                 __entry->orig_cpu, __entry->dest_cpu),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->prio           = p->prio;
+               __entry->orig_cpu       = orig_cpu;
+               __entry->dest_cpu       = dest_cpu;
+       )
+);
+
+/*
+ * Tracepoint for freeing a task:
+ */
+TRACE_EVENT(sched_process_free,
+
+       TP_PROTO(struct task_struct *p),
+
+       TP_ARGS(p),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+       ),
+
+       TP_printk("task %s:%d [%d]",
+                 __entry->comm, __entry->pid, __entry->prio),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->prio           = p->prio;
+       )
+);
+
+/*
+ * Tracepoint for a task exiting:
+ */
+TRACE_EVENT(sched_process_exit,
+
+       TP_PROTO(struct task_struct *p),
+
+       TP_ARGS(p),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+       ),
+
+       TP_printk("task %s:%d [%d]",
+                 __entry->comm, __entry->pid, __entry->prio),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->prio           = p->prio;
+       )
+);
+
+/*
+ * Tracepoint for a waiting task:
+ */
+TRACE_EVENT(sched_process_wait,
+
+       TP_PROTO(struct pid *pid),
+
+       TP_ARGS(pid),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        int,    prio                    )
+       ),
+
+       TP_printk("task %s:%d [%d]",
+                 __entry->comm, __entry->pid, __entry->prio),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+               __entry->pid            = pid_nr(pid);
+               __entry->prio           = current->prio;
+       )
+);
+
+/*
+ * Tracepoint for do_fork:
+ */
+TRACE_EVENT(sched_process_fork,
+
+       TP_PROTO(struct task_struct *parent, struct task_struct *child),
+
+       TP_ARGS(parent, child),
+
+       TP_STRUCT__entry(
+               __array(        char,   parent_comm,    TASK_COMM_LEN   )
+               __field(        pid_t,  parent_pid                      )
+               __array(        char,   child_comm,     TASK_COMM_LEN   )
+               __field(        pid_t,  child_pid                       )
+       ),
+
+       TP_printk("parent %s:%d  child %s:%d",
+               __entry->parent_comm, __entry->parent_pid,
+               __entry->child_comm, __entry->child_pid),
+
+       TP_fast_assign(
+               memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
+               __entry->parent_pid     = parent->pid;
+               memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
+               __entry->child_pid      = child->pid;
+       )
+);
+
+/*
+ * Tracepoint for sending a signal:
+ */
+TRACE_EVENT(sched_signal_send,
+
+       TP_PROTO(int sig, struct task_struct *p),
+
+       TP_ARGS(sig, p),
+
+       TP_STRUCT__entry(
+               __field(        int,    sig                     )
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+       ),
+
+       TP_printk("sig: %d  task %s:%d",
+                 __entry->sig, __entry->comm, __entry->pid),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid    = p->pid;
+               __entry->sig    = sig;
+       )
+);
+
+#undef TRACE_SYSTEM
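
Raw tracepoint users are unaffected by the TRACE_EVENT() conversion; a probe still registers against the TP_PROTO() signature. A minimal sketch (the probe and initcall names are made up):

	#include <trace/sched.h>
	#include <linux/sched.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static void example_fork_probe(struct task_struct *parent,
				       struct task_struct *child)
	{
		trace_printk("fork: %s:%d -> %s:%d\n",
			     parent->comm, parent->pid, child->comm, child->pid);
	}

	static int __init example_sched_trace_init(void)
	{
		return register_trace_sched_process_fork(example_fork_probe);
	}
	device_initcall(example_sched_trace_init);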
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644 (file)
index 0000000..df56f56
--- /dev/null
@@ -0,0 +1,5 @@
+/* trace/<type>_event_types.h here */
+
+#include <trace/sched_event_types.h>
+#include <trace/irq_event_types.h>
+#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644 (file)
index 0000000..fd13750
--- /dev/null
@@ -0,0 +1,5 @@
+/* trace/<type>.h here */
+
+#include <trace/sched.h>
+#include <trace/irq.h>
+#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644 (file)
index 0000000..7626523
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef __TRACE_WORKQUEUE_H
+#define __TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+
+DECLARE_TRACE(workqueue_insertion,
+          TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+          TP_ARGS(wq_thread, work));
+
+DECLARE_TRACE(workqueue_execution,
+          TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+          TP_ARGS(wq_thread, work));
+
+/* Trace the creation of one workqueue thread on a cpu */
+DECLARE_TRACE(workqueue_creation,
+          TP_PROTO(struct task_struct *wq_thread, int cpu),
+          TP_ARGS(wq_thread, cpu));
+
+DECLARE_TRACE(workqueue_destruction,
+          TP_PROTO(struct task_struct *wq_thread),
+          TP_ARGS(wq_thread));
+
+#endif /* __TRACE_WORKQUEUE_H */
index 95a6613..28deb70 100644 (file)
@@ -1005,7 +1005,7 @@ config TRACEPOINTS
 
 config MARKERS
        bool "Activate markers"
-       depends on TRACEPOINTS
+       select TRACEPOINTS
        help
          Place an empty function call at each marker site. Can be
          dynamically changed for a probe function.
index 6bf83af..20d784a 100644 (file)
@@ -71,6 +71,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
+#include <trace/kmemtrace.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/smp.h>
@@ -649,6 +650,7 @@ asmlinkage void __init start_kernel(void)
        enable_debug_pagealloc();
        cpu_hotplug_init();
        kmem_cache_init();
+       kmemtrace_init();
        debug_objects_mem_init();
        idr_init_cache();
        setup_per_cpu_pageset();
index e136ed8..0df6253 100644 (file)
@@ -41,7 +41,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
        return e;
 }
 
-__notrace_funcgraph int core_kernel_text(unsigned long addr)
+int core_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_stext &&
            addr <= (unsigned long)_etext)
@@ -54,7 +54,7 @@ __notrace_funcgraph int core_kernel_text(unsigned long addr)
        return 0;
 }
 
-__notrace_funcgraph int __kernel_text_address(unsigned long addr)
+int __kernel_text_address(unsigned long addr)
 {
        if (core_kernel_text(addr))
                return 1;
index f51eaee..412370a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <trace/irq.h>
 #include <linux/bootmem.h>
 
 #include "internals.h"
@@ -329,6 +330,9 @@ irqreturn_t no_action(int cpl, void *dev_id)
        return IRQ_NONE;
 }
 
+DEFINE_TRACE(irq_handler_entry);
+DEFINE_TRACE(irq_handler_exit);
+
 /**
  * handle_IRQ_event - irq action chain handler
  * @irq:       the interrupt number
@@ -345,7 +349,9 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
                local_irq_enable_in_hardirq();
 
        do {
+               trace_irq_handler_entry(irq, action);
                ret = action->handler(irq, action->dev_id);
+               trace_irq_handler_exit(irq, action, ret);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
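
With the two tracepoints in place, a tracer can hook the handler path without touching this file again. A minimal probe sketch (the probe and initcall names are made up):

	#include <trace/irq.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static void example_irq_entry_probe(int irq, struct irqaction *action)
	{
		trace_printk("irq %d enter: %s\n", irq, action->name);
	}

	static int __init example_irq_trace_init(void)
	{
		return register_trace_irq_handler_entry(example_irq_entry_probe);
	}
	device_initcall(example_irq_trace_init);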
index 7ba8cd9..479d4d5 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kdebug.h>
+#include <linux/memory.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -699,9 +700,10 @@ int __kprobes register_kprobe(struct kprobe *p)
                goto out;
        }
 
+       mutex_lock(&text_mutex);
        ret = arch_prepare_kprobe(p);
        if (ret)
-               goto out;
+               goto out_unlock_text;
 
        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
@@ -710,6 +712,8 @@ int __kprobes register_kprobe(struct kprobe *p)
        if (kprobe_enabled)
                arch_arm_kprobe(p);
 
+out_unlock_text:
+       mutex_unlock(&text_mutex);
 out:
        mutex_unlock(&kprobe_mutex);
 
@@ -746,8 +750,11 @@ valid_p:
                 * enabled and not gone - otherwise, the breakpoint would
                 * already have been removed. We save on flushing icache.
                 */
-               if (kprobe_enabled && !kprobe_gone(old_p))
+               if (kprobe_enabled && !kprobe_gone(old_p)) {
+                       mutex_lock(&text_mutex);
                        arch_disarm_kprobe(p);
+                       mutex_unlock(&text_mutex);
+               }
                hlist_del_rcu(&old_p->hlist);
        } else {
                if (p->break_handler && !kprobe_gone(p))
@@ -1280,12 +1287,14 @@ static void __kprobes enable_all_kprobes(void)
        if (kprobe_enabled)
                goto already_enabled;
 
+       mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_gone(p))
                                arch_arm_kprobe(p);
        }
+       mutex_unlock(&text_mutex);
 
        kprobe_enabled = true;
        printk(KERN_INFO "Kprobes globally enabled\n");
@@ -1310,6 +1319,7 @@ static void __kprobes disable_all_kprobes(void)
 
        kprobe_enabled = false;
        printk(KERN_INFO "Kprobes globally disabled\n");
+       mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
@@ -1318,6 +1328,7 @@ static void __kprobes disable_all_kprobes(void)
                }
        }
 
+       mutex_unlock(&text_mutex);
        mutex_unlock(&kprobe_mutex);
        /* Allow all currently running kprobes to complete */
        synchronize_sched();
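
The kprobes hunks take text_mutex around every site that prepares, arms or disarms a probe, i.e. every site that patches kernel text, and register_kprobe() grows an out_unlock_text label so the error path drops the locks in reverse order. A rough user-space sketch of that goto-unwind idiom, with pthread mutexes standing in for kprobe_mutex and text_mutex and a stubbed-out prepare step (all names here are hypothetical):

/* Two-lock, goto-unwind pattern: outer registry lock, inner text lock only
 * around the step that would patch code, released in reverse order on both
 * the success and the error path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ kprobe_mutex */
static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER;		/* ~ text_mutex */

static int prepare_probe(int id)
{
	return (id < 0) ? -1 : 0;		/* pretend arch_prepare_kprobe() */
}

static int register_probe(int id)
{
	int ret;

	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&text_lock);

	ret = prepare_probe(id);
	if (ret)
		goto out_unlock_text;

	printf("probe %d armed\n", id);		/* pretend arch_arm_kprobe() */

out_unlock_text:
	pthread_mutex_unlock(&text_lock);
	pthread_mutex_unlock(&registry_lock);
	return ret;
}

int main(void)
{
	int ok = register_probe(1);		/* succeeds */
	int fail = register_probe(-1);		/* fails, unwinds via out_unlock_text */

	return (ok == 0 && fail != 0) ? 0 : 1;
}
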
index 06b0c35..cb70c1d 100644 (file)
@@ -41,6 +41,8 @@
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
+#include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -310,12 +312,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE       1
 # define SOFTIRQ_VERBOSE       1
+# define RECLAIM_VERBOSE       1
 #else
 # define HARDIRQ_VERBOSE       0
 # define SOFTIRQ_VERBOSE       0
+# define RECLAIM_VERBOSE       0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -443,17 +447,18 @@ atomic_t nr_find_usage_backwards_recursions;
  * Locking printouts:
  */
 
+#define __USAGE(__STATE)                                               \
+       [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
+       [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
+       [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+       [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
 static const char *usage_str[] =
 {
-       [LOCK_USED] =                   "initial-use ",
-       [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
-       [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
-       [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
-       [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
-       [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
-       [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
-       [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
-       [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+       [LOCK_USED] = "INITIAL USE",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -461,46 +466,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
 {
-       *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
-       if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
-               *c1 = '+';
-       else
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
-                       *c1 = '-';
+       return 1UL << bit;
+}
 
-       if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
-               *c2 = '+';
-       else
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
-                       *c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+       char c = '.';
 
-       if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-               *c3 = '-';
-       if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
-               *c3 = '+';
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-                       *c3 = '?';
+       if (class->usage_mask & lock_flag(bit + 2))
+               c = '+';
+       if (class->usage_mask & lock_flag(bit)) {
+               c = '-';
+               if (class->usage_mask & lock_flag(bit + 2))
+                       c = '?';
        }
 
-       if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-               *c4 = '-';
-       if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
-               *c4 = '+';
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-                       *c4 = '?';
-       }
+       return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+       int i = 0;
+
+#define LOCKDEP_STATE(__STATE)                                                 \
+       usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
+       usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+       usage[i] = '\0';
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-       char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+       char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
        const char *name;
 
-       get_usage_chars(class, &c1, &c2, &c3, &c4);
+       get_usage_chars(class, usage);
 
        name = class->name;
        if (!name) {
@@ -513,7 +517,7 @@ static void print_lock_name(struct lock_class *class)
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
-       printk("){%c%c%c%c}", c1, c2, c3, c4);
+       printk("){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -1263,9 +1267,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
                        bit_backwards, bit_forwards, irqclass);
 }
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-               struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+       __stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+       __stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+       return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+       /*
+        * USED_IN
+        * USED_IN_READ
+        * ENABLED
+        * ENABLED_READ
+        *
+        * bit 0 - write/read
+        * bit 1 - used_in/enabled
+        * bit 2+ - state
+        */
+
+       int state = new_bit & ~3;
+       int dir = new_bit & 2;
+
+       /*
+        * keep the state, flip the direction bit and strip the read bit.
+        */
+       return state | (dir ^ 2);
+}
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+                          struct held_lock *next, enum lock_usage_bit bit)
 {
        /*
         * Prove that the new dependency does not connect a hardirq-safe
@@ -1273,38 +1317,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-                                       LOCK_ENABLED_HARDIRQS, "hard"))
+       if (!check_usage(curr, prev, next, bit,
+                          exclusive_bit(bit), state_name(bit)))
                return 0;
 
+       bit++; /* _READ */
+
        /*
         * Prove that the new dependency does not connect a hardirq-safe-read
         * lock with a hardirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-                                       LOCK_ENABLED_HARDIRQS, "hard-read"))
+       if (!check_usage(curr, prev, next, bit,
+                          exclusive_bit(bit), state_name(bit)))
                return 0;
 
-       /*
-        * Prove that the new dependency does not connect a softirq-safe
-        * lock with a softirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
-        */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-                                       LOCK_ENABLED_SOFTIRQS, "soft"))
-               return 0;
-       /*
-        * Prove that the new dependency does not connect a softirq-safe-read
-        * lock with a softirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
-        */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-                                       LOCK_ENABLED_SOFTIRQS, "soft"))
+       return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+               struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE)                                         \
+       if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
                return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 
        return 1;
 }
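
With the states table-driven, every usage bit is now just state << 2 | dir | read (bit 0 = read, bit 1 = USED_IN vs ENABLED, bits 2 and up = state index), which is what lets exclusive_bit() compute the conflicting bit arithmetically instead of open-coding each state. Below is a standalone re-derivation of that arithmetic; the enum and the name table are reconstructed here for illustration rather than copied from a kernel header.

/* Re-derives the usage-bit arithmetic from the hunk above:
 * exclusive_bit() keeps the state, flips the direction and strips read. */
#include <stdio.h>

enum usage_bit {			/* same layout as the lockdep enum */
	USED_IN_HARDIRQ, USED_IN_HARDIRQ_READ,
	ENABLED_HARDIRQ, ENABLED_HARDIRQ_READ,
	USED_IN_SOFTIRQ, USED_IN_SOFTIRQ_READ,
	ENABLED_SOFTIRQ, ENABLED_SOFTIRQ_READ,
	USED_IN_RECLAIM_FS, USED_IN_RECLAIM_FS_READ,
	ENABLED_RECLAIM_FS, ENABLED_RECLAIM_FS_READ,
	NR_BITS
};

static const char *names[] = {
	"USED_IN_HARDIRQ", "USED_IN_HARDIRQ_READ",
	"ENABLED_HARDIRQ", "ENABLED_HARDIRQ_READ",
	"USED_IN_SOFTIRQ", "USED_IN_SOFTIRQ_READ",
	"ENABLED_SOFTIRQ", "ENABLED_SOFTIRQ_READ",
	"USED_IN_RECLAIM_FS", "USED_IN_RECLAIM_FS_READ",
	"ENABLED_RECLAIM_FS", "ENABLED_RECLAIM_FS_READ",
};

static int exclusive_bit(int new_bit)
{
	int state = new_bit & ~3;	/* keep the state */
	int dir = new_bit & 2;		/* USED_IN <-> ENABLED */

	return state | (dir ^ 2);	/* flip direction, strip read */
}

int main(void)
{
	int bit;

	for (bit = 0; bit < NR_BITS; bit++)
		printf("%-28s conflicts with %s\n",
		       names[bit], names[exclusive_bit(bit)]);
	return 0;
}
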
@@ -1861,9 +1901,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
                curr->comm, task_pid_nr(curr));
        print_lock(this);
        if (forwards)
-               printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+               printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
        else
-               printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+               printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
        print_lock_name(other);
        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
@@ -1933,7 +1973,7 @@ void print_irqtrace_events(struct task_struct *curr)
        print_ip_sym(curr->softirq_disable_ip);
 }
 
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
 {
 #if HARDIRQ_VERBOSE
        return class_filter(class);
@@ -1941,7 +1981,7 @@ static int hardirq_verbose(struct lock_class *class)
        return 0;
 }
 
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
 {
 #if SOFTIRQ_VERBOSE
        return class_filter(class);
@@ -1949,185 +1989,95 @@ static int softirq_verbose(struct lock_class *class)
        return 0;
 }
 
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+       return class_filter(class);
+#endif
+       return 0;
+}
+
 #define STRICT_READ_CHECKS     1
 
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+       __STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+                               struct lock_class *class)
+{
+       return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+                            enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit new_bit)
 {
-       int ret = 1;
+       int excl_bit = exclusive_bit(new_bit);
+       int read = new_bit & 1;
+       int dir = new_bit & 2;
 
-       switch(new_bit) {
-       case LOCK_USED_IN_HARDIRQ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_ENABLED_HARDIRQS_READ))
-                       return 0;
-               /*
-                * just marked it hardirq-safe, check that this lock
-                * took no hardirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_HARDIRQS, "hard"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-safe, check that this lock
-                * took no hardirq-unsafe-read lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                               LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
-                       return 0;
-#endif
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_SOFTIRQ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_ENABLED_SOFTIRQS_READ))
-                       return 0;
-               /*
-                * just marked it softirq-safe, check that this lock
-                * took no softirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_SOFTIRQS, "soft"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-safe, check that this lock
-                * took no softirq-unsafe-read lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                               LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
-                       return 0;
-#endif
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_HARDIRQ_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-                       return 0;
-               /*
-                * just marked it hardirq-read-safe, check that this lock
-                * took no hardirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_HARDIRQS, "hard"))
-                       return 0;
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_SOFTIRQ_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-                       return 0;
-               /*
-                * just marked it softirq-read-safe, check that this lock
-                * took no softirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_SOFTIRQS, "soft"))
-                       return 0;
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_HARDIRQS:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_USED_IN_HARDIRQ_READ))
-                       return 0;
-               /*
-                * just marked it hardirq-unsafe, check that no hardirq-safe
-                * lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_HARDIRQ, "hard"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-unsafe, check that no
-                * hardirq-safe-read lock in the system ever took
-                * it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                  LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
-                       return 0;
-#endif
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_SOFTIRQS:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_USED_IN_SOFTIRQ_READ))
-                       return 0;
-               /*
-                * just marked it softirq-unsafe, check that no softirq-safe
-                * lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_SOFTIRQ, "soft"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-unsafe, check that no
-                * softirq-safe-read lock in the system ever took
-                * it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                  LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
-                       return 0;
-#endif
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_HARDIRQS_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-read-unsafe, check that no
-                * hardirq-safe lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_HARDIRQ, "hard"))
-                       return 0;
-#endif
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_SOFTIRQS_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
+       /*
+        * mark USED_IN has to look forwards -- to ensure no dependency
+        * has ENABLED state, which would allow recursion deadlocks.
+        *
+        * mark ENABLED has to look backwards -- to ensure no dependee
+        * has USED_IN state, which, again, would allow recursion deadlocks.
+        */
+       check_usage_f usage = dir ?
+               check_usage_backwards : check_usage_forwards;
+
+       /*
+        * Validate that this particular lock does not have conflicting
+        * usage states.
+        */
+       if (!valid_state(curr, this, new_bit, excl_bit))
+               return 0;
+
+       /*
+        * Validate that the lock dependencies don't have conflicting usage
+        * states.
+        */
+       if ((!read || !dir || STRICT_READ_CHECKS) &&
+                       !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+               return 0;
+
+       /*
+        * Check for read in write conflicts
+        */
+       if (!read) {
+               if (!valid_state(curr, this, new_bit, excl_bit + 1))
                        return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-read-unsafe, check that no
-                * softirq-safe lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_SOFTIRQ, "soft"))
+
+               if (STRICT_READ_CHECKS &&
+                       !usage(curr, this, excl_bit + 1,
+                               state_name(new_bit + 1)))
                        return 0;
-#endif
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       default:
-               WARN_ON(1);
-               break;
        }
 
-       return ret;
+       if (state_verbose(new_bit, hlock_class(this)))
+               return 2;
+
+       return 1;
 }
 
+enum mark_type {
+#define LOCKDEP_STATE(__STATE) __STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
        enum lock_usage_bit usage_bit;
        struct held_lock *hlock;
@@ -2136,17 +2086,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
-               if (hardirq) {
-                       if (hlock->read)
-                               usage_bit = LOCK_ENABLED_HARDIRQS_READ;
-                       else
-                               usage_bit = LOCK_ENABLED_HARDIRQS;
-               } else {
-                       if (hlock->read)
-                               usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
-                       else
-                               usage_bit = LOCK_ENABLED_SOFTIRQS;
-               }
+               usage_bit = 2 + (mark << 2); /* ENABLED */
+               if (hlock->read)
+                       usage_bit += 1; /* READ */
+
+               BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
                if (!mark_lock(curr, hlock, usage_bit))
                        return 0;
        }
@@ -2200,7 +2145,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
         */
-       if (!mark_held_locks(curr, 1))
+       if (!mark_held_locks(curr, HARDIRQ))
                return;
        /*
         * If we have softirqs enabled, then set the usage
@@ -2208,7 +2153,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, 0))
+               if (!mark_held_locks(curr, SOFTIRQ))
                        return;
 
        curr->hardirq_enable_ip = ip;
@@ -2288,7 +2233,7 @@ void trace_softirqs_on(unsigned long ip)
         * enabled too:
         */
        if (curr->hardirqs_enabled)
-               mark_held_locks(curr, 0);
+               mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2262,31 @@ void trace_softirqs_off(unsigned long ip)
                debug_atomic_inc(&redundant_softirqs_off);
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(!debug_locks))
+               return;
+
+       /* no reclaim without waiting on it */
+       if (!(gfp_mask & __GFP_WAIT))
+               return;
+
+       /* this guy won't enter reclaim */
+       if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+               return;
+
+       /* We're only interested in __GFP_FS allocations for now */
+       if (!(gfp_mask & __GFP_FS))
+               return;
+
+       if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+               return;
+
+       mark_held_locks(curr, RECLAIM_FS);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
        /*
@@ -2345,19 +2315,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
        if (!hlock->hardirqs_off) {
                if (hlock->read) {
                        if (!mark_lock(curr, hlock,
-                                       LOCK_ENABLED_HARDIRQS_READ))
+                                       LOCK_ENABLED_HARDIRQ_READ))
                                return 0;
                        if (curr->softirqs_enabled)
                                if (!mark_lock(curr, hlock,
-                                               LOCK_ENABLED_SOFTIRQS_READ))
+                                               LOCK_ENABLED_SOFTIRQ_READ))
                                        return 0;
                } else {
                        if (!mark_lock(curr, hlock,
-                                       LOCK_ENABLED_HARDIRQS))
+                                       LOCK_ENABLED_HARDIRQ))
                                return 0;
                        if (curr->softirqs_enabled)
                                if (!mark_lock(curr, hlock,
-                                               LOCK_ENABLED_SOFTIRQS))
+                                               LOCK_ENABLED_SOFTIRQ))
+                                       return 0;
+               }
+       }
+
+       /*
+        * We reuse the irq context infrastructure more broadly as general
+        * context-checking code. This tests GFP_FS recursion (a lock taken
+        * during reclaim for a GFP_FS allocation is held over a GFP_FS
+        * allocation).
+        */
+       if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+               if (hlock->read) {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+                                       return 0;
+               } else {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
                                        return 0;
                }
        }
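
Taken together, lockdep_trace_alloc() and the new block in mark_irqflags() implement the GFP_FS recursion check: an allocation is only interesting if it can wait and can recurse into filesystem reclaim, and while such an allocation is in flight every non-trylock lock acquisition gets the USED_IN_RECLAIM_FS usage bit. A rough sketch of just the allocation filter follows, with hypothetical flag constants standing in for the real gfp bits:

/* Shape of the lockdep_trace_alloc() filter above. The flag values are
 * stand-ins, not the real gfp bits: track an allocation iff it may wait,
 * may recurse into FS reclaim, and the task is not itself a reclaimer. */
#include <stdio.h>

#define GFP_WAIT	0x01	/* hypothetical __GFP_WAIT */
#define GFP_FS		0x02	/* hypothetical __GFP_FS */
#define GFP_NOMEMALLOC	0x04	/* hypothetical __GFP_NOMEMALLOC */
#define PF_MEMALLOC	0x08	/* hypothetical task flag */

static int should_trace_alloc(unsigned int task_flags, unsigned int gfp_mask)
{
	if (!(gfp_mask & GFP_WAIT))		/* no reclaim without waiting */
		return 0;
	if ((task_flags & PF_MEMALLOC) && !(gfp_mask & GFP_NOMEMALLOC))
		return 0;			/* this task won't enter reclaim */
	if (!(gfp_mask & GFP_FS))		/* only FS allocations for now */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       should_trace_alloc(0, GFP_WAIT | GFP_FS),		/* 1 */
	       should_trace_alloc(0, GFP_WAIT),				/* 0 */
	       should_trace_alloc(PF_MEMALLOC, GFP_WAIT | GFP_FS));	/* 0 */
	return 0;
}
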
@@ -2412,6 +2398,10 @@ static inline int separate_irq_context(struct task_struct *curr,
        return 0;
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
 #endif
 
 /*
@@ -2445,14 +2435,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
                return 0;
 
        switch (new_bit) {
-       case LOCK_USED_IN_HARDIRQ:
-       case LOCK_USED_IN_SOFTIRQ:
-       case LOCK_USED_IN_HARDIRQ_READ:
-       case LOCK_USED_IN_SOFTIRQ_READ:
-       case LOCK_ENABLED_HARDIRQS:
-       case LOCK_ENABLED_SOFTIRQS:
-       case LOCK_ENABLED_HARDIRQS_READ:
-       case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE)                 \
+       case LOCK_USED_IN_##__STATE:            \
+       case LOCK_USED_IN_##__STATE##_READ:     \
+       case LOCK_ENABLED_##__STATE:            \
+       case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
                ret = mark_lock_irq(curr, this, new_bit);
                if (!ret)
                        return 0;
@@ -2925,6 +2914,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
@@ -2935,6 +2926,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
        unsigned long flags;
 
+       trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2949,11 +2942,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
+DEFINE_TRACE(lock_release);
+
 void lock_release(struct lockdep_map *lock, int nested,
                          unsigned long ip)
 {
        unsigned long flags;
 
+       trace_lock_release(lock, nested, ip);
+
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2966,6 +2963,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+       current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+       current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
@@ -3092,10 +3099,14 @@ found_it:
        lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
+       trace_lock_contended(lock, ip);
+
        if (unlikely(!lock_stat))
                return;
 
@@ -3111,10 +3122,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
+DEFINE_TRACE(lock_acquired);
+
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
+       trace_lock_acquired(lock, ip);
+
        if (unlikely(!lock_stat))
                return;
 
index 56b1969..a2cc7e9 100644 (file)
@@ -6,6 +6,45 @@
  * lockdep subsystem internal functions and variables.
  */
 
+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit {
+#define LOCKDEP_STATE(__STATE)         \
+       LOCK_USED_IN_##__STATE,         \
+       LOCK_USED_IN_##__STATE##_READ,  \
+       LOCK_ENABLED_##__STATE,         \
+       LOCK_ENABLED_##__STATE##_READ,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+       LOCK_USED,
+       LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define __LOCKF(__STATE)       LOCKF_##__STATE = (1 << LOCK_##__STATE),
+
+enum {
+#define LOCKDEP_STATE(__STATE)                                         \
+       __LOCKF(USED_IN_##__STATE)                                      \
+       __LOCKF(USED_IN_##__STATE##_READ)                               \
+       __LOCKF(ENABLED_##__STATE)                                      \
+       __LOCKF(ENABLED_##__STATE##_READ)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+       __LOCKF(USED)
+};
+
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_ENABLED_IRQ_READ \
+               (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+               (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
-extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
+
+extern void get_usage_chars(struct lock_class *class,
+                           char usage[LOCK_USAGE_CHARS]);
 
 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
 
index 13716b8..d7135aa 100644 (file)
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
        struct lock_class *class = v;
        struct lock_list *entry;
-       char c1, c2, c3, c4;
+       char usage[LOCK_USAGE_CHARS];
 
        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
        seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif
 
-       get_usage_chars(class, &c1, &c2, &c3, &c4);
-       seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+       get_usage_chars(class, usage);
+       seq_printf(m, " %s", usage);
 
        seq_printf(m, ": ");
        print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
-               if (class->usage_mask & LOCKF_ENABLED_IRQS)
+               if (class->usage_mask & LOCKF_ENABLED_IRQ)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
+               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
+               if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
-               if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
+               if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+               if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
                        nr_hardirq_read_unsafe++;
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 static void seq_header(struct seq_file *m)
 {
        seq_printf(m, "lock_stat version 0.3\n");
+
+       if (unlikely(!debug_locks))
+               seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
+
        seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
        seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h
new file mode 100644 (file)
index 0000000..995b0cc
--- /dev/null
@@ -0,0 +1,9 @@
+/*
+ * Lockdep states,
+ *
+ * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
+ * you add one, or come up with a nice dynamic solution.
+ */
+LOCKDEP_STATE(HARDIRQ)
+LOCKDEP_STATE(SOFTIRQ)
+LOCKDEP_STATE(RECLAIM_FS)
index f0e04d6..8b742f2 100644 (file)
@@ -2769,7 +2769,7 @@ int is_module_address(unsigned long addr)
 
 
 /* Is this a valid kernel address? */
-__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
+struct module *__module_text_address(unsigned long addr)
 {
        struct module *mod;
 
index 1d94160..50d022e 100644 (file)
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
-{
-       lock->owner = new_owner;
-}
-
 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
        memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
        /* Mark the current thread as blocked on the lock: */
        ti->task->blocked_on = waiter;
-       waiter->lock = lock;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
        DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
        DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-       DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+       mutex_clear_owner(lock);
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-       lock->owner = NULL;
        lock->magic = lock;
 }
 
index babfbdf..6b2d735 100644 (file)
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void
-debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
-
-static inline void debug_mutex_clear_owner(struct mutex *lock)
-{
-       lock->owner = NULL;
-}
-
 extern void debug_mutex_lock_common(struct mutex *lock,
                                    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
 
+static inline void mutex_set_owner(struct mutex *lock)
+{
+       lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+       lock->owner = NULL;
+}
+
 #define spin_lock_mutex(lock, flags)                   \
        do {                                            \
                struct mutex *l = container_of(lock, struct mutex, wait_lock); \
index 4f45d4b..5d79781 100644 (file)
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.)
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
+       mutex_clear_owner(lock);
 
        debug_mutex_init(lock, name, key);
 }
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+       mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
+#ifndef CONFIG_DEBUG_MUTEXES
+       /*
+        * When debugging is enabled we must not clear the owner prematurely:
+        * the slow path will always be taken, and that clears the owner field
+        * after verifying that it was indeed current.
+        */
+       mutex_clear_owner(lock);
+#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
        struct task_struct *task = current;
        struct mutex_waiter waiter;
-       unsigned int old_val;
        unsigned long flags;
 
+       preempt_disable();
+       mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+       /*
+        * Optimistic spinning.
+        *
+        * We try to spin for acquisition when we find that there are no
+        * pending waiters and the lock owner is currently running on a
+        * (different) CPU.
+        *
+        * The rationale is that if the lock owner is running, it is likely to
+        * release the lock soon.
+        *
+        * Since this needs the lock owner, and this mutex implementation
+        * doesn't track the owner atomically in the lock field, we need to
+        * track it non-atomically.
+        *
+        * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+        * to serialize everything.
+        */
+
+       for (;;) {
+               struct thread_info *owner;
+
+               /*
+                * If there's an owner, wait for it to either
+                * release the lock or go to sleep.
+                */
+               owner = ACCESS_ONCE(lock->owner);
+               if (owner && !mutex_spin_on_owner(lock, owner))
+                       break;
+
+               if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+                       lock_acquired(&lock->dep_map, ip);
+                       mutex_set_owner(lock);
+                       preempt_enable();
+                       return 0;
+               }
+
+               /*
+                * When there's no owner, we might have preempted between the
+                * owner acquiring the lock and setting the owner field. If
+                * we're an RT task that will live-lock because we won't let
+                * we're an RT task, that will live-lock because we won't let
+                */
+               if (!owner && (need_resched() || rt_task(task)))
+                       break;
+
+               /*
+                * The cpu_relax() call is a compiler barrier which forces
+                * everything in this loop to be re-loaded. We don't need
+                * memory barriers as we'll eventually observe the right
+                * values at the cost of a few extra spins.
+                */
+               cpu_relax();
+       }
+#endif
        spin_lock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_lock_common(lock, &waiter);
-       mutex_acquire(&lock->dep_map, subclass, 0, ip);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;
 
-       old_val = atomic_xchg(&lock->count, -1);
-       if (old_val == 1)
+       if (atomic_xchg(&lock->count, -1) == 1)
                goto done;
 
        lock_contended(&lock->dep_map, ip);
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
-               old_val = atomic_xchg(&lock->count, -1);
-               if (old_val == 1)
+               if (atomic_xchg(&lock->count, -1) == 1)
                        break;
 
                /*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                        spin_unlock_mutex(&lock->wait_lock, flags);
 
                        debug_mutex_free_waiter(&waiter);
+                       preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);
 
                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
-               schedule();
+               __schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
 
 done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
-       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-       debug_mutex_set_owner(lock, task_thread_info(task));
+       mutex_remove_waiter(lock, &waiter, current_thread_info());
+       mutex_set_owner(lock);
 
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
        spin_unlock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_free_waiter(&waiter);
+       preempt_enable();
 
        return 0;
 }
@@ -222,7 +292,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
        might_sleep();
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+                                  subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
                wake_up_process(waiter->task);
        }
 
-       debug_mutex_clear_owner(lock);
-
        spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+       int ret;
+
        might_sleep();
-       return __mutex_fastpath_lock_retval
+       ret =  __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
+       if (!ret)
+               mutex_set_owner(lock);
+
+       return ret;
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
+       int ret;
+
        might_sleep();
-       return __mutex_fastpath_lock_retval
+       ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
+       if (!ret)
+               mutex_set_owner(lock);
+
+       return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
-               debug_mutex_set_owner(lock, current_thread_info());
+               mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }
+
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-       return __mutex_fastpath_trylock(&lock->count,
-                                       __mutex_trylock_slowpath);
+       int ret;
+
+       ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+       if (ret)
+               mutex_set_owner(lock);
+
+       return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
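
The heart of the mutex change is the optimistic-spin loop added to __mutex_lock_common(): keep retrying the cmpxchg while the recorded owner appears to be running on another CPU, and fall back to the sleeping slow path once the owner blocks or the spinner itself should reschedule. The sketch below reduces that decision to user-space C11 atomics; the owner_on_cpu flag is a crude stand-in for the kernel's rq->curr comparison, and the wait-list slow path is omitted entirely.

/* Spin-then-block decision from the hunk above, heavily simplified.
 * Returns 1 if the lock was taken on the spinning fast path, 0 if the
 * caller would have to fall back to sleeping on the wait list. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int lock_count = 1;		/* 1 = unlocked, 0 = locked */
static atomic_bool owner_on_cpu = false;	/* ~ "owner is running" check */

static int mutex_spin_acquire(void)
{
	for (;;) {
		int expected = 1;

		if (atomic_compare_exchange_strong(&lock_count, &expected, 0)) {
			atomic_store(&owner_on_cpu, true);
			return 1;		/* acquired without sleeping */
		}

		/* Owner went to sleep, or we should: stop burning CPU. */
		if (!atomic_load(&owner_on_cpu))
			return 0;

		/* cpu_relax() would go here in the kernel. */
	}
}

int main(void)
{
	printf("first acquire on fast path: %d\n", mutex_spin_acquire());

	/* Simulate the owner being scheduled out while still holding the lock. */
	atomic_store(&owner_on_cpu, false);
	printf("second acquire on fast path: %d\n", mutex_spin_acquire());
	return 0;
}
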
index a075daf..67578ca 100644 (file)
 #define mutex_remove_waiter(lock, waiter, ti) \
                __list_del((waiter)->list.prev, (waiter)->list.next)
 
-#define debug_mutex_set_owner(lock, new_owner)         do { } while (0)
-#define debug_mutex_clear_owner(lock)                  do { } while (0)
+#ifdef CONFIG_SMP
+static inline void mutex_set_owner(struct mutex *lock)
+{
+       lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+       lock->owner = NULL;
+}
+#else
+static inline void mutex_set_owner(struct mutex *lock)
+{
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+}
+#endif
+
 #define debug_mutex_wake_waiter(lock, waiter)          do { } while (0)
 #define debug_mutex_free_waiter(waiter)                        do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)       do { } while (0)
index 9d79b78..edc0ba6 100644 (file)
@@ -677,9 +677,7 @@ int relay_late_setup_files(struct rchan *chan,
         */
        for_each_online_cpu(i) {
                if (unlikely(!chan->buf[i])) {
-                       printk(KERN_ERR "relay_late_setup_files: CPU %u "
-                                       "has no buffer, it must have!\n", i);
-                       BUG();
+                       WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
                        err = -EINVAL;
                        break;
                }
index 0a76d0b..7299083 100644 (file)
@@ -4404,10 +4404,7 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-                               defined(CONFIG_PREEMPT_TRACER))
-
-static inline unsigned long get_parent_ip(unsigned long addr)
+unsigned long get_parent_ip(unsigned long addr)
 {
        if (in_lock_functions(addr)) {
                addr = CALLER_ADDR2;
@@ -4417,6 +4414,9 @@ static inline unsigned long get_parent_ip(unsigned long addr)
        return addr;
 }
 
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+                               defined(CONFIG_PREEMPT_TRACER))
+
 void __kprobes add_preempt_count(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
@@ -4543,15 +4543,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;
 
-need_resched:
-       preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_qsctr_inc(cpu);
@@ -4608,13 +4606,80 @@ need_resched_nonpreemptible:
 
        if (unlikely(reacquire_kernel_lock(current) < 0))
                goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+       preempt_disable();
+       __schedule();
        preempt_enable_no_resched();
        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
 
+#ifdef CONFIG_SMP
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+{
+       unsigned int cpu;
+       struct rq *rq;
+
+       if (!sched_feat(OWNER_SPIN))
+               return 0;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /*
+        * Need to access the cpu field knowing that
+        * DEBUG_PAGEALLOC could have unmapped it if
+        * the mutex owner just released it and exited.
+        */
+       if (probe_kernel_address(&owner->cpu, cpu))
+               goto out;
+#else
+       cpu = owner->cpu;
+#endif
+
+       /*
+        * Even if the access succeeded (likely case),
+        * the cpu field may no longer be valid.
+        */
+       if (cpu >= nr_cpumask_bits)
+               goto out;
+
+       /*
+        * We need to validate that we can do a
+        * get_cpu() and that we have the percpu area.
+        */
+       if (!cpu_online(cpu))
+               goto out;
+
+       rq = cpu_rq(cpu);
+
+       for (;;) {
+               /*
+                * Owner changed, break to re-assess state.
+                */
+               if (lock->owner != owner)
+                       break;
+
+               /*
+                * Is that owner really running on that cpu?
+                */
+               if (task_thread_info(rq->curr) != owner || need_resched())
+                       return 0;
+
+               cpu_relax();
+       }
+out:
+       return 1;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
index a0b0852..7ec82c1 100644 (file)
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
        /*
@@ -87,7 +92,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
@@ -116,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
        if (unlikely(delta < 0))
                delta = 0;
 
+       if (unlikely(!sched_clock_running))
+               return 0ull;
+
        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
-        *                    max(scd->tick_gtod, scd->clock),
-        *                    scd->tick_gtod + TICK_NSEC);
+        *                    max(scd->tick_gtod, scd->clock),
+        *                    scd->tick_gtod + TICK_NSEC);
         */
 
        clock = scd->tick_gtod + delta;
@@ -148,8 +156,20 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-       struct sched_clock_data *scd = cpu_sdc(cpu);
        u64 now, clock, this_clock, remote_clock;
+       struct sched_clock_data *scd;
+
+       if (sched_clock_stable)
+               return sched_clock();
+
+       scd = cpu_sdc(cpu);
+
+       /*
+        * Normally this is not called in NMI context - but if it is,
+        * trying to do any locking here is totally lethal.
+        */
+       if (unlikely(in_nmi()))
+               return scd->clock;
 
        if (unlikely(!sched_clock_running))
                return 0ull;
@@ -193,6 +213,8 @@ u64 sched_clock_cpu(int cpu)
        return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
        struct sched_clock_data *scd = this_scd();
@@ -235,22 +257,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-       sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-       if (unlikely(!sched_clock_running))
-               return 0;
-
-       return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
index da5d93b..07bc02e 100644 (file)
@@ -13,3 +13,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
 SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(OWNER_SPIN, 1)
index 57d3f67..bbf6d64 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
 
@@ -79,13 +80,23 @@ static void __local_bh_disable(unsigned long ip)
        WARN_ON_ONCE(in_irq());
 
        raw_local_irq_save(flags);
-       add_preempt_count(SOFTIRQ_OFFSET);
+       /*
+        * The preempt tracer hooks into add_preempt_count and will break
+        * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
+        * is set and before current->softirq_enabled is cleared.
+        * We must manually increment preempt_count here and manually
+        * call trace_preempt_off later.
+        */
+       preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
+
+       if (preempt_count() == SOFTIRQ_OFFSET)
+               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip)
index 13dd64f..ef1c385 100644 (file)
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
        debug_object_free(timer, &timer_debug_descr);
 }
 
-static void __init_timer(struct timer_list *timer);
+static void __init_timer(struct timer_list *timer,
+                        const char *name,
+                        struct lock_class_key *key);
 
-void init_timer_on_stack(struct timer_list *timer)
+void init_timer_on_stack_key(struct timer_list *timer,
+                            const char *name,
+                            struct lock_class_key *key)
 {
        debug_object_init_on_stack(timer, &timer_debug_descr);
-       __init_timer(timer);
+       __init_timer(timer, name, key);
 }
-EXPORT_SYMBOL_GPL(init_timer_on_stack);
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 
 void destroy_timer_on_stack(struct timer_list *timer)
 {
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
 static inline void debug_timer_deactivate(struct timer_list *timer) { }
 #endif
 
-static void __init_timer(struct timer_list *timer)
+static void __init_timer(struct timer_list *timer,
+                        const char *name,
+                        struct lock_class_key *key)
 {
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
 #endif
+       lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
 /**
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)
  * init_timer() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer(struct timer_list *timer)
+void init_timer_key(struct timer_list *timer,
+                   const char *name,
+                   struct lock_class_key *key)
 {
        debug_timer_init(timer);
-       __init_timer(timer);
+       __init_timer(timer, name, key);
 }
-EXPORT_SYMBOL(init_timer);
+EXPORT_SYMBOL(init_timer_key);
 
-void init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable_key(struct timer_list *timer,
+                              const char *name,
+                              struct lock_class_key *key)
 {
-       init_timer(timer);
+       init_timer_key(timer, name, key);
        timer_set_deferrable(timer);
 }
-EXPORT_SYMBOL(init_timer_deferrable);
+EXPORT_SYMBOL(init_timer_deferrable_key);
 
 static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
@@ -789,6 +800,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  */
 int del_timer_sync(struct timer_list *timer)
 {
+#ifdef CONFIG_LOCKDEP
+       unsigned long flags;
+
+       local_irq_save(flags);
+       lock_map_acquire(&timer->lockdep_map);
+       lock_map_release(&timer->lockdep_map);
+       local_irq_restore(flags);
+#endif
+
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
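The acquire/release pair added above pairs with the lock_map_acquire() placed around the fn(data) call in __run_timers() below, so lockdep can link the two lock chains. A minimal sketch of the deadlock pattern this annotation is meant to catch -- hypothetical driver code, all names invented for illustration:

    static DEFINE_SPINLOCK(my_lock);                /* hypothetical lock  */
    static struct timer_list my_timer;              /* hypothetical timer */

    /* assume my_timer was set up with setup_timer(&my_timer, my_timer_fn, 0) */
    static void my_timer_fn(unsigned long data)
    {
            spin_lock(&my_lock);                    /* callback takes my_lock */
            /* ... */
            spin_unlock(&my_lock);
    }

    static void my_teardown(void)
    {
            spin_lock(&my_lock);
            /*
             * If my_timer_fn() is running right now, del_timer_sync() waits
             * for it to finish -- but the callback is blocked on my_lock,
             * which we hold: deadlock.  With the annotation, lockdep sees
             * my_lock -> my_timer.lockdep_map here and the reverse order in
             * __run_timers(), and reports the inversion instead.
             */
            del_timer_sync(&my_timer);
            spin_unlock(&my_lock);
    }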
@@ -861,10 +881,36 @@ static inline void __run_timers(struct tvec_base *base)
 
                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
+
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+                               /*
+                                * It is permissible to free the timer from
+                                * inside the function that is called from
+                                * it, this we need to take into account for
+                                * lockdep too. To avoid bogus "held lock
+                                * freed" warnings as well as problems when
+                                * looking into timer->lockdep_map, make a
+                                * copy and use that here.
+                                */
+                               struct lockdep_map lockdep_map =
+                                       timer->lockdep_map;
+#endif
+                               /*
+                                * Couple the lock chain with the lock chain at
+                                * del_timer_sync() by acquiring the lock_map
+                                * around the fn() call here and in
+                                * del_timer_sync().
+                                */
+                               lock_map_acquire(&lockdep_map);
+
                                fn(data);
+
+                               lock_map_release(&lockdep_map);
+
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_ERR "huh, entered %p "
                                               "with preempt_count %08x, exited"
index 34e707e..8e4a2a6 100644 (file)
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT
 config NOP_TRACER
        bool
 
+config HAVE_FTRACE_NMI_ENTER
+       bool
+
 config HAVE_FUNCTION_TRACER
        bool
 
@@ -37,6 +40,11 @@ config TRACER_MAX_TRACE
 config RING_BUFFER
        bool
 
+config FTRACE_NMI_ENTER
+       bool
+       depends on HAVE_FTRACE_NMI_ENTER
+       default y
+
 config TRACING
        bool
        select DEBUG_FS
@@ -44,13 +52,25 @@ config TRACING
        select STACKTRACE if STACKTRACE_SUPPORT
        select TRACEPOINTS
        select NOP_TRACER
+       select BINARY_PRINTF
+
+#
+# Minimum requirements an architecture has to meet for us to
+# be able to offer generic tracing facilities:
+#
+config TRACING_SUPPORT
+       bool
+       depends on TRACE_IRQFLAGS_SUPPORT
+       depends on STACKTRACE_SUPPORT
+       default y
+
+if TRACING_SUPPORT
 
 menu "Tracers"
 
 config FUNCTION_TRACER
        bool "Kernel Function Tracer"
        depends on HAVE_FUNCTION_TRACER
-       depends on DEBUG_KERNEL
        select FRAME_POINTER
        select KALLSYMS
        select TRACING
@@ -83,7 +103,6 @@ config IRQSOFF_TRACER
        default n
        depends on TRACE_IRQFLAGS_SUPPORT
        depends on GENERIC_TIME
-       depends on DEBUG_KERNEL
        select TRACE_IRQFLAGS
        select TRACING
        select TRACER_MAX_TRACE
@@ -106,7 +125,6 @@ config PREEMPT_TRACER
        default n
        depends on GENERIC_TIME
        depends on PREEMPT
-       depends on DEBUG_KERNEL
        select TRACING
        select TRACER_MAX_TRACE
        help
@@ -127,13 +145,13 @@ config SYSPROF_TRACER
        bool "Sysprof Tracer"
        depends on X86
        select TRACING
+       select CONTEXT_SWITCH_TRACER
        help
          This tracer provides the trace needed by the 'Sysprof' userspace
          tool.
 
 config SCHED_TRACER
        bool "Scheduling Latency Tracer"
-       depends on DEBUG_KERNEL
        select TRACING
        select CONTEXT_SWITCH_TRACER
        select TRACER_MAX_TRACE
@@ -143,16 +161,22 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
        bool "Trace process context switches"
-       depends on DEBUG_KERNEL
        select TRACING
        select MARKERS
        help
          This tracer gets called from the context switch and records
          all switching of tasks.
 
+config EVENT_TRACER
+       bool "Trace various events in the kernel"
+       select TRACING
+       help
+         This tracer hooks into various trace points in the kernel,
+         allowing the user to pick and choose which trace points they
+         want to trace.
+
 config BOOT_TRACER
        bool "Trace boot initcalls"
-       depends on DEBUG_KERNEL
        select TRACING
        select CONTEXT_SWITCH_TRACER
        help
@@ -165,13 +189,11 @@ config BOOT_TRACER
          representation of the delays during initcalls - but the raw
          /debug/tracing/trace text output is readable too.
 
-         ( Note that tracing self tests can't be enabled if this tracer is
-           selected, because the self-tests are an initcall as well and that
-           would invalidate the boot trace. )
+         You must pass in ftrace=initcall to the kernel command line
+         to enable this on bootup.
 
 config TRACE_BRANCH_PROFILING
        bool "Trace likely/unlikely profiler"
-       depends on DEBUG_KERNEL
        select TRACING
        help
          This tracer profiles all the likely and unlikely macros
@@ -224,7 +246,6 @@ config BRANCH_TRACER
 
 config POWER_TRACER
        bool "Trace power consumption behavior"
-       depends on DEBUG_KERNEL
        depends on X86
        select TRACING
        help
@@ -236,7 +257,6 @@ config POWER_TRACER
 config STACK_TRACER
        bool "Trace max stack"
        depends on HAVE_FUNCTION_TRACER
-       depends on DEBUG_KERNEL
        select FUNCTION_TRACER
        select STACKTRACE
        select KALLSYMS
@@ -266,11 +286,66 @@ config HW_BRANCH_TRACER
          This tracer records all branches on the system in a circular
          buffer giving access to the last N branches for each cpu.
 
+config KMEMTRACE
+       bool "Trace SLAB allocations"
+       select TRACING
+       help
+         kmemtrace provides tracing for slab allocator functions, such as
+         kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
+         data is then fed to the userspace application in order to analyse
+         allocation hotspots, internal fragmentation and so on, making it
+         possible to see how well an allocator performs, as well as debug
+         and profile kernel code.
+
+         This requires a userspace application to use it. See
+         Documentation/vm/kmemtrace.txt for more information.
+
+         Saying Y will make the kernel somewhat larger and slower. However,
+         if you disable kmemtrace at run-time or boot-time, the performance
+         impact is minimal (depending on the arch the kernel is built for).
+
+         If unsure, say N.
+
+config WORKQUEUE_TRACER
+       bool "Trace workqueues"
+       select TRACING
+       help
+         The workqueue tracer provides some statistical information
+         about each cpu workqueue thread, such as the number of works
+         inserted and executed since its creation. It can help to
+         evaluate the amount of work each of them has to perform.
+         For example, it can help a developer decide whether to use a
+         per-cpu workqueue instead of a singlethreaded one.
+
+config BLK_DEV_IO_TRACE
+       bool "Support for tracing block io actions"
+       depends on SYSFS
+       depends on BLOCK
+       select RELAY
+       select DEBUG_FS
+       select TRACEPOINTS
+       select TRACING
+       select STACKTRACE
+       help
+         Say Y here if you want to be able to trace the block layer actions
+         on a given queue. Tracing allows you to see any traffic happening
+         on a block device queue. For more information (and the userspace
+         support tools needed), fetch the blktrace tools from:
+
+         git://git.kernel.dk/blktrace.git
+
+         Tracing is also possible using the ftrace interface, e.g.:
+
+           echo 1 > /sys/block/sda/sda1/trace/enable
+           echo blk > /sys/kernel/debug/tracing/current_tracer
+           cat /sys/kernel/debug/tracing/trace_pipe
+
+         If unsure, say N.
+
 config DYNAMIC_FTRACE
        bool "enable/disable ftrace tracepoints dynamically"
        depends on FUNCTION_TRACER
        depends on HAVE_DYNAMIC_FTRACE
-       depends on DEBUG_KERNEL
        default y
        help
          This option will modify all the calls to ftrace dynamically
@@ -296,7 +371,7 @@ config FTRACE_SELFTEST
 
 config FTRACE_STARTUP_TEST
        bool "Perform a startup test on ftrace"
-       depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
+       depends on TRACING
        select FTRACE_SELFTEST
        help
          This option performs a series of startup tests on ftrace. On bootup
@@ -306,7 +381,7 @@ config FTRACE_STARTUP_TEST
 
 config MMIOTRACE
        bool "Memory mapped IO tracing"
-       depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+       depends on HAVE_MMIOTRACE_SUPPORT && PCI
        select TRACING
        help
          Mmiotrace traces Memory Mapped I/O access and is meant for
@@ -328,3 +403,6 @@ config MMIOTRACE_TEST
          Say N, unless you absolutely know what you are doing.
 
 endmenu
+
+endif # TRACING_SUPPORT
+
index 349d5a9..c7a2943 100644 (file)
@@ -19,6 +19,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
+obj-$(CONFIG_TRACING) += trace_clock.o
+obj-$(CONFIG_TRACING) += trace_output.o
+obj-$(CONFIG_TRACING) += trace_stat.o
+obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
@@ -33,5 +37,11 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_POWER_TRACER) += trace_power.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
+obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
+obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+obj-$(CONFIG_EVENT_TRACER) += trace_events.o
+obj-$(CONFIG_EVENT_TRACER) += events.o
+obj-$(CONFIG_EVENT_TRACER) += trace_export.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
new file mode 100644 (file)
index 0000000..d24a10b
--- /dev/null
@@ -0,0 +1,1537 @@
+/*
+ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <trace/block.h>
+#include <linux/uaccess.h>
+#include "trace_output.h"
+
+static unsigned int blktrace_seq __read_mostly = 1;
+
+static struct trace_array *blk_tr;
+static int __read_mostly  blk_tracer_enabled;
+
+/* Select an alternative, minimalistic output instead of the original one */
+#define TRACE_BLK_OPT_CLASSIC  0x1
+
+static struct tracer_opt blk_tracer_opts[] = {
+       /* Default disable the minimalistic output */
+       { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
+       { }
+};
+
+static struct tracer_flags blk_tracer_flags = {
+       .val  = 0,
+       .opts = blk_tracer_opts,
+};
+
+/* Global reference count of probes */
+static DEFINE_MUTEX(blk_probe_mutex);
+static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+
+static int blk_register_tracepoints(void);
+static void blk_unregister_tracepoints(void);
+
+/*
+ * Send out a notify message.
+ */
+static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+                      const void *data, size_t len)
+{
+       struct blk_io_trace *t;
+
+       if (!bt->rchan)
+               return;
+
+       t = relay_reserve(bt->rchan, sizeof(*t) + len);
+       if (t) {
+               const int cpu = smp_processor_id();
+
+               t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+               t->time = ktime_to_ns(ktime_get());
+               t->device = bt->dev;
+               t->action = action;
+               t->pid = pid;
+               t->cpu = cpu;
+               t->pdu_len = len;
+               memcpy((void *) t + sizeof(*t), data, len);
+       }
+}
+
+/*
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+{
+       tsk->btrace_seq = blktrace_seq;
+       trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+}
+
+static void trace_note_time(struct blk_trace *bt)
+{
+       struct timespec now;
+       unsigned long flags;
+       u32 words[2];
+
+       getnstimeofday(&now);
+       words[0] = now.tv_sec;
+       words[1] = now.tv_nsec;
+
+       local_irq_save(flags);
+       trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
+       local_irq_restore(flags);
+}
+
+void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
+{
+       int n;
+       va_list args;
+       unsigned long flags;
+       char *buf;
+
+       if (blk_tr) {
+               va_start(args, fmt);
+               ftrace_vprintk(fmt, args);
+               va_end(args);
+               return;
+       }
+
+       if (!bt->msg_data)
+               return;
+
+       local_irq_save(flags);
+       buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
+       va_start(args, fmt);
+       n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
+       va_end(args);
+
+       trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(__trace_note_message);
+
+static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
+                        pid_t pid)
+{
+       if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+               return 1;
+       if (sector < bt->start_lba || sector > bt->end_lba)
+               return 1;
+       if (bt->pid && pid != bt->pid)
+               return 1;
+
+       return 0;
+}
+
+/*
+ * Data direction bit lookup
+ */
+static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ),
+                                        BLK_TC_ACT(BLK_TC_WRITE) };
+
+/* The ilog2() calls fall out because they're constant */
+#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
+         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+
+/*
+ * The worker for the various blk_add_trace*() types. Fills out a
+ * blk_io_trace structure and places it in a per-cpu subbuffer.
+ */
+static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+                    int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+       struct task_struct *tsk = current;
+       struct ring_buffer_event *event = NULL;
+       struct blk_io_trace *t;
+       unsigned long flags = 0;
+       unsigned long *sequence;
+       pid_t pid;
+       int cpu, pc = 0;
+
+       if (unlikely(bt->trace_state != Blktrace_running ||
+                    !blk_tracer_enabled))
+               return;
+
+       what |= ddir_act[rw & WRITE];
+       what |= MASK_TC_BIT(rw, BARRIER);
+       what |= MASK_TC_BIT(rw, SYNCIO);
+       what |= MASK_TC_BIT(rw, AHEAD);
+       what |= MASK_TC_BIT(rw, META);
+       what |= MASK_TC_BIT(rw, DISCARD);
+
+       pid = tsk->pid;
+       if (unlikely(act_log_check(bt, what, sector, pid)))
+               return;
+       cpu = raw_smp_processor_id();
+
+       if (blk_tr) {
+               tracing_record_cmdline(current);
+
+               pc = preempt_count();
+               event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+                                                 sizeof(*t) + pdu_len,
+                                                 0, pc);
+               if (!event)
+                       return;
+               t = ring_buffer_event_data(event);
+               goto record_it;
+       }
+
+       /*
+        * A word about the locking here - we disable interrupts to reserve
+        * some space in the relay per-cpu buffer, to prevent an irq
+        * from coming in and stepping on our toes.
+        */
+       local_irq_save(flags);
+
+       if (unlikely(tsk->btrace_seq != blktrace_seq))
+               trace_note_tsk(bt, tsk);
+
+       t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+       if (t) {
+               sequence = per_cpu_ptr(bt->sequence, cpu);
+
+               t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+               t->sequence = ++(*sequence);
+               t->time = ktime_to_ns(ktime_get());
+record_it:
+               /*
+                * These two are not needed in ftrace as they are in the
+                * generic trace_entry, filled by tracing_generic_entry_update,
+                * but for the trace_event->bin() synthesizer benefit we do it
+                * here too.
+                */
+               t->cpu = cpu;
+               t->pid = pid;
+
+               t->sector = sector;
+               t->bytes = bytes;
+               t->action = what;
+               t->device = bt->dev;
+               t->error = error;
+               t->pdu_len = pdu_len;
+
+               if (pdu_len)
+                       memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
+
+               if (blk_tr) {
+                       trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+                       return;
+               }
+       }
+
+       local_irq_restore(flags);
+}
+
+static struct dentry *blk_tree_root;
+static DEFINE_MUTEX(blk_tree_mutex);
+
+static void blk_trace_cleanup(struct blk_trace *bt)
+{
+       debugfs_remove(bt->msg_file);
+       debugfs_remove(bt->dropped_file);
+       relay_close(bt->rchan);
+       free_percpu(bt->sequence);
+       free_percpu(bt->msg_data);
+       kfree(bt);
+       mutex_lock(&blk_probe_mutex);
+       if (atomic_dec_and_test(&blk_probes_ref))
+               blk_unregister_tracepoints();
+       mutex_unlock(&blk_probe_mutex);
+}
+
+int blk_trace_remove(struct request_queue *q)
+{
+       struct blk_trace *bt;
+
+       bt = xchg(&q->blk_trace, NULL);
+       if (!bt)
+               return -EINVAL;
+
+       if (bt->trace_state == Blktrace_setup ||
+           bt->trace_state == Blktrace_stopped)
+               blk_trace_cleanup(bt);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_trace_remove);
+
+static int blk_dropped_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+
+       return 0;
+}
+
+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+                               size_t count, loff_t *ppos)
+{
+       struct blk_trace *bt = filp->private_data;
+       char buf[16];
+
+       snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+
+       return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static const struct file_operations blk_dropped_fops = {
+       .owner =        THIS_MODULE,
+       .open =         blk_dropped_open,
+       .read =         blk_dropped_read,
+};
+
+static int blk_msg_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+
+       return 0;
+}
+
+static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
+                               size_t count, loff_t *ppos)
+{
+       char *msg;
+       struct blk_trace *bt;
+
+       if (count > BLK_TN_MAX_MSG)
+               return -EINVAL;
+
+       msg = kmalloc(count, GFP_KERNEL);
+       if (msg == NULL)
+               return -ENOMEM;
+
+       if (copy_from_user(msg, buffer, count)) {
+               kfree(msg);
+               return -EFAULT;
+       }
+
+       bt = filp->private_data;
+       __trace_note_message(bt, "%s", msg);
+       kfree(msg);
+
+       return count;
+}
+
+static const struct file_operations blk_msg_fops = {
+       .owner =        THIS_MODULE,
+       .open =         blk_msg_open,
+       .write =        blk_msg_write,
+};
+
+/*
+ * Keep track of how many times we encountered a full subbuffer, to aid
+ * the user space app in telling how many lost events there were.
+ */
+static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+                                    void *prev_subbuf, size_t prev_padding)
+{
+       struct blk_trace *bt;
+
+       if (!relay_buf_full(buf))
+               return 1;
+
+       bt = buf->chan->private_data;
+       atomic_inc(&bt->dropped);
+       return 0;
+}
+
+static int blk_remove_buf_file_callback(struct dentry *dentry)
+{
+       struct dentry *parent = dentry->d_parent;
+       debugfs_remove(dentry);
+
+       /*
+        * this will fail for all but the last file, but that is ok. what we
+        * care about is the top level buts->name directory going away, when
+        * the last trace file is gone. Then we don't have to rmdir() that
+        * manually on trace stop, so it nicely solves the issue with
+        * force killing of running traces.
+        */
+
+       debugfs_remove(parent);
+       return 0;
+}
+
+static struct dentry *blk_create_buf_file_callback(const char *filename,
+                                                  struct dentry *parent,
+                                                  int mode,
+                                                  struct rchan_buf *buf,
+                                                  int *is_global)
+{
+       return debugfs_create_file(filename, mode, parent, buf,
+                                       &relay_file_operations);
+}
+
+static struct rchan_callbacks blk_relay_callbacks = {
+       .subbuf_start           = blk_subbuf_start_callback,
+       .create_buf_file        = blk_create_buf_file_callback,
+       .remove_buf_file        = blk_remove_buf_file_callback,
+};
+
+/*
+ * Setup everything required to start tracing
+ */
+int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+                       struct blk_user_trace_setup *buts)
+{
+       struct blk_trace *old_bt, *bt = NULL;
+       struct dentry *dir = NULL;
+       int ret, i;
+
+       if (!buts->buf_size || !buts->buf_nr)
+               return -EINVAL;
+
+       strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
+       buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
+
+       /*
+        * some device names have larger paths - convert the slashes
+        * to underscores for this to work as expected
+        */
+       for (i = 0; i < strlen(buts->name); i++)
+               if (buts->name[i] == '/')
+                       buts->name[i] = '_';
+
+       ret = -ENOMEM;
+       bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+       if (!bt)
+               goto err;
+
+       bt->sequence = alloc_percpu(unsigned long);
+       if (!bt->sequence)
+               goto err;
+
+       bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
+       if (!bt->msg_data)
+               goto err;
+
+       ret = -ENOENT;
+
+       if (!blk_tree_root) {
+               blk_tree_root = debugfs_create_dir("block", NULL);
+               if (!blk_tree_root)
+                       return -ENOMEM;
+       }
+
+       dir = debugfs_create_dir(buts->name, blk_tree_root);
+
+       if (!dir)
+               goto err;
+
+       bt->dir = dir;
+       bt->dev = dev;
+       atomic_set(&bt->dropped, 0);
+
+       ret = -EIO;
+       bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+                                              &blk_dropped_fops);
+       if (!bt->dropped_file)
+               goto err;
+
+       bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
+       if (!bt->msg_file)
+               goto err;
+
+       bt->rchan = relay_open("trace", dir, buts->buf_size,
+                               buts->buf_nr, &blk_relay_callbacks, bt);
+       if (!bt->rchan)
+               goto err;
+
+       bt->act_mask = buts->act_mask;
+       if (!bt->act_mask)
+               bt->act_mask = (u16) -1;
+
+       bt->start_lba = buts->start_lba;
+       bt->end_lba = buts->end_lba;
+       if (!bt->end_lba)
+               bt->end_lba = -1ULL;
+
+       bt->pid = buts->pid;
+       bt->trace_state = Blktrace_setup;
+
+       mutex_lock(&blk_probe_mutex);
+       if (atomic_add_return(1, &blk_probes_ref) == 1) {
+               ret = blk_register_tracepoints();
+               if (ret)
+                       goto probe_err;
+       }
+       mutex_unlock(&blk_probe_mutex);
+
+       ret = -EBUSY;
+       old_bt = xchg(&q->blk_trace, bt);
+       if (old_bt) {
+               (void) xchg(&q->blk_trace, old_bt);
+               goto err;
+       }
+
+       return 0;
+probe_err:
+       atomic_dec(&blk_probes_ref);
+       mutex_unlock(&blk_probe_mutex);
+err:
+       if (bt) {
+               if (bt->msg_file)
+                       debugfs_remove(bt->msg_file);
+               if (bt->dropped_file)
+                       debugfs_remove(bt->dropped_file);
+               free_percpu(bt->sequence);
+               free_percpu(bt->msg_data);
+               if (bt->rchan)
+                       relay_close(bt->rchan);
+               kfree(bt);
+       }
+       return ret;
+}
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+                   char __user *arg)
+{
+       struct blk_user_trace_setup buts;
+       int ret;
+
+       ret = copy_from_user(&buts, arg, sizeof(buts));
+       if (ret)
+               return -EFAULT;
+
+       ret = do_blk_trace_setup(q, name, dev, &buts);
+       if (ret)
+               return ret;
+
+       if (copy_to_user(arg, &buts, sizeof(buts)))
+               return -EFAULT;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_trace_setup);
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+       int ret;
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt == NULL)
+               return -EINVAL;
+
+       /*
+        * For starting a trace, we can transition from a setup or stopped
+        * trace. For stopping a trace, the state must be running
+        */
+       ret = -EINVAL;
+       if (start) {
+               if (bt->trace_state == Blktrace_setup ||
+                   bt->trace_state == Blktrace_stopped) {
+                       blktrace_seq++;
+                       smp_mb();
+                       bt->trace_state = Blktrace_running;
+
+                       trace_note_time(bt);
+                       ret = 0;
+               }
+       } else {
+               if (bt->trace_state == Blktrace_running) {
+                       bt->trace_state = Blktrace_stopped;
+                       relay_flush(bt->rchan);
+                       ret = 0;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(blk_trace_startstop);
+
+/**
+ * blk_trace_ioctl: - handle the ioctls associated with tracing
+ * @bdev:      the block device
+ * @cmd:       the ioctl cmd
+ * @arg:       the argument data, if any
+ *
+ **/
+int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
+{
+       struct request_queue *q;
+       int ret, start = 0;
+       char b[BDEVNAME_SIZE];
+
+       q = bdev_get_queue(bdev);
+       if (!q)
+               return -ENXIO;
+
+       mutex_lock(&bdev->bd_mutex);
+
+       switch (cmd) {
+       case BLKTRACESETUP:
+               bdevname(bdev, b);
+               ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
+               break;
+       case BLKTRACESTART:
+               start = 1;
+       case BLKTRACESTOP:
+               ret = blk_trace_startstop(q, start);
+               break;
+       case BLKTRACETEARDOWN:
+               ret = blk_trace_remove(q);
+               break;
+       default:
+               ret = -ENOTTY;
+               break;
+       }
+
+       mutex_unlock(&bdev->bd_mutex);
+       return ret;
+}
+
+/**
+ * blk_trace_shutdown: - stop and cleanup trace structures
+ * @q:    the request queue associated with the device
+ *
+ **/
+void blk_trace_shutdown(struct request_queue *q)
+{
+       if (q->blk_trace) {
+               blk_trace_startstop(q, 0);
+               blk_trace_remove(q);
+       }
+}
+
+/*
+ * blktrace probes
+ */
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q:         queue the io is for
+ * @rq:                the source request
+ * @what:      the action
+ *
+ * Description:
+ *     Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+                                   u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+       int rw = rq->cmd_flags & 0x03;
+
+       if (likely(!bt))
+               return;
+
+       if (blk_discard_rq(rq))
+               rw |= (1 << BIO_RW_DISCARD);
+
+       if (blk_pc_request(rq)) {
+               what |= BLK_TC_ACT(BLK_TC_PC);
+               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
+                               sizeof(rq->cmd), rq->cmd);
+       } else  {
+               what |= BLK_TC_ACT(BLK_TC_FS);
+               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+                               rw, what, rq->errors, 0, NULL);
+       }
+}
+
+static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+}
+
+static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+}
+
+static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+}
+
+static void blk_add_trace_rq_requeue(struct request_queue *q,
+                                    struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+}
+
+static void blk_add_trace_rq_complete(struct request_queue *q,
+                                     struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q:         queue the io is for
+ * @bio:       the source bio
+ * @what:      the action
+ *
+ * Description:
+ *     Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+                                    u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (likely(!bt))
+               return;
+
+       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
+                       !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+}
+
+static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+}
+
+static void blk_add_trace_bio_backmerge(struct request_queue *q,
+                                       struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+}
+
+static void blk_add_trace_bio_frontmerge(struct request_queue *q,
+                                        struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+}
+
+static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+}
+
+static void blk_add_trace_getrq(struct request_queue *q,
+                               struct bio *bio, int rw)
+{
+       if (bio)
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+       else {
+               struct blk_trace *bt = q->blk_trace;
+
+               if (bt)
+                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+       }
+}
+
+
+static void blk_add_trace_sleeprq(struct request_queue *q,
+                                 struct bio *bio, int rw)
+{
+       if (bio)
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+       else {
+               struct blk_trace *bt = q->blk_trace;
+
+               if (bt)
+                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
+                                       0, 0, NULL);
+       }
+}
+
+static void blk_add_trace_plug(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt)
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+}
+
+static void blk_add_trace_unplug_io(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt) {
+               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+               __be64 rpdu = cpu_to_be64(pdu);
+
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
+                               sizeof(rpdu), &rpdu);
+       }
+}
+
+static void blk_add_trace_unplug_timer(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt) {
+               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+               __be64 rpdu = cpu_to_be64(pdu);
+
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
+                               sizeof(rpdu), &rpdu);
+       }
+}
+
+static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
+                               unsigned int pdu)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt) {
+               __be64 rpdu = cpu_to_be64(pdu);
+
+               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+                               BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+                               sizeof(rpdu), &rpdu);
+       }
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q:         queue the io is for
+ * @bio:       the source bio
+ * @dev:       target device
+ * @from:      source sector
+ * @to:                target sector
+ *
+ * Description:
+ *     A device mapper or raid target sometimes needs to split a bio because
+ *     it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+                                      dev_t dev, sector_t from, sector_t to)
+{
+       struct blk_trace *bt = q->blk_trace;
+       struct blk_io_trace_remap r;
+
+       if (likely(!bt))
+               return;
+
+       r.device = cpu_to_be32(dev);
+       r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
+       r.sector = cpu_to_be64(to);
+
+       __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
+                       !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+/**
+ * blk_add_driver_data - Add binary message with driver-specific data
+ * @q:         queue the io is for
+ * @rq:                io request
+ * @data:      driver-specific data
+ * @len:       length of driver-specific data
+ *
+ * Description:
+ *     Some drivers might want to write driver-specific data per request.
+ *
+ **/
+void blk_add_driver_data(struct request_queue *q,
+                        struct request *rq,
+                        void *data, size_t len)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (likely(!bt))
+               return;
+
+       if (blk_pc_request(rq))
+               __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
+                               rq->errors, len, data);
+       else
+               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+                               0, BLK_TA_DRV_DATA, rq->errors, len, data);
+}
+EXPORT_SYMBOL_GPL(blk_add_driver_data);
+
+static int blk_register_tracepoints(void)
+{
+       int ret;
+
+       ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
+       WARN_ON(ret);
+       ret = register_trace_block_getrq(blk_add_trace_getrq);
+       WARN_ON(ret);
+       ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
+       WARN_ON(ret);
+       ret = register_trace_block_plug(blk_add_trace_plug);
+       WARN_ON(ret);
+       ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+       WARN_ON(ret);
+       ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
+       WARN_ON(ret);
+       ret = register_trace_block_split(blk_add_trace_split);
+       WARN_ON(ret);
+       ret = register_trace_block_remap(blk_add_trace_remap);
+       WARN_ON(ret);
+       return 0;
+}
+
+static void blk_unregister_tracepoints(void)
+{
+       unregister_trace_block_remap(blk_add_trace_remap);
+       unregister_trace_block_split(blk_add_trace_split);
+       unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
+       unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+       unregister_trace_block_plug(blk_add_trace_plug);
+       unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
+       unregister_trace_block_getrq(blk_add_trace_getrq);
+       unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
+       unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+       unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+       unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
+       unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+       unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
+       unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+       unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
+       unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
+       unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
+
+       tracepoint_synchronize_unregister();
+}
+
+/*
+ * struct blk_io_tracer formatting routines
+ */
+
+static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
+{
+       int i = 0;
+
+       if (t->action & BLK_TC_DISCARD)
+               rwbs[i++] = 'D';
+       else if (t->action & BLK_TC_WRITE)
+               rwbs[i++] = 'W';
+       else if (t->bytes)
+               rwbs[i++] = 'R';
+       else
+               rwbs[i++] = 'N';
+
+       if (t->action & BLK_TC_AHEAD)
+               rwbs[i++] = 'A';
+       if (t->action & BLK_TC_BARRIER)
+               rwbs[i++] = 'B';
+       if (t->action & BLK_TC_SYNC)
+               rwbs[i++] = 'S';
+       if (t->action & BLK_TC_META)
+               rwbs[i++] = 'M';
+
+       rwbs[i] = '\0';
+}
+
+static inline
+const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
+{
+       return (const struct blk_io_trace *)ent;
+}
+
+static inline const void *pdu_start(const struct trace_entry *ent)
+{
+       return te_blk_io_trace(ent) + 1;
+}
+
+static inline u32 t_sec(const struct trace_entry *ent)
+{
+       return te_blk_io_trace(ent)->bytes >> 9;
+}
+
+static inline unsigned long long t_sector(const struct trace_entry *ent)
+{
+       return te_blk_io_trace(ent)->sector;
+}
+
+static inline __u16 t_error(const struct trace_entry *ent)
+{
+       return te_blk_io_trace(ent)->error;
+}
+
+static __u64 get_pdu_int(const struct trace_entry *ent)
+{
+       const __u64 *val = pdu_start(ent);
+       return be64_to_cpu(*val);
+}
+
+static void get_pdu_remap(const struct trace_entry *ent,
+                         struct blk_io_trace_remap *r)
+{
+       const struct blk_io_trace_remap *__r = pdu_start(ent);
+       __u64 sector = __r->sector;
+
+       r->device = be32_to_cpu(__r->device);
+       r->device_from = be32_to_cpu(__r->device_from);
+       r->sector = be64_to_cpu(sector);
+}
+
+static int blk_log_action_iter(struct trace_iterator *iter, const char *act)
+{
+       char rwbs[6];
+       unsigned long long ts  = ns2usecs(iter->ts);
+       unsigned long usec_rem = do_div(ts, USEC_PER_SEC);
+       unsigned secs          = (unsigned long)ts;
+       const struct trace_entry *ent = iter->ent;
+       const struct blk_io_trace *t = (const struct blk_io_trace *)ent;
+
+       fill_rwbs(rwbs, t);
+
+       return trace_seq_printf(&iter->seq,
+                               "%3d,%-3d %2d %5d.%06lu %5u %2s %3s ",
+                               MAJOR(t->device), MINOR(t->device), iter->cpu,
+                               secs, usec_rem, ent->pid, act, rwbs);
+}
+
+static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t,
+                             const char *act)
+{
+       char rwbs[6];
+       fill_rwbs(rwbs, t);
+       return trace_seq_printf(s, "%3d,%-3d %2s %3s ",
+                               MAJOR(t->device), MINOR(t->device), act, rwbs);
+}
+
+static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
+{
+       const char *cmd = trace_find_cmdline(ent->pid);
+
+       if (t_sec(ent))
+               return trace_seq_printf(s, "%llu + %u [%s]\n",
+                                       t_sector(ent), t_sec(ent), cmd);
+       return trace_seq_printf(s, "[%s]\n", cmd);
+}
+
+static int blk_log_with_error(struct trace_seq *s,
+                             const struct trace_entry *ent)
+{
+       if (t_sec(ent))
+               return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
+                                       t_sec(ent), t_error(ent));
+       return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
+}
+
+static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
+{
+       struct blk_io_trace_remap r = { .device = 0, };
+
+       get_pdu_remap(ent, &r);
+       return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
+                              t_sector(ent),
+                              t_sec(ent), MAJOR(r.device), MINOR(r.device),
+                              (unsigned long long)r.sector);
+}
+
+static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
+{
+       return trace_seq_printf(s, "[%s]\n", trace_find_cmdline(ent->pid));
+}
+
+static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
+{
+       return trace_seq_printf(s, "[%s] %llu\n", trace_find_cmdline(ent->pid),
+                               get_pdu_int(ent));
+}
+
+static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
+{
+       return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
+                               get_pdu_int(ent), trace_find_cmdline(ent->pid));
+}
+
+/*
+ * struct tracer operations
+ */
+
+static void blk_tracer_print_header(struct seq_file *m)
+{
+       if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+               return;
+       seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
+                   "#  |     |     |           |   |   |\n");
+}
+
+static void blk_tracer_start(struct trace_array *tr)
+{
+       mutex_lock(&blk_probe_mutex);
+       if (atomic_add_return(1, &blk_probes_ref) == 1)
+               if (blk_register_tracepoints())
+                       atomic_dec(&blk_probes_ref);
+       mutex_unlock(&blk_probe_mutex);
+       trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
+}
+
+static int blk_tracer_init(struct trace_array *tr)
+{
+       blk_tr = tr;
+       blk_tracer_start(tr);
+       mutex_lock(&blk_probe_mutex);
+       blk_tracer_enabled++;
+       mutex_unlock(&blk_probe_mutex);
+       return 0;
+}
+
+static void blk_tracer_stop(struct trace_array *tr)
+{
+       trace_flags |= TRACE_ITER_CONTEXT_INFO;
+       mutex_lock(&blk_probe_mutex);
+       if (atomic_dec_and_test(&blk_probes_ref))
+               blk_unregister_tracepoints();
+       mutex_unlock(&blk_probe_mutex);
+}
+
+static void blk_tracer_reset(struct trace_array *tr)
+{
+       if (!atomic_read(&blk_probes_ref))
+               return;
+
+       mutex_lock(&blk_probe_mutex);
+       blk_tracer_enabled--;
+       WARN_ON(blk_tracer_enabled < 0);
+       mutex_unlock(&blk_probe_mutex);
+
+       blk_tracer_stop(tr);
+}
+
+static struct {
+       const char *act[2];
+       int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
+} what2act[] __read_mostly = {
+       [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
+       [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
+       [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
+       [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
+       [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
+       [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
+       [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
+       [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
+       [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
+       [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
+       [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
+       [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
+       [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
+       [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
+       [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
+};
+
+static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
+                                              int flags)
+{
+       struct trace_seq *s = &iter->seq;
+       const struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
+       const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
+       int ret;
+
+       if (!trace_print_context(iter))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
+               ret = trace_seq_printf(s, "Bad pc action %x\n", what);
+       else {
+               const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
+               ret = blk_log_action_seq(s, t, what2act[what].act[long_act]);
+               if (ret)
+                       ret = what2act[what].print(s, iter->ent);
+       }
+
+       return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+}
+
+static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
+       const int offset = offsetof(struct blk_io_trace, sector);
+       struct blk_io_trace old = {
+               .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
+               .time     = ns2usecs(iter->ts),
+       };
+
+       if (!trace_seq_putmem(s, &old, offset))
+               return 0;
+       return trace_seq_putmem(s, &t->sector,
+                               sizeof(old) - offset + t->pdu_len);
+}
+
+static enum print_line_t
+blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
+{
+       return blk_trace_synthesize_old_trace(iter) ?
+                       TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
+{
+       const struct blk_io_trace *t;
+       u16 what;
+       int ret;
+
+       if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+               return TRACE_TYPE_UNHANDLED;
+
+       t = (const struct blk_io_trace *)iter->ent;
+       what = t->action & ((1 << BLK_TC_SHIFT) - 1);
+
+       if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
+               ret = trace_seq_printf(&iter->seq, "Bad pc action %x\n", what);
+       else {
+               const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
+               ret = blk_log_action_iter(iter, what2act[what].act[long_act]);
+               if (ret)
+                       ret = what2act[what].print(&iter->seq, iter->ent);
+       }
+
+       return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct tracer blk_tracer __read_mostly = {
+       .name           = "blk",
+       .init           = blk_tracer_init,
+       .reset          = blk_tracer_reset,
+       .start          = blk_tracer_start,
+       .stop           = blk_tracer_stop,
+       .print_header   = blk_tracer_print_header,
+       .print_line     = blk_tracer_print_line,
+       .flags          = &blk_tracer_flags,
+};
+
+static struct trace_event trace_blk_event = {
+       .type           = TRACE_BLK,
+       .trace          = blk_trace_event_print,
+       .binary         = blk_trace_event_print_binary,
+};
+
+static int __init init_blk_tracer(void)
+{
+       if (!register_ftrace_event(&trace_blk_event)) {
+               pr_warning("Warning: could not register block events\n");
+               return 1;
+       }
+
+       if (register_tracer(&blk_tracer) != 0) {
+               pr_warning("Warning: could not register the block tracer\n");
+               unregister_ftrace_event(&trace_blk_event);
+               return 1;
+       }
+
+       return 0;
+}
+
+device_initcall(init_blk_tracer);
+
+static int blk_trace_remove_queue(struct request_queue *q)
+{
+       struct blk_trace *bt;
+
+       bt = xchg(&q->blk_trace, NULL);
+       if (bt == NULL)
+               return -EINVAL;
+
+       kfree(bt);
+       return 0;
+}
+
+/*
+ * Setup everything required to start tracing
+ */
+static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
+{
+       struct blk_trace *old_bt, *bt = NULL;
+       int ret;
+
+       ret = -ENOMEM;
+       bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+       if (!bt)
+               goto err;
+
+       bt->dev = dev;
+       bt->act_mask = (u16)-1;
+       bt->end_lba = -1ULL;
+       bt->trace_state = Blktrace_running;
+
+       old_bt = xchg(&q->blk_trace, bt);
+       if (old_bt != NULL) {
+               (void)xchg(&q->blk_trace, old_bt);
+               kfree(bt);
+               return -EBUSY;
+       }
+       return 0;
+err:
+       return ret;
+}
+
+/*
+ * sysfs interface to enable and configure tracing
+ */
+
+static ssize_t sysfs_blk_trace_enable_show(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       struct hd_struct *p = dev_to_part(dev);
+       struct block_device *bdev;
+       ssize_t ret = -ENXIO;
+
+       lock_kernel();
+       bdev = bdget(part_devt(p));
+       if (bdev != NULL) {
+               struct request_queue *q = bdev_get_queue(bdev);
+
+               if (q != NULL) {
+                       mutex_lock(&bdev->bd_mutex);
+                       ret = sprintf(buf, "%u\n", !!q->blk_trace);
+                       mutex_unlock(&bdev->bd_mutex);
+               }
+
+               bdput(bdev);
+       }
+
+       unlock_kernel();
+       return ret;
+}
+
+static ssize_t sysfs_blk_trace_enable_store(struct device *dev,
+                                           struct device_attribute *attr,
+                                           const char *buf, size_t count)
+{
+       struct block_device *bdev;
+       struct request_queue *q;
+       struct hd_struct *p;
+       int value;
+       ssize_t ret = -ENXIO;
+
+       if (count == 0 || sscanf(buf, "%d", &value) != 1)
+               goto out;
+
+       lock_kernel();
+       p = dev_to_part(dev);
+       bdev = bdget(part_devt(p));
+       if (bdev == NULL)
+               goto out_unlock_kernel;
+
+       q = bdev_get_queue(bdev);
+       if (q == NULL)
+               goto out_bdput;
+
+       mutex_lock(&bdev->bd_mutex);
+       if (value)
+               ret = blk_trace_setup_queue(q, bdev->bd_dev);
+       else
+               ret = blk_trace_remove_queue(q);
+       mutex_unlock(&bdev->bd_mutex);
+
+       if (ret == 0)
+               ret = count;
+out_bdput:
+       bdput(bdev);
+out_unlock_kernel:
+       unlock_kernel();
+out:
+       return ret;
+}
+
+static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf);
+static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count);
+#define BLK_TRACE_DEVICE_ATTR(_name) \
+       DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
+                   sysfs_blk_trace_attr_show, \
+                   sysfs_blk_trace_attr_store)
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+                  sysfs_blk_trace_enable_show, sysfs_blk_trace_enable_store);
+static BLK_TRACE_DEVICE_ATTR(act_mask);
+static BLK_TRACE_DEVICE_ATTR(pid);
+static BLK_TRACE_DEVICE_ATTR(start_lba);
+static BLK_TRACE_DEVICE_ATTR(end_lba);
+
+static struct attribute *blk_trace_attrs[] = {
+       &dev_attr_enable.attr,
+       &dev_attr_act_mask.attr,
+       &dev_attr_pid.attr,
+       &dev_attr_start_lba.attr,
+       &dev_attr_end_lba.attr,
+       NULL
+};
+
+struct attribute_group blk_trace_attr_group = {
+       .name  = "trace",
+       .attrs = blk_trace_attrs,
+};
+
+static int blk_str2act_mask(const char *str)
+{
+       int mask = 0;
+       char *copy = kstrdup(str, GFP_KERNEL), *s;
+
+       if (copy == NULL)
+               return -ENOMEM;
+
+       s = strstrip(copy);
+
+       while (1) {
+               char *sep = strchr(s, ',');
+
+               if (sep != NULL)
+                       *sep = '\0';
+
+               if (strcasecmp(s, "barrier") == 0)
+                       mask |= BLK_TC_BARRIER;
+               else if (strcasecmp(s, "complete") == 0)
+                       mask |= BLK_TC_COMPLETE;
+               else if (strcasecmp(s, "fs") == 0)
+                       mask |= BLK_TC_FS;
+               else if (strcasecmp(s, "issue") == 0)
+                       mask |= BLK_TC_ISSUE;
+               else if (strcasecmp(s, "pc") == 0)
+                       mask |= BLK_TC_PC;
+               else if (strcasecmp(s, "queue") == 0)
+                       mask |= BLK_TC_QUEUE;
+               else if (strcasecmp(s, "read") == 0)
+                       mask |= BLK_TC_READ;
+               else if (strcasecmp(s, "requeue") == 0)
+                       mask |= BLK_TC_REQUEUE;
+               else if (strcasecmp(s, "sync") == 0)
+                       mask |= BLK_TC_SYNC;
+               else if (strcasecmp(s, "write") == 0)
+                       mask |= BLK_TC_WRITE;
+
+               if (sep == NULL)
+                       break;
+
+               s = sep + 1;
+       }
+       kfree(copy);
+
+       return mask;
+}
+
+static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct hd_struct *p = dev_to_part(dev);
+       struct request_queue *q;
+       struct block_device *bdev;
+       ssize_t ret = -ENXIO;
+
+       lock_kernel();
+       bdev = bdget(part_devt(p));
+       if (bdev == NULL)
+               goto out_unlock_kernel;
+
+       q = bdev_get_queue(bdev);
+       if (q == NULL)
+               goto out_bdput;
+       mutex_lock(&bdev->bd_mutex);
+       if (q->blk_trace == NULL)
+               ret = sprintf(buf, "disabled\n");
+       else if (attr == &dev_attr_act_mask)
+               ret = sprintf(buf, "%#x\n", q->blk_trace->act_mask);
+       else if (attr == &dev_attr_pid)
+               ret = sprintf(buf, "%u\n", q->blk_trace->pid);
+       else if (attr == &dev_attr_start_lba)
+               ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
+       else if (attr == &dev_attr_end_lba)
+               ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
+       mutex_unlock(&bdev->bd_mutex);
+out_bdput:
+       bdput(bdev);
+out_unlock_kernel:
+       unlock_kernel();
+       return ret;
+}
+
+static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct block_device *bdev;
+       struct request_queue *q;
+       struct hd_struct *p;
+       u64 value;
+       ssize_t ret = -ENXIO;
+
+       if (count == 0)
+               goto out;
+
+       if (attr == &dev_attr_act_mask) {
+               if (sscanf(buf, "%llx", &value) != 1) {
+                       /* Assume it is a list of trace category names */
+                       int mask = blk_str2act_mask(buf);
+
+                       if (mask < 0) {
+                               ret = mask;
+                               goto out;
+                       }
+                       value = mask;
+               }
+       } else if (sscanf(buf, "%llu", &value) != 1)
+               goto out;
+
+       lock_kernel();
+       p = dev_to_part(dev);
+       bdev = bdget(part_devt(p));
+       if (bdev == NULL)
+               goto out_unlock_kernel;
+
+       q = bdev_get_queue(bdev);
+       if (q == NULL)
+               goto out_bdput;
+
+       mutex_lock(&bdev->bd_mutex);
+       ret = 0;
+       if (q->blk_trace == NULL)
+               ret = blk_trace_setup_queue(q, bdev->bd_dev);
+
+       if (ret == 0) {
+               if (attr == &dev_attr_act_mask)
+                       q->blk_trace->act_mask = value;
+               else if (attr == &dev_attr_pid)
+                       q->blk_trace->pid = value;
+               else if (attr == &dev_attr_start_lba)
+                       q->blk_trace->start_lba = value;
+               else if (attr == &dev_attr_end_lba)
+                       q->blk_trace->end_lba = value;
+               ret = count;
+       }
+       mutex_unlock(&bdev->bd_mutex);
+out_bdput:
+       bdput(bdev);
+out_unlock_kernel:
+       unlock_kernel();
+out:
+       return ret;
+}
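
To make the act_mask fallback above concrete (input that does not scan as a hex
number is treated as a comma-separated list of category names), here is a
minimal userspace sketch that mirrors the strchr/strcasecmp walk in
blk_str2act_mask. The bit values and names are illustrative stand-ins, not the
kernel's BLK_TC_* definitions.

/* Userspace mirror of the comma-separated category parsing above.
 * The mask bits below are made up for the example. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

enum { TC_READ = 0x1, TC_WRITE = 0x2, TC_SYNC = 0x4, TC_QUEUE = 0x8 };

static int str2mask(const char *str)
{
        int mask = 0;
        char *copy = strdup(str), *s = copy;

        if (copy == NULL)
                return -1;

        while (1) {
                char *sep = strchr(s, ',');

                if (sep != NULL)
                        *sep = '\0';

                if (strcasecmp(s, "read") == 0)
                        mask |= TC_READ;
                else if (strcasecmp(s, "write") == 0)
                        mask |= TC_WRITE;
                else if (strcasecmp(s, "sync") == 0)
                        mask |= TC_SYNC;
                else if (strcasecmp(s, "queue") == 0)
                        mask |= TC_QUEUE;

                if (sep == NULL)
                        break;
                s = sep + 1;
        }
        free(copy);
        return mask;
}

int main(void)
{
        /* "read,write,sync" -> 0x7 with the example bits above */
        printf("%#x\n", str2mask("read,write,sync"));
        return 0;
}

Writing "read,write,sync" to the per-partition trace/act_mask attribute would
take the same path in the kernel, ORing the corresponding BLK_TC_* bits.
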
diff --git a/kernel/trace/events.c b/kernel/trace/events.c
new file mode 100644 (file)
index 0000000..9fc918d
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * This is the place to register all trace points as events.
+ */
+
+#include <linux/stringify.h>
+
+#include <trace/trace_events.h>
+
+#include "trace_output.h"
+
+#include "trace_events_stage_1.h"
+#include "trace_events_stage_2.h"
+#include "trace_events_stage_3.h"
+
+#include <trace/trace_event_types.h>
index fdf913d..d33d306 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
+#include <linux/hash.h>
 
 #include <asm/ftrace.h>
 
                        ftrace_kill();          \
        } while (0)
 
+/* hash bits for specific function selection */
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
@@ -61,9 +62,7 @@ int function_trace_stop;
  */
 static int ftrace_disabled __read_mostly;
 
-static DEFINE_SPINLOCK(ftrace_lock);
-static DEFINE_MUTEX(ftrace_sysctl_lock);
-static DEFINE_MUTEX(ftrace_start_lock);
+static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -134,9 +133,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-
        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
@@ -172,18 +168,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 #endif
        }
 
-       spin_unlock(&ftrace_lock);
-
        return 0;
 }
 
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        struct ftrace_ops **p;
-       int ret = 0;
-
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
 
        /*
         * If we are removing the last function, then simply point
@@ -192,17 +182,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
-               goto out;
+               return 0;
        }
 
        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;
 
-       if (*p != ops) {
-               ret = -1;
-               goto out;
-       }
+       if (*p != ops)
+               return -1;
 
        *p = (*p)->next;
 
@@ -223,21 +211,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
                }
        }
 
- out:
-       spin_unlock(&ftrace_lock);
-
-       return ret;
+       return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
        ftrace_func_t func;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-
        if (ftrace_trace_function == ftrace_stub)
-               goto out;
+               return;
 
        func = ftrace_trace_function;
 
@@ -254,23 +236,29 @@ static void ftrace_update_pid_func(void)
 #else
        __ftrace_trace_function = func;
 #endif
-
- out:
-       spin_unlock(&ftrace_lock);
 }
 
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
 #ifdef CONFIG_DYNAMIC_FTRACE
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
-/*
- * Since MCOUNT_ADDR may point to mcount itself, we do not want
- * to get it confused by reading a reference in the code as we
- * are parsing on objcopy output of text. Use a variable for
- * it instead.
- */
-static unsigned long mcount_addr = MCOUNT_ADDR;
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_probe {
+       struct hlist_node       node;
+       struct ftrace_probe_ops *ops;
+       unsigned long           flags;
+       unsigned long           ip;
+       void                    *data;
+       struct rcu_head         rcu;
+};
+
 
 enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
@@ -290,7 +278,7 @@ static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
        struct ftrace_page      *next;
-       unsigned long           index;
+       int                     index;
        struct dyn_ftrace       records[];
 };
 
@@ -305,6 +293,19 @@ static struct ftrace_page  *ftrace_pages;
 
 static struct dyn_ftrace *ftrace_free_records;
 
+/*
+ * This is a double for loop. Do not use 'break' to break out of the
+ * loop; you must use a goto.
+ */
+#define do_for_each_ftrace_rec(pg, rec)                                        \
+       for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
+               int _____i;                                             \
+               for (_____i = 0; _____i < pg->index; _____i++) {        \
+                       rec = &pg->records[_____i];
+
+#define while_for_each_ftrace_rec()            \
+               }                               \
+       }
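
A self-contained userspace sketch of the same double-loop shape, showing why
the comment above demands a goto rather than a break; the page and record
types, sample values and macro names below are simplified stand-ins, not the
kernel structures.

/* Minimal stand-ins for struct ftrace_page / struct dyn_ftrace. A bare
 * 'break' would only leave the inner loop, so early exit must use a goto,
 * exactly as the macro comment says. */
#include <stdio.h>

struct rec { unsigned long ip; };
struct page {
        struct page *next;
        int index;
        struct rec records[4];
};

static struct page second = { NULL, 2, { { 30 }, { 40 } } };
static struct page first  = { &second, 2, { { 10 }, { 20 } } };
static struct page *pages_start = &first;

#define do_for_each_rec(pg, rec)                                        \
        for (pg = pages_start; pg; pg = pg->next) {                     \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_rec()            \
                }                       \
        }

int main(void)
{
        struct page *pg;
        struct rec *r;

        do_for_each_rec(pg, r) {
                printf("ip=%lu\n", r->ip);
                if (r->ip == 30)
                        goto out;       /* 'break' would only exit the inner loop */
        } while_for_each_rec();
out:
        return 0;
}
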
 
 #ifdef CONFIG_KPROBES
 
@@ -349,23 +350,16 @@ void ftrace_release(void *start, unsigned long size)
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
-       int i;
 
        if (ftrace_disabled || !start)
                return;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
-
-                       if ((rec->ip >= s) && (rec->ip < e))
-                               ftrace_free_rec(rec);
-               }
-       }
-       spin_unlock(&ftrace_lock);
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+               if ((rec->ip >= s) && (rec->ip < e))
+                       ftrace_free_rec(rec);
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -461,10 +455,10 @@ static void ftrace_bug(int failed, unsigned long ip)
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
-       unsigned long ip, fl;
        unsigned long ftrace_addr;
+       unsigned long ip, fl;
 
-       ftrace_addr = (unsigned long)ftrace_caller;
+       ftrace_addr = (unsigned long)FTRACE_ADDR;
 
        ip = rec->ip;
 
@@ -473,7 +467,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
         * it is not enabled then do nothing.
         *
         * If this record is not to be traced and
-        * it is enabled then disabled it.
+        * it is enabled then disable it.
         *
         */
        if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -493,7 +487,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
                        return 0;
 
-               /* Record is not filtered and is not enabled do nothing */
+               /* Record is not filtered or enabled, do nothing */
                if (!fl)
                        return 0;
 
@@ -515,7 +509,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
                } else {
 
-                       /* if record is not enabled do nothing */
+                       /* if record is not enabled, do nothing */
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
 
@@ -531,41 +525,40 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
 static void ftrace_replace_code(int enable)
 {
-       int i, failed;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int failed;
 
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
-
-                       /*
-                        * Skip over free records and records that have
-                        * failed.
-                        */
-                       if (rec->flags & FTRACE_FL_FREE ||
-                           rec->flags & FTRACE_FL_FAILED)
-                               continue;
+       do_for_each_ftrace_rec(pg, rec) {
+               /*
+                * Skip over free records and records that have
+                * failed.
+                */
+               if (rec->flags & FTRACE_FL_FREE ||
+                   rec->flags & FTRACE_FL_FAILED)
+                       continue;
 
-                       /* ignore updates to this record's mcount site */
-                       if (get_kprobe((void *)rec->ip)) {
-                               freeze_record(rec);
-                               continue;
-                       } else {
-                               unfreeze_record(rec);
-                       }
+               /* ignore updates to this record's mcount site */
+               if (get_kprobe((void *)rec->ip)) {
+                       freeze_record(rec);
+                       continue;
+               } else {
+                       unfreeze_record(rec);
+               }
 
-                       failed = __ftrace_replace_code(rec, enable);
-                       if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
-                               rec->flags |= FTRACE_FL_FAILED;
-                               if ((system_state == SYSTEM_BOOTING) ||
-                                   !core_kernel_text(rec->ip)) {
-                                       ftrace_free_rec(rec);
-                               } else
-                                       ftrace_bug(failed, rec->ip);
-                       }
+               failed = __ftrace_replace_code(rec, enable);
+               if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
+                       rec->flags |= FTRACE_FL_FAILED;
+                       if ((system_state == SYSTEM_BOOTING) ||
+                           !core_kernel_text(rec->ip)) {
+                               ftrace_free_rec(rec);
+                               } else {
+                       } else {
+                               ftrace_bug(failed, rec->ip);
+                               /* Stop processing */
+                               return;
+                       }
-       }
+       } while_for_each_ftrace_rec();
 }
 
 static int
@@ -576,7 +569,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 
        ip = rec->ip;
 
-       ret = ftrace_make_nop(mod, rec, mcount_addr);
+       ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
                ftrace_bug(ret, ip);
                rec->flags |= FTRACE_FL_FAILED;
@@ -585,6 +578,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
        return 1;
 }
 
+/*
+ * archs can override this function if they must do something
+ * before the modifying code is performed.
+ */
+int __weak ftrace_arch_code_modify_prepare(void)
+{
+       return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * after the modifying code is performed.
+ */
+int __weak ftrace_arch_code_modify_post_process(void)
+{
+       return 0;
+}
+
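
An architecture that keeps kernel text read-only can use these hooks to open a
write window around the stop_machine() run. The sketch below is illustrative
only and modelled on the x86 change elsewhere in this merge;
set_kernel_text_rw()/set_kernel_text_ro() are assumed to be provided by that
architecture.

/* Illustrative arch-side override, not part of this file. */
int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_kernel_text_ro();
        return 0;
}
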
 static int __ftrace_modify_code(void *data)
 {
        int *command = data;
@@ -607,7 +618,17 @@ static int __ftrace_modify_code(void *data)
 
 static void ftrace_run_update_code(int command)
 {
+       int ret;
+
+       ret = ftrace_arch_code_modify_prepare();
+       FTRACE_WARN_ON(ret);
+       if (ret)
+               return;
+
        stop_machine(__ftrace_modify_code, &command, NULL);
+
+       ret = ftrace_arch_code_modify_post_process();
+       FTRACE_WARN_ON(ret);
 }
 
 static ftrace_func_t saved_ftrace_func;
@@ -631,13 +652,10 @@ static void ftrace_startup(int command)
        if (unlikely(ftrace_disabled))
                return;
 
-       mutex_lock(&ftrace_start_lock);
        ftrace_start_up++;
        command |= FTRACE_ENABLE_CALLS;
 
        ftrace_startup_enable(command);
-
-       mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown(int command)
@@ -645,7 +663,6 @@ static void ftrace_shutdown(int command)
        if (unlikely(ftrace_disabled))
                return;
 
-       mutex_lock(&ftrace_start_lock);
        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
@@ -656,11 +673,9 @@ static void ftrace_shutdown(int command)
        }
 
        if (!command || !ftrace_enabled)
-               goto out;
+               return;
 
        ftrace_run_update_code(command);
- out:
-       mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_startup_sysctl(void)
@@ -670,7 +685,6 @@ static void ftrace_startup_sysctl(void)
        if (unlikely(ftrace_disabled))
                return;
 
-       mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
@@ -678,7 +692,6 @@ static void ftrace_startup_sysctl(void)
                command |= FTRACE_ENABLE_CALLS;
 
        ftrace_run_update_code(command);
-       mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown_sysctl(void)
@@ -688,13 +701,11 @@ static void ftrace_shutdown_sysctl(void)
        if (unlikely(ftrace_disabled))
                return;
 
-       mutex_lock(&ftrace_start_lock);
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
 
        ftrace_run_update_code(command);
-       mutex_unlock(&ftrace_start_lock);
 }
 
 static cycle_t         ftrace_update_time;
@@ -781,29 +792,106 @@ enum {
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
+       FTRACE_ITER_PRINTALL    = (1 << 4),
+       FTRACE_ITER_HASH        = (1 << 5),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
        struct ftrace_page      *pg;
-       unsigned                idx;
+       int                     hidx;
+       int                     idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
 };
 
+static void *
+t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       struct hlist_node *hnd = v;
+       struct hlist_head *hhd;
+
+       WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
+
+       (*pos)++;
+
+ retry:
+       if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+               return NULL;
+
+       hhd = &ftrace_func_hash[iter->hidx];
+
+       if (hlist_empty(hhd)) {
+               iter->hidx++;
+               hnd = NULL;
+               goto retry;
+       }
+
+       if (!hnd)
+               hnd = hhd->first;
+       else {
+               hnd = hnd->next;
+               if (!hnd) {
+                       iter->hidx++;
+                       goto retry;
+               }
+       }
+
+       return hnd;
+}
+
+static void *t_hash_start(struct seq_file *m, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       void *p = NULL;
+
+       iter->flags |= FTRACE_ITER_HASH;
+
+       return t_hash_next(m, p, pos);
+}
+
+static int t_hash_show(struct seq_file *m, void *v)
+{
+       struct ftrace_func_probe *rec;
+       struct hlist_node *hnd = v;
+       char str[KSYM_SYMBOL_LEN];
+
+       rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+       if (rec->ops->print)
+               return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       seq_printf(m, "%s:", str);
+
+       kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
+       seq_printf(m, "%s", str);
+
+       if (rec->data)
+               seq_printf(m, ":%p", rec->data);
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;
 
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_next(m, v, pos);
+
        (*pos)++;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
+       if (iter->flags & FTRACE_ITER_PRINTALL)
+               return NULL;
+
  retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
@@ -832,7 +920,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                        goto retry;
                }
        }
-       spin_unlock(&ftrace_lock);
 
        return rec;
 }
@@ -842,6 +929,23 @@ static void *t_start(struct seq_file *m, loff_t *pos)
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
 
+       mutex_lock(&ftrace_lock);
+       /*
+        * For set_ftrace_filter reading, if we have the filter
+        * off, we can short cut and just print out that all
+        * functions are enabled.
+        */
+       if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
+               if (*pos > 0)
+                       return t_hash_start(m, pos);
+               iter->flags |= FTRACE_ITER_PRINTALL;
+               (*pos)++;
+               return iter;
+       }
+
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_start(m, pos);
+
        if (*pos > 0) {
                if (iter->idx < 0)
                        return p;
@@ -851,18 +955,31 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
        p = t_next(m, p, pos);
 
+       if (!p)
+               return t_hash_start(m, pos);
+
        return p;
 }
 
 static void t_stop(struct seq_file *m, void *p)
 {
+       mutex_unlock(&ftrace_lock);
 }
 
 static int t_show(struct seq_file *m, void *v)
 {
+       struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];
 
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_show(m, v);
+
+       if (iter->flags & FTRACE_ITER_PRINTALL) {
+               seq_printf(m, "#### all functions enabled ####\n");
+               return 0;
+       }
+
        if (!rec)
                return 0;
 
@@ -941,23 +1058,16 @@ static void ftrace_filter_reset(int enable)
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-       unsigned i;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
+       mutex_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
-       pg = ftrace_pages_start;
-       while (pg) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
-                       if (rec->flags & FTRACE_FL_FAILED)
-                               continue;
-                       rec->flags &= ~type;
-               }
-               pg = pg->next;
-       }
-       spin_unlock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+               rec->flags &= ~type;
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
 }
 
 static int
@@ -1038,86 +1148,536 @@ enum {
        MATCH_END_ONLY,
 };
 
-static void
-ftrace_match(unsigned char *buff, int len, int enable)
+/*
+ * (static function - no need for kernel doc)
+ *
+ * Pass in a buffer containing a glob and this function will
+ * set search to point to the search part of the buffer and
+ * return the type of search it is (see enum above).
+ * This does modify buff.
+ *
+ * Returns enum type.
+ *  search returns the pointer to use for comparison.
+ *  not returns 1 if buff started with a '!'
+ *     0 otherwise.
+ */
+static int
+ftrace_setup_glob(char *buff, int len, char **search, int *not)
 {
-       char str[KSYM_SYMBOL_LEN];
-       char *search = NULL;
-       struct ftrace_page *pg;
-       struct dyn_ftrace *rec;
        int type = MATCH_FULL;
-       unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-       unsigned i, match = 0, search_len = 0;
-       int not = 0;
+       int i;
 
        if (buff[0] == '!') {
-               not = 1;
+               *not = 1;
                buff++;
                len--;
-       }
+       } else
+               *not = 0;
+
+       *search = buff;
 
        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
-                               search = buff + i + 1;
+                               *search = buff + 1;
                                type = MATCH_END_ONLY;
-                               search_len = len - (i + 1);
                        } else {
-                               if (type == MATCH_END_ONLY) {
+                               if (type == MATCH_END_ONLY)
                                        type = MATCH_MIDDLE_ONLY;
-                               } else {
-                                       match = i;
+                               else
                                        type = MATCH_FRONT_ONLY;
-                               }
                                buff[i] = 0;
                                break;
                        }
                }
        }
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-       if (enable)
-               ftrace_filtered = 1;
-       pg = ftrace_pages_start;
-       while (pg) {
-               for (i = 0; i < pg->index; i++) {
-                       int matched = 0;
-                       char *ptr;
-
-                       rec = &pg->records[i];
-                       if (rec->flags & FTRACE_FL_FAILED)
+       return type;
+}
+
+static int ftrace_match(char *str, char *regex, int len, int type)
+{
+       int matched = 0;
+       char *ptr;
+
+       switch (type) {
+       case MATCH_FULL:
+               if (strcmp(str, regex) == 0)
+                       matched = 1;
+               break;
+       case MATCH_FRONT_ONLY:
+               if (strncmp(str, regex, len) == 0)
+                       matched = 1;
+               break;
+       case MATCH_MIDDLE_ONLY:
+               if (strstr(str, regex))
+                       matched = 1;
+               break;
+       case MATCH_END_ONLY:
+               ptr = strstr(str, regex);
+               if (ptr && (ptr[len] == 0))
+                       matched = 1;
+               break;
+       }
+
+       return matched;
+}
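
The matching machinery above splits into two steps: ftrace_setup_glob
classifies the pattern and strips the '*', then ftrace_match compares against
the remaining search string. Below is a hedged userspace sketch of that split
with simplified local copies of both helpers; every name and value in it is
local to the sketch.

/* Simplified userspace copies of the glob setup/match pair above. */
#include <stdio.h>
#include <string.h>

enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

static int setup_glob(char *buff, int len, char **search, int *not)
{
        int type = MATCH_FULL;
        int i;

        *not = 0;
        if (buff[0] == '!') {
                *not = 1;
                buff++;
                len--;
        }
        *search = buff;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                *search = buff + 1;
                                type = MATCH_END_ONLY;
                        } else {
                                if (type == MATCH_END_ONLY)
                                        type = MATCH_MIDDLE_ONLY;
                                else
                                        type = MATCH_FRONT_ONLY;
                                buff[i] = 0;
                                break;
                        }
                }
        }
        return type;
}

static int match(const char *str, const char *regex, int len, int type)
{
        const char *ptr;

        switch (type) {
        case MATCH_FULL:
                return strcmp(str, regex) == 0;
        case MATCH_FRONT_ONLY:
                return strncmp(str, regex, len) == 0;
        case MATCH_MIDDLE_ONLY:
                return strstr(str, regex) != NULL;
        case MATCH_END_ONLY:
                ptr = strstr(str, regex);
                return ptr && ptr[len] == 0;
        }
        return 0;
}

int main(void)
{
        char glob[] = "sched_*";        /* classified as MATCH_FRONT_ONLY */
        char *search;
        int not;
        int type = setup_glob(glob, strlen(glob), &search, &not);

        printf("%d\n", match("sched_switch", search, strlen(search), type)); /* 1 */
        printf("%d\n", match("do_fork", search, strlen(search), type));      /* 0 */
        return 0;
}
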
+
+static int
+ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
+{
+       char str[KSYM_SYMBOL_LEN];
+
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       return ftrace_match(str, regex, len, type);
+}
+
+static void ftrace_match_records(char *buff, int len, int enable)
+{
+       unsigned int search_len;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       unsigned long flag;
+       char *search;
+       int type;
+       int not;
+
+       flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+       type = ftrace_setup_glob(buff, len, &search, &not);
+
+       search_len = strlen(search);
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+
+               if (ftrace_match_record(rec, search, search_len, type)) {
+                       if (not)
+                               rec->flags &= ~flag;
+                       else
+                               rec->flags |= flag;
+               }
+               /*
+                * Only enable filtering if we have a function that
+                * is filtered on.
+                */
+               if (enable && (rec->flags & FTRACE_FL_FILTER))
+                       ftrace_filtered = 1;
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
+}
+
+static int
+ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
+                          char *regex, int len, int type)
+{
+       char str[KSYM_SYMBOL_LEN];
+       char *modname;
+
+       kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
+
+       if (!modname || strcmp(modname, mod))
+               return 0;
+
+       /* blank search means to match all funcs in the mod */
+       if (len)
+               return ftrace_match(str, regex, len, type);
+       else
+               return 1;
+}
+
+static void ftrace_match_module_records(char *buff, char *mod, int enable)
+{
+       unsigned search_len = 0;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int type = MATCH_FULL;
+       char *search = buff;
+       unsigned long flag;
+       int not = 0;
+
+       flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+
+       /* blank or '*' mean the same */
+       if (strcmp(buff, "*") == 0)
+               buff[0] = 0;
+
+       /* handle the case of "don't filter this module" */
+       if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
+               buff[0] = 0;
+               not = 1;
+       }
+
+       if (strlen(buff)) {
+               type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
+               search_len = strlen(search);
+       }
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+
+               if (ftrace_match_module_record(rec, mod,
+                                              search, search_len, type)) {
+                       if (not)
+                               rec->flags &= ~flag;
+                       else
+                               rec->flags |= flag;
+               }
+               if (enable && (rec->flags & FTRACE_FL_FILTER))
+                       ftrace_filtered = 1;
+
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
+}
+
+/*
+ * We register the module command as a template to show others how
+ * to register the a command as well.
+ */
+
+static int
+ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
+{
+       char *mod;
+
+       /*
+        * cmd == 'mod' because we only registered this func
+        * for the 'mod' ftrace_func_command.
+        * But if you register one func with multiple commands,
+        * you can tell which command was used by the cmd
+        * parameter.
+        */
+
+       /* we must have a module name */
+       if (!param)
+               return -EINVAL;
+
+       mod = strsep(&param, ":");
+       if (!strlen(mod))
+               return -EINVAL;
+
+       ftrace_match_module_records(func, mod, enable);
+       return 0;
+}
+
+static struct ftrace_func_command ftrace_mod_cmd = {
+       .name                   = "mod",
+       .func                   = ftrace_mod_callback,
+};
+
+static int __init ftrace_mod_cmd_init(void)
+{
+       return register_ftrace_command(&ftrace_mod_cmd);
+}
+device_initcall(ftrace_mod_cmd_init);
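
In the spirit of the template comment above, here is a hedged sketch of
registering one more command. The "stub" name and its callback are
hypothetical and not part of this patch; the sketch assumes it sits in
kernel/trace/ftrace.c next to the mod command, since register_ftrace_command
and ftrace_match_records are not exported from this file.

/* Hypothetical extra command, shown only to illustrate the pattern. */
static int
ftrace_stub_callback(char *func, char *cmd, char *param, int enable)
{
        /*
         * 'cmd' is always "stub" here; 'param' is whatever followed the
         * second ':', if anything. This example ignores 'param' and just
         * falls back to a plain glob match on 'func'.
         */
        ftrace_match_records(func, strlen(func), enable);
        return 0;
}

static struct ftrace_func_command ftrace_stub_cmd = {
        .name   = "stub",
        .func   = ftrace_stub_callback,
};

static int __init ftrace_stub_cmd_init(void)
{
        return register_ftrace_command(&ftrace_stub_cmd);
}
device_initcall(ftrace_stub_cmd_init);

With such a command registered, a string like "sched_*:stub:anything" written
to set_ftrace_filter would be routed to the callback by ftrace_process_regex
below, exactly as ":mod:" strings reach ftrace_mod_callback.
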
+
+static void
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_func_probe *entry;
+       struct hlist_head *hhd;
+       struct hlist_node *n;
+       unsigned long key;
+       int resched;
+
+       key = hash_long(ip, FTRACE_HASH_BITS);
+
+       hhd = &ftrace_func_hash[key];
+
+       if (hlist_empty(hhd))
+               return;
+
+       /*
+        * Disable preemption for these calls to prevent an RCU grace
+        * period. This syncs the hash iteration and freeing of items
+        * on the hash. rcu_read_lock is too dangerous here.
+        */
+       resched = ftrace_preempt_disable();
+       hlist_for_each_entry_rcu(entry, n, hhd, node) {
+               if (entry->ip == ip)
+                       entry->ops->func(ip, parent_ip, &entry->data);
+       }
+       ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_probe_ops __read_mostly =
+{
+       .func = function_trace_probe_call,
+};
+
+static int ftrace_probe_registered;
+
+static void __enable_ftrace_function_probe(void)
+{
+       int i;
+
+       if (ftrace_probe_registered)
+               return;
+
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+               if (hhd->first)
+                       break;
+       }
+       /* Nothing registered? */
+       if (i == FTRACE_FUNC_HASHSIZE)
+               return;
+
+       __register_ftrace_function(&trace_probe_ops);
+       ftrace_startup(0);
+       ftrace_probe_registered = 1;
+}
+
+static void __disable_ftrace_function_probe(void)
+{
+       int i;
+
+       if (!ftrace_probe_registered)
+               return;
+
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+               if (hhd->first)
+                       return;
+       }
+
+       /* no more funcs left */
+       __unregister_ftrace_function(&trace_probe_ops);
+       ftrace_shutdown(0);
+       ftrace_probe_registered = 0;
+}
+
+
+static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+{
+       struct ftrace_func_probe *entry =
+               container_of(rhp, struct ftrace_func_probe, rcu);
+
+       if (entry->ops->free)
+               entry->ops->free(&entry->data);
+       kfree(entry);
+}
+
+
+int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                             void *data)
+{
+       struct ftrace_func_probe *entry;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int type, len, not;
+       unsigned long key;
+       int count = 0;
+       char *search;
+
+       type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+       len = strlen(search);
+
+       /* we do not support '!' for function probes */
+       if (WARN_ON(not))
+               return -EINVAL;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+
+               if (!ftrace_match_record(rec, search, len, type))
+                       continue;
+
+               entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry) {
+                       /* If we did not process any, then return error */
+                       if (!count)
+                               count = -ENOMEM;
+                       goto out_unlock;
+               }
+
+               count++;
+
+               entry->data = data;
+
+               /*
+                * The caller might want to do something special
+                * for each function we find. We call the callback
+                * to give the caller an opportunity to do so.
+                */
+               if (ops->callback) {
+                       if (ops->callback(rec->ip, &entry->data) < 0) {
+                               /* caller does not like this func */
+                               kfree(entry);
                                continue;
-                       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-                       switch (type) {
-                       case MATCH_FULL:
-                               if (strcmp(str, buff) == 0)
-                                       matched = 1;
-                               break;
-                       case MATCH_FRONT_ONLY:
-                               if (memcmp(str, buff, match) == 0)
-                                       matched = 1;
-                               break;
-                       case MATCH_MIDDLE_ONLY:
-                               if (strstr(str, search))
-                                       matched = 1;
-                               break;
-                       case MATCH_END_ONLY:
-                               ptr = strstr(str, search);
-                               if (ptr && (ptr[search_len] == 0))
-                                       matched = 1;
-                               break;
                        }
-                       if (matched) {
-                               if (not)
-                                       rec->flags &= ~flag;
-                               else
-                                       rec->flags |= flag;
+               }
+
+               entry->ops = ops;
+               entry->ip = rec->ip;
+
+               key = hash_long(entry->ip, FTRACE_HASH_BITS);
+               hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+
+       } while_for_each_ftrace_rec();
+       __enable_ftrace_function_probe();
+
+ out_unlock:
+       mutex_unlock(&ftrace_lock);
+
+       return count;
+}
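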
+
+enum {
+       PROBE_TEST_FUNC         = 1,
+       PROBE_TEST_DATA         = 2
+};
+
+static void
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                                 void *data, int flags)
+{
+       struct ftrace_func_probe *entry;
+       struct hlist_node *n, *tmp;
+       char str[KSYM_SYMBOL_LEN];
+       int type = MATCH_FULL;
+       int i, len = 0;
+       char *search;
+
+       if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
+               glob = NULL;
+       else {
+               int not;
+
+               type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+               len = strlen(search);
+
+               /* we do not support '!' for function probes */
+               if (WARN_ON(not))
+                       return;
+       }
+
+       mutex_lock(&ftrace_lock);
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+
+               hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+
+                       /* break up if statements for readability */
+                       if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
+                               continue;
+
+                       if ((flags & PROBE_TEST_DATA) && entry->data != data)
+                               continue;
+
+                       /* do this last, since it is the most expensive */
+                       if (glob) {
+                               kallsyms_lookup(entry->ip, NULL, NULL,
+                                               NULL, str);
+                               if (!ftrace_match(str, glob, len, type))
+                                       continue;
                        }
+
+                       hlist_del(&entry->node);
+                       call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+               }
+       }
+       __disable_ftrace_function_probe();
+       mutex_unlock(&ftrace_lock);
+}
+
+void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                               void *data)
+{
+       __unregister_ftrace_function_probe(glob, ops, data,
+                                         PROBE_TEST_FUNC | PROBE_TEST_DATA);
+}
+
+void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
+{
+       __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
+}
+
+void unregister_ftrace_function_probe_all(char *glob)
+{
+       __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
+}
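
Tying the probe API above together, a hedged sketch of a caller. The ops and
function names are hypothetical, the optional callback/free/print hooks are
left unset, and the sketch assumes kernel code that can see struct
ftrace_probe_ops; it reuses the pointer-sized per-probe data slot as a hit
counter.

/* Hypothetical probe user: counts hits on functions matching "schedule*". */
static void
example_probe_func(unsigned long ip, unsigned long parent_ip, void **data)
{
        unsigned long *count = (unsigned long *)data;

        (*count)++;     /* the data slot starts out as NULL, i.e. zero */
}

static struct ftrace_probe_ops example_probe_ops = {
        .func   = example_probe_func,
};

static char example_glob[] = "schedule*";       /* setup_glob writes into it */

static void example_probe_attach(void)
{
        /* return value (number of functions hooked) is ignored here */
        register_ftrace_function_probe(example_glob, &example_probe_ops, NULL);
}

static void example_probe_detach(void)
{
        /* drop only the probes that use example_probe_ops */
        unregister_ftrace_function_probe_func(example_glob, &example_probe_ops);
}
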
+
+static LIST_HEAD(ftrace_commands);
+static DEFINE_MUTEX(ftrace_cmd_mutex);
+
+int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+       struct ftrace_func_command *p;
+       int ret = 0;
+
+       mutex_lock(&ftrace_cmd_mutex);
+       list_for_each_entry(p, &ftrace_commands, list) {
+               if (strcmp(cmd->name, p->name) == 0) {
+                       ret = -EBUSY;
+                       goto out_unlock;
+               }
+       }
+       list_add(&cmd->list, &ftrace_commands);
+ out_unlock:
+       mutex_unlock(&ftrace_cmd_mutex);
+
+       return ret;
+}
+
+int unregister_ftrace_command(struct ftrace_func_command *cmd)
+{
+       struct ftrace_func_command *p, *n;
+       int ret = -ENODEV;
+
+       mutex_lock(&ftrace_cmd_mutex);
+       list_for_each_entry_safe(p, n, &ftrace_commands, list) {
+               if (strcmp(cmd->name, p->name) == 0) {
+                       ret = 0;
+                       list_del_init(&p->list);
+                       goto out_unlock;
+               }
+       }
+ out_unlock:
+       mutex_unlock(&ftrace_cmd_mutex);
+
+       return ret;
+}
+
+static int ftrace_process_regex(char *buff, int len, int enable)
+{
+       char *func, *command, *next = buff;
+       struct ftrace_func_command *p;
+       int ret = -EINVAL;
+
+       func = strsep(&next, ":");
+
+       if (!next) {
+               ftrace_match_records(func, len, enable);
+               return 0;
+       }
+
+       /* command found */
+
+       command = strsep(&next, ":");
+
+       mutex_lock(&ftrace_cmd_mutex);
+       list_for_each_entry(p, &ftrace_commands, list) {
+               if (strcmp(p->name, command) == 0) {
+                       ret = p->func(func, command, next, enable);
+                       goto out_unlock;
                }
-               pg = pg->next;
        }
-       spin_unlock(&ftrace_lock);
+ out_unlock:
+       mutex_unlock(&ftrace_cmd_mutex);
+
+       return ret;
 }
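
A small userspace sketch of how ftrace_process_regex above carves up its
input: strsep consumes up to each ':' and leaves the remainder (or NULL) in
next. The filter string below is only an example.

/* Demonstrates the func[:command[:param]] split used above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "sched_*:mod:snd_*";
        char *next = buf;
        char *func, *command;

        func = strsep(&next, ":");              /* "sched_*" */
        if (!next) {
                printf("plain filter: %s\n", func);
                return 0;
        }
        command = strsep(&next, ":");           /* "mod" */
        printf("func=%s command=%s param=%s\n", func, command, next);
        return 0;
}
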
 
 static ssize_t
@@ -1187,7 +1747,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
-               ftrace_match(iter->buffer, iter->buffer_idx, enable);
+               ret = ftrace_process_regex(iter->buffer,
+                                          iter->buffer_idx, enable);
+               if (ret)
+                       goto out;
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;
@@ -1226,7 +1789,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
-               ftrace_match(buf, len, enable);
+               ftrace_match_records(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
 }
 
@@ -1276,15 +1839,13 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
-               ftrace_match(iter->buffer, iter->buffer_idx, enable);
+               ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
        }
 
-       mutex_lock(&ftrace_sysctl_lock);
-       mutex_lock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
        if (ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-       mutex_unlock(&ftrace_start_lock);
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
 
        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
@@ -1303,21 +1864,21 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
        return ftrace_regex_release(inode, file, 0);
 }
 
-static struct file_operations ftrace_avail_fops = {
+static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
 };
 
-static struct file_operations ftrace_failures_fops = {
+static const struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
 };
 
-static struct file_operations ftrace_filter_fops = {
+static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
@@ -1325,7 +1886,7 @@ static struct file_operations ftrace_filter_fops = {
        .release = ftrace_filter_release,
 };
 
-static struct file_operations ftrace_notrace_fops = {
+static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
@@ -1360,6 +1921,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 
        mutex_lock(&graph_lock);
 
+       /* Nothing set; tell g_show to print that all functions are enabled */
+       if (!ftrace_graph_count && !*pos)
+               return (void *)1;
+
        p = g_next(m, p, pos);
 
        return p;
@@ -1378,6 +1943,11 @@ static int g_show(struct seq_file *m, void *v)
        if (!ptr)
                return 0;
 
+       if (ptr == (unsigned long *)1) {
+               seq_printf(m, "#### all functions enabled ####\n");
+               return 0;
+       }
+
        kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
 
        seq_printf(m, "%s\n", str);
@@ -1431,42 +2001,52 @@ ftrace_graph_read(struct file *file, char __user *ubuf,
 }
 
 static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
-       char str[KSYM_SYMBOL_LEN];
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int search_len;
        int found = 0;
-       int i, j;
+       int type, not;
+       char *search;
+       bool exists;
+       int i;
 
        if (ftrace_disabled)
                return -ENODEV;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
+       /* decode regex */
+       type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+       if (not)
+               return -EINVAL;
 
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
+       search_len = strlen(search);
 
-                       if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
-                               continue;
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                       break;
+
+               if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+                       continue;
 
-                       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-                       if (strcmp(str, buffer) == 0) {
+               if (ftrace_match_record(rec, search, search_len, type)) {
+                       /* ensure it is not already in the array */
+                       exists = false;
+                       for (i = 0; i < *idx; i++)
+                               if (array[i] == rec->ip) {
+                                       exists = true;
+                                       break;
+                               }
+                       if (!exists) {
+                               array[(*idx)++] = rec->ip;
                                found = 1;
-                               for (j = 0; j < idx; j++)
-                                       if (array[j] == rec->ip) {
-                                               found = 0;
-                                               break;
-                                       }
-                               if (found)
-                                       array[idx] = rec->ip;
-                               break;
                        }
                }
-       }
-       spin_unlock(&ftrace_lock);
+       } while_for_each_ftrace_rec();
+
+       mutex_unlock(&ftrace_lock);
 
        return found ? 0 : -EINVAL;
 }
@@ -1534,13 +2114,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
        }
        buffer[index] = 0;
 
-       /* we allow only one at a time */
-       ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+       /* we allow only one expression at a time */
+       ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
        if (ret)
                goto out;
 
-       ftrace_graph_count++;
-
        file->f_pos += read;
 
        ret = read;
@@ -1604,7 +2182,7 @@ static int ftrace_convert_nops(struct module *mod,
        unsigned long addr;
        unsigned long flags;
 
-       mutex_lock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
@@ -1623,7 +2201,7 @@ static int ftrace_convert_nops(struct module *mod,
        local_irq_save(flags);
        ftrace_update_code(mod);
        local_irq_restore(flags);
-       mutex_unlock(&ftrace_start_lock);
+       mutex_unlock(&ftrace_lock);
 
        return 0;
 }
@@ -1796,7 +2374,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        if (ret < 0)
                return ret;
 
-       mutex_lock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
        if (val < 0) {
                /* disable pid tracing */
                if (!ftrace_pid_trace)
@@ -1835,12 +2413,12 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        ftrace_startup_enable(0);
 
  out:
-       mutex_unlock(&ftrace_start_lock);
+       mutex_unlock(&ftrace_lock);
 
        return cnt;
 }
 
-static struct file_operations ftrace_pid_fops = {
+static const struct file_operations ftrace_pid_fops = {
        .read = ftrace_pid_read,
        .write = ftrace_pid_write,
 };
@@ -1863,7 +2441,6 @@ static __init int ftrace_init_debugfs(void)
                           "'set_ftrace_pid' entry\n");
        return 0;
 }
-
 fs_initcall(ftrace_init_debugfs);
 
 /**
@@ -1898,17 +2475,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
        if (unlikely(ftrace_disabled))
                return -1;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
        ret = __register_ftrace_function(ops);
        ftrace_startup(0);
 
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
        return ret;
 }
 
 /**
- * unregister_ftrace_function - unresgister a function for profiling.
+ * unregister_ftrace_function - unregister a function for profiling.
  * @ops - ops structure that holds the function to unregister
  *
  * Unregister a function that was added to be called by ftrace profiling.
@@ -1917,10 +2494,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown(0);
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
 
        return ret;
 }
@@ -1935,7 +2512,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
        ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
@@ -1964,7 +2541,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        }
 
  out:
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
        return ret;
 }
 
@@ -2080,7 +2657,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 {
        int ret = 0;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
@@ -2098,13 +2675,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
        return ret;
 }
 
 void unregister_ftrace_graph(void)
 {
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
        atomic_dec(&ftrace_graph_active);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
@@ -2112,7 +2689,7 @@ void unregister_ftrace_graph(void)
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
 
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
 }
 
 /* Allocate a return stack for newly created task */
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644 (file)
index 0000000..ae201b3
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * Memory allocator tracing
+ *
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ */
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+/* Select an alternative, minimalistic output instead of the original one */
+#define TRACE_KMEM_OPT_MINIMAL 0x1
+
+static struct tracer_opt kmem_opts[] = {
+       /* The minimalistic output is disabled by default */
+       { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
+       { }
+};
+
+static struct tracer_flags kmem_tracer_flags = {
+       .val = 0,
+       .opts = kmem_opts
+};
+
+
+static bool kmem_tracing_enabled __read_mostly;
+static struct trace_array *kmemtrace_array;
+
+static int kmem_trace_init(struct trace_array *tr)
+{
+       int cpu;
+       kmemtrace_array = tr;
+
+       for_each_cpu_mask(cpu, cpu_possible_map)
+               tracing_reset(tr, cpu);
+
+       kmem_tracing_enabled = true;
+
+       return 0;
+}
+
+static void kmem_trace_reset(struct trace_array *tr)
+{
+       kmem_tracing_enabled = false;
+}
+
+static void kmemtrace_headers(struct seq_file *s)
+{
+       /* Don't need headers for the original kmemtrace output */
+       if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+               return;
+
+       seq_printf(s, "#\n");
+       seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
+                       "      POINTER         NODE    CALLER\n");
+       seq_printf(s, "# FREE   |      |     |       |       "
+                       "       |   |            |        |\n");
+       seq_printf(s, "# |\n\n");
+}
+
+/*
+ * The following two functions give the original output from kmemtrace,
+ * or something close to it; they may still be missing a few details.
+ */
+static enum print_line_t
+kmemtrace_print_alloc_original(struct trace_iterator *iter,
+                               struct kmemtrace_alloc_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Taken from the old linux/kmemtrace.h */
+       ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
+         "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
+          entry->type_id, entry->call_site, (unsigned long) entry->ptr,
+          (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
+          (unsigned long) entry->gfp_flags, entry->node);
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_original(struct trace_iterator *iter,
+                               struct kmemtrace_free_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Taken from the old linux/kmemtrace.h */
+       ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
+          entry->type_id, entry->call_site, (unsigned long) entry->ptr);
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+
+/* The following two functions provide a more minimalistic output */
+static enum print_line_t
+kmemtrace_print_alloc_compress(struct trace_iterator *iter,
+                                       struct kmemtrace_alloc_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Alloc entry */
+       ret = trace_seq_printf(s, "  +      ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Type */
+       switch (entry->type_id) {
+       case KMEMTRACE_TYPE_KMALLOC:
+               ret = trace_seq_printf(s, "K   ");
+               break;
+       case KMEMTRACE_TYPE_CACHE:
+               ret = trace_seq_printf(s, "C   ");
+               break;
+       case KMEMTRACE_TYPE_PAGES:
+               ret = trace_seq_printf(s, "P   ");
+               break;
+       default:
+               ret = trace_seq_printf(s, "?   ");
+       }
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Requested */
+       ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Allocated */
+       ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Flags
+        * TODO: would be better to print the names of the GFP flags
+        */
+       ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Pointer to allocated */
+       ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Node */
+       ret = trace_seq_printf(s, "%4d   ", entry->node);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Call site */
+       ret = seq_print_ip_sym(s, entry->call_site, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!trace_seq_printf(s, "\n"))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_compress(struct trace_iterator *iter,
+                               struct kmemtrace_free_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Free entry */
+       ret = trace_seq_printf(s, "  -      ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Type */
+       switch (entry->type_id) {
+       case KMEMTRACE_TYPE_KMALLOC:
+               ret = trace_seq_printf(s, "K     ");
+               break;
+       case KMEMTRACE_TYPE_CACHE:
+               ret = trace_seq_printf(s, "C     ");
+               break;
+       case KMEMTRACE_TYPE_PAGES:
+               ret = trace_seq_printf(s, "P     ");
+               break;
+       default:
+               ret = trace_seq_printf(s, "?     ");
+       }
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Skip requested/allocated/flags */
+       ret = trace_seq_printf(s, "                       ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Pointer to allocated */
+       ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Skip node */
+       ret = trace_seq_printf(s, "       ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Call site */
+       ret = seq_print_ip_sym(s, entry->call_site, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!trace_seq_printf(s, "\n"))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
+{
+       struct trace_entry *entry = iter->ent;
+
+       switch (entry->type) {
+       case TRACE_KMEM_ALLOC: {
+               struct kmemtrace_alloc_entry *field;
+               trace_assign_type(field, entry);
+               if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+                       return kmemtrace_print_alloc_compress(iter, field);
+               else
+                       return kmemtrace_print_alloc_original(iter, field);
+       }
+
+       case TRACE_KMEM_FREE: {
+               struct kmemtrace_free_entry *field;
+               trace_assign_type(field, entry);
+               if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+                       return kmemtrace_print_free_compress(iter, field);
+               else
+                       return kmemtrace_print_free_original(iter, field);
+       }
+
+       default:
+               return TRACE_TYPE_UNHANDLED;
+       }
+}
+
+/* Trace allocations */
+void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                            unsigned long call_site,
+                            const void *ptr,
+                            size_t bytes_req,
+                            size_t bytes_alloc,
+                            gfp_t gfp_flags,
+                            int node)
+{
+       struct ring_buffer_event *event;
+       struct kmemtrace_alloc_entry *entry;
+       struct trace_array *tr = kmemtrace_array;
+
+       if (!kmem_tracing_enabled)
+               return;
+
+       event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
+                                         sizeof(*entry), 0, 0);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+
+       entry->type_id = type_id;
+       entry->call_site = call_site;
+       entry->ptr = ptr;
+       entry->bytes_req = bytes_req;
+       entry->bytes_alloc = bytes_alloc;
+       entry->gfp_flags = gfp_flags;
+       entry->node = node;
+
+       trace_buffer_unlock_commit(tr, event, 0, 0);
+}
+EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
+
+void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                      unsigned long call_site,
+                      const void *ptr)
+{
+       struct ring_buffer_event *event;
+       struct kmemtrace_free_entry *entry;
+       struct trace_array *tr = kmemtrace_array;
+
+       if (!kmem_tracing_enabled)
+               return;
+
+       event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
+                                         sizeof(*entry), 0, 0);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       entry->type_id  = type_id;
+       entry->call_site = call_site;
+       entry->ptr = ptr;
+
+       trace_buffer_unlock_commit(tr, event, 0, 0);
+}
+EXPORT_SYMBOL(kmemtrace_mark_free);
+
+static struct tracer kmem_tracer __read_mostly = {
+       .name           = "kmemtrace",
+       .init           = kmem_trace_init,
+       .reset          = kmem_trace_reset,
+       .print_line     = kmemtrace_print_line,
+       .print_header   = kmemtrace_headers,
+       .flags          = &kmem_tracer_flags
+};
+
+void kmemtrace_init(void)
+{
+       /* earliest opportunity to start kmem tracing */
+}
+
+static int __init init_kmem_tracer(void)
+{
+       return register_tracer(&kmem_tracer);
+}
+
+device_initcall(init_kmem_tracer);
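[Editor's note: a minimal sketch, not part of the patch, of how an allocator-side hook might feed the two markers exported above. kmalloc_probe()/kfree_probe() are hypothetical wrappers; kmemtrace_mark_alloc_node(), kmemtrace_mark_free() and KMEMTRACE_TYPE_KMALLOC are the symbols defined in this file, and _RET_IP_/numa_node_id() are standard kernel helpers.]

#include <linux/kernel.h>       /* _RET_IP_ */
#include <linux/slab.h>         /* kmalloc(), kfree() */
#include <linux/topology.h>     /* numa_node_id() */
#include <trace/kmemtrace.h>    /* kmemtrace_mark_*() prototypes (assumed) */

static inline void *kmalloc_probe(size_t size, gfp_t flags)
{
        void *ptr = kmalloc(size, flags);

        /* Report request == allocation; a real slab hook would pass the
         * rounded-up object size as bytes_alloc. */
        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr,
                                  size, size, flags, numa_node_id());
        return ptr;
}

static inline void kfree_probe(const void *ptr)
{
        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr);
        kfree(ptr);
}

At runtime the tracer registered above is selected like any other ftrace plugin, e.g. 'echo kmemtrace > current_tracer' in the tracing debugfs directory (mount point varies by setup).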
index bd38c5c..1788584 100644 (file)
@@ -4,13 +4,15 @@
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
+#include <linux/trace_clock.h>
+#include <linux/ftrace_irq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
-#include <linux/sched.h>       /* used for sched_clock() (for now) */
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
@@ -57,7 +59,9 @@ enum {
        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
 };
 
-static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
 /**
  * tracing_on - enable all tracing buffers
@@ -89,26 +93,34 @@ EXPORT_SYMBOL_GPL(tracing_off);
  * tracing_off_permanent - permanently disable ring buffers
  *
  * This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
  */
 void tracing_off_permanent(void)
 {
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+       return ring_buffer_flags == RB_BUFFERS_ON;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
 #include "trace.h"
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-/* FIXME!!! */
 u64 ring_buffer_time_stamp(int cpu)
 {
        u64 time;
 
        preempt_disable_notrace();
        /* shift to debug/test normalization and TIME_EXTENTS */
-       time = sched_clock() << DEBUG_SHIFT;
+       time = trace_clock_local() << DEBUG_SHIFT;
        preempt_enable_no_resched_notrace();
 
        return time;
@@ -122,9 +134,8 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
-#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
-#define RB_ALIGNMENT_SHIFT     2
-#define RB_ALIGNMENT           (1 << RB_ALIGNMENT_SHIFT)
+#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
+#define RB_ALIGNMENT           4U
 #define RB_MAX_SMALL_DATA      28
 
 enum {
@@ -133,7 +144,7 @@ enum {
 };
 
 /* inline for ring buffer fast paths */
-static inline unsigned
+static unsigned
 rb_event_length(struct ring_buffer_event *event)
 {
        unsigned length;
@@ -151,7 +162,7 @@ rb_event_length(struct ring_buffer_event *event)
 
        case RINGBUF_TYPE_DATA:
                if (event->len)
-                       length = event->len << RB_ALIGNMENT_SHIFT;
+                       length = event->len * RB_ALIGNMENT;
                else
                        length = event->array[0];
                return length + RB_EVNT_HDR_SIZE;
@@ -179,7 +190,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
-static inline void *
+static void *
 rb_event_data(struct ring_buffer_event *event)
 {
        BUG_ON(event->type != RINGBUF_TYPE_DATA);
@@ -209,7 +220,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
-       local_t          commit;        /* write commited index */
+       local_t          commit;        /* write committed index */
        unsigned char    data[];        /* data of buffer page */
 };
 
@@ -225,14 +236,25 @@ static void rb_init_page(struct buffer_data_page *bpage)
        local_set(&bpage->commit, 0);
 }
 
+/**
+ * ring_buffer_page_len - the size of data on the page.
+ * @page: The page to read
+ *
+ * Returns the amount of data on the page, including buffer page header.
+ */
+size_t ring_buffer_page_len(void *page)
+{
+       return local_read(&((struct buffer_data_page *)page)->commit)
+               + BUF_PAGE_HDR_SIZE;
+}
+
 /*
  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  * this issue out.
  */
-static inline void free_buffer_page(struct buffer_page *bpage)
+static void free_buffer_page(struct buffer_page *bpage)
 {
-       if (bpage->page)
-               free_page((unsigned long)bpage->page);
+       free_page((unsigned long)bpage->page);
        kfree(bpage);
 }
 
@@ -246,7 +268,7 @@ static inline int test_time_stamp(u64 delta)
        return 0;
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
+#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 
 /*
  * head_page == tail_page && head == tail then buffer is empty.
@@ -260,7 +282,7 @@ struct ring_buffer_per_cpu {
        struct list_head                pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
-       struct buffer_page              *commit_page;   /* commited pages */
+       struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   overrun;
        unsigned long                   entries;
@@ -273,8 +295,8 @@ struct ring_buffer {
        unsigned                        pages;
        unsigned                        flags;
        int                             cpus;
-       cpumask_var_t                   cpumask;
        atomic_t                        record_disabled;
+       cpumask_var_t                   cpumask;
 
        struct mutex                    mutex;
 
@@ -303,7 +325,7 @@ struct ring_buffer_iter {
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -811,7 +833,7 @@ rb_event_index(struct ring_buffer_event *event)
        return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
 }
 
-static inline int
+static int
 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
             struct ring_buffer_event *event)
 {
@@ -825,7 +847,7 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
                rb_commit_index(cpu_buffer) == index;
 }
 
-static inline void
+static void
 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
                    struct ring_buffer_event *event)
 {
@@ -850,7 +872,7 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
        local_set(&cpu_buffer->commit_page->page->commit, index);
 }
 
-static inline void
+static void
 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 {
        /*
@@ -896,7 +918,7 @@ static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        cpu_buffer->reader_page->read = 0;
 }
 
-static inline void rb_inc_iter(struct ring_buffer_iter *iter)
+static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 
@@ -926,7 +948,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
  * and with this, we can determine what to place into the
  * data field.
  */
-static inline void
+static void
 rb_update_event(struct ring_buffer_event *event,
                         unsigned type, unsigned length)
 {
@@ -938,15 +960,11 @@ rb_update_event(struct ring_buffer_event *event,
                break;
 
        case RINGBUF_TYPE_TIME_EXTEND:
-               event->len =
-                       (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
-                       >> RB_ALIGNMENT_SHIFT;
+               event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
                break;
 
        case RINGBUF_TYPE_TIME_STAMP:
-               event->len =
-                       (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
-                       >> RB_ALIGNMENT_SHIFT;
+               event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
                break;
 
        case RINGBUF_TYPE_DATA:
@@ -955,16 +973,14 @@ rb_update_event(struct ring_buffer_event *event,
                        event->len = 0;
                        event->array[0] = length;
                } else
-                       event->len =
-                               (length + (RB_ALIGNMENT-1))
-                               >> RB_ALIGNMENT_SHIFT;
+                       event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
                break;
        default:
                BUG();
        }
 }
 
-static inline unsigned rb_calculate_event_length(unsigned length)
+static unsigned rb_calculate_event_length(unsigned length)
 {
        struct ring_buffer_event event; /* Used only for sizeof array */
 
@@ -990,6 +1006,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        struct ring_buffer *buffer = cpu_buffer->buffer;
        struct ring_buffer_event *event;
        unsigned long flags;
+       bool lock_taken = false;
 
        commit_page = cpu_buffer->commit_page;
        /* we just need to protect against interrupts */
@@ -1003,7 +1020,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                struct buffer_page *next_page = tail_page;
 
                local_irq_save(flags);
-               __raw_spin_lock(&cpu_buffer->lock);
+               /*
+                * Since the write to the buffer is still not
+                * fully lockless, we must be careful with NMIs.
+                * The locks in the writers are taken when a write
+                * crosses to a new page. The locks protect against
+                * races with the readers (this will soon be fixed
+                * with a lockless solution).
+                *
+                * Because we can not protect against NMIs, and we
+                * want to keep traces reentrant, we need to manage
+                * what happens when we are in an NMI.
+                *
+                * NMIs can happen after we take the lock.
+                * If we are in an NMI, only take the lock
+                * if it is not already taken. Otherwise
+                * simply fail.
+                */
+               if (unlikely(in_nmi())) {
+                       if (!__raw_spin_trylock(&cpu_buffer->lock))
+                               goto out_reset;
+               } else
+                       __raw_spin_lock(&cpu_buffer->lock);
+
+               lock_taken = true;
 
                rb_inc_page(cpu_buffer, &next_page);
 
@@ -1012,7 +1052,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
                /* we grabbed the lock before incrementing */
                if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
-                       goto out_unlock;
+                       goto out_reset;
 
                /*
                 * If for some reason, we had an interrupt storm that made
@@ -1021,12 +1061,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                 */
                if (unlikely(next_page == commit_page)) {
                        WARN_ON_ONCE(1);
-                       goto out_unlock;
+                       goto out_reset;
                }
 
                if (next_page == head_page) {
                        if (!(buffer->flags & RB_FL_OVERWRITE))
-                               goto out_unlock;
+                               goto out_reset;
 
                        /* tail_page has not moved yet? */
                        if (tail_page == cpu_buffer->tail_page) {
@@ -1100,12 +1140,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
        return event;
 
- out_unlock:
+ out_reset:
        /* reset write */
        if (tail <= BUF_PAGE_SIZE)
                local_set(&tail_page->write, tail);
 
-       __raw_spin_unlock(&cpu_buffer->lock);
+       if (likely(lock_taken))
+               __raw_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
        return NULL;
 }
@@ -1265,7 +1306,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
  * @length: the length of the data to reserve (excluding event header)
- * @flags: a pointer to save the interrupt flags
  *
  * Returns a reserved event on the ring buffer to copy directly to.
  * The user of this interface will need to get the body to write into
@@ -1278,9 +1318,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
  * If NULL is returned, then nothing has been allocated or locked.
  */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-                        unsigned long length,
-                        unsigned long *flags)
+ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
@@ -1347,15 +1385,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  * ring_buffer_unlock_commit - commit a reserved event
  * @buffer: The buffer to commit to
  * @event: The event pointer to commit.
- * @flags: the interrupt flags received from ring_buffer_lock_reserve.
  *
  * This commits the data to the ring buffer, and releases any locks held.
  *
  * Must be paired with ring_buffer_lock_reserve.
  */
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-                             struct ring_buffer_event *event,
-                             unsigned long flags)
+                             struct ring_buffer_event *event)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu = raw_smp_processor_id();
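[Editor's note: the two hunks above drop the interrupt-flags round trip from the reserve/commit API. A minimal writer-side sketch of the new pairing, not from the patch; rb_write_u64() is a hypothetical caller, the ring_buffer_*() calls use the signatures as changed above.]

#include <linux/ring_buffer.h>

static void rb_write_u64(struct ring_buffer *buffer, u64 val)
{
        struct ring_buffer_event *event;
        u64 *body;

        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
                return;         /* buffer full or recording disabled */

        body = ring_buffer_event_data(event);
        *body = val;

        ring_buffer_unlock_commit(buffer, event);
}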
@@ -1438,7 +1474,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_write);
 
-static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
+static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct buffer_page *reader = cpu_buffer->reader_page;
        struct buffer_page *head = cpu_buffer->head_page;
@@ -2277,9 +2313,24 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
        if (buffer_a->pages != buffer_b->pages)
                return -EINVAL;
 
+       if (ring_buffer_flags != RB_BUFFERS_ON)
+               return -EAGAIN;
+
+       if (atomic_read(&buffer_a->record_disabled))
+               return -EAGAIN;
+
+       if (atomic_read(&buffer_b->record_disabled))
+               return -EAGAIN;
+
        cpu_buffer_a = buffer_a->buffers[cpu];
        cpu_buffer_b = buffer_b->buffers[cpu];
 
+       if (atomic_read(&cpu_buffer_a->record_disabled))
+               return -EAGAIN;
+
+       if (atomic_read(&cpu_buffer_b->record_disabled))
+               return -EAGAIN;
+
        /*
         * We can't do a synchronize_sched here because this
         * function can be called in atomic context.
@@ -2303,13 +2354,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-                             struct buffer_data_page *bpage)
+                             struct buffer_data_page *bpage,
+                             unsigned int offset)
 {
        struct ring_buffer_event *event;
        unsigned long head;
 
        __raw_spin_lock(&cpu_buffer->lock);
-       for (head = 0; head < local_read(&bpage->commit);
+       for (head = offset; head < local_read(&bpage->commit);
             head += rb_event_length(event)) {
 
                event = __rb_data_page_index(bpage, head);
@@ -2340,8 +2392,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 {
-       unsigned long addr;
        struct buffer_data_page *bpage;
+       unsigned long addr;
 
        addr = __get_free_page(GFP_KERNEL);
        if (!addr)
@@ -2349,6 +2401,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 
        bpage = (void *)addr;
 
+       rb_init_page(bpage);
+
        return bpage;
 }
 
@@ -2368,6 +2422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * ring_buffer_read_page - extract a page from the ring buffer
  * @buffer: buffer to extract from
  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
+ * @len: amount to extract
  * @cpu: the cpu of the buffer to extract
  * @full: should the extraction only happen when the page is full.
  *
@@ -2377,12 +2432,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * to swap with a page in the ring buffer.
  *
  * for example:
- *     rpage = ring_buffer_alloc_page(buffer);
+ *     rpage = ring_buffer_alloc_read_page(buffer);
  *     if (!rpage)
  *             return error;
- *     ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- *     if (ret)
- *             process_page(rpage);
+ *     ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+ *     if (ret >= 0)
+ *             process_page(rpage, ret);
  *
  * When @full is set, the function will not return true unless
  * the writer is off the reader page.
@@ -2393,69 +2448,111 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  *  responsible for that.
  *
  * Returns:
- *  1 if data has been transferred
- *  0 if no data has been transferred.
+ *  >=0 if data has been transferred, returns the offset of consumed data.
+ *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
-                           void **data_page, int cpu, int full)
+                         void **data_page, size_t len, int cpu, int full)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        struct buffer_data_page *bpage;
+       struct buffer_page *reader;
        unsigned long flags;
-       int ret = 0;
+       unsigned int commit;
+       unsigned int read;
+       u64 save_timestamp;
+       int ret = -1;
+
+       /*
+        * If len is not big enough to hold the page header, then
+        * we can not copy anything.
+        */
+       if (len <= BUF_PAGE_HDR_SIZE)
+               return -1;
+
+       len -= BUF_PAGE_HDR_SIZE;
 
        if (!data_page)
-               return 0;
+               return -1;
 
        bpage = *data_page;
        if (!bpage)
-               return 0;
+               return -1;
 
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-       /*
-        * rb_buffer_peek will get the next ring buffer if
-        * the current reader page is empty.
-        */
-       event = rb_buffer_peek(buffer, cpu, NULL);
-       if (!event)
+       reader = rb_get_reader_page(cpu_buffer);
+       if (!reader)
                goto out;
 
-       /* check for data */
-       if (!local_read(&cpu_buffer->reader_page->page->commit))
-               goto out;
+       event = rb_reader_event(cpu_buffer);
+
+       read = reader->read;
+       commit = rb_page_commit(reader);
+
        /*
-        * If the writer is already off of the read page, then simply
-        * switch the read page with the given page. Otherwise
-        * we need to copy the data from the reader to the writer.
+        * If this page has been partially read or
+        * if len is not big enough to read the rest of the page or
+        * a writer is still on the page, then
+        * we must copy the data from the page to the buffer.
+        * Otherwise, we can simply swap the page with the one passed in.
         */
-       if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-               unsigned int read = cpu_buffer->reader_page->read;
+       if (read || (len < (commit - read)) ||
+           cpu_buffer->reader_page == cpu_buffer->commit_page) {
+               struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
+               unsigned int rpos = read;
+               unsigned int pos = 0;
+               unsigned int size;
 
                if (full)
                        goto out;
-               /* The writer is still on the reader page, we must copy */
-               bpage = cpu_buffer->reader_page->page;
-               memcpy(bpage->data,
-                      cpu_buffer->reader_page->page->data + read,
-                      local_read(&bpage->commit) - read);
 
-               /* consume what was read */
-               cpu_buffer->reader_page += read;
+               if (len > (commit - read))
+                       len = (commit - read);
 
+               size = rb_event_length(event);
+
+               if (len < size)
+                       goto out;
+
+               /* save the current timestamp, since the user will need it */
+               save_timestamp = cpu_buffer->read_stamp;
+
+               /* Need to copy one event at a time */
+               do {
+                       memcpy(bpage->data + pos, rpage->data + rpos, size);
+
+                       len -= size;
+
+                       rb_advance_reader(cpu_buffer);
+                       rpos = reader->read;
+                       pos += size;
+
+                       event = rb_reader_event(cpu_buffer);
+                       size = rb_event_length(event);
+               } while (len > size);
+
+               /* update bpage */
+               local_set(&bpage->commit, pos);
+               bpage->time_stamp = save_timestamp;
+
+               /* we copied everything to the beginning */
+               read = 0;
        } else {
                /* swap the pages */
                rb_init_page(bpage);
-               bpage = cpu_buffer->reader_page->page;
-               cpu_buffer->reader_page->page = *data_page;
-               cpu_buffer->reader_page->read = 0;
+               bpage = reader->page;
+               reader->page = *data_page;
+               local_set(&reader->write, 0);
+               reader->read = 0;
                *data_page = bpage;
+
+               /* update the entry counter */
+               rb_remove_entries(cpu_buffer, bpage, read);
        }
-       ret = 1;
+       ret = read;
 
-       /* update the entry counter */
-       rb_remove_entries(cpu_buffer, bpage);
  out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
@@ -2466,7 +2563,7 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
 {
-       long *p = filp->private_data;
+       unsigned long *p = filp->private_data;
        char buf[64];
        int r;
 
@@ -2482,9 +2579,9 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
-       long *p = filp->private_data;
+       unsigned long *p = filp->private_data;
        char buf[64];
-       long val;
+       unsigned long val;
        int ret;
 
        if (cnt >= sizeof(buf))
@@ -2509,7 +2606,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static struct file_operations rb_simple_fops = {
+static const struct file_operations rb_simple_fops = {
        .open           = tracing_open_generic,
        .read           = rb_simple_read,
        .write          = rb_simple_write,
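[Editor's note: since ring_buffer_read_page() now reports the offset of the consumed data (or a negative value) rather than 0/1, a consumer loop changes accordingly. A hedged sketch, not part of the patch; process_page() is a hypothetical consumer, everything else is the API documented above.]

#include <linux/ring_buffer.h>

extern void process_page(void *page, size_t len);       /* hypothetical */

static void read_one_page(struct ring_buffer *buffer, int cpu)
{
        void *rpage = ring_buffer_alloc_read_page(buffer);
        int ret;

        if (!rpage)
                return;

        ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
        if (ret >= 0)
                /* ret is the offset of the consumed data within rpage;
                 * ring_buffer_page_len() includes the page header. */
                process_page(rpage, ring_buffer_page_len(rpage));

        ring_buffer_free_read_page(buffer, rpage);
}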
index 17bb88d..e5b5619 100644 (file)
  *  Copyright (C) 2004-2006 Ingo Molnar
  *  Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/utsrelease.h>
+#include <linux/stacktrace.h>
+#include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
+#include <linux/irqflags.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
 #include <linux/linkage.h>
 #include <linux/uaccess.h>
+#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/writeback.h>
-
-#include <linux/stacktrace.h>
-#include <linux/ring_buffer.h>
-#include <linux/irqflags.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 #define TRACE_BUFFER_FLAGS     (RB_FL_OVERWRITE)
 
@@ -47,11 +48,16 @@ unsigned long __read_mostly tracing_thresh;
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occur
  * at the same time, giving false positive or negative results.
  */
 static bool __read_mostly tracing_selftest_running;
 
+/*
+ * If a tracer is running, we do not want to run SELFTEST.
+ */
+static bool __read_mostly tracing_selftest_disabled;
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
        { }
@@ -73,7 +79,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  * of the tracer is successful. But that is the only place that sets
  * this back to zero.
  */
-int tracing_disabled = 1;
+static int tracing_disabled = 1;
 
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
@@ -91,6 +97,9 @@ static inline void ftrace_enable_cpu(void)
 
 static cpumask_var_t __read_mostly     tracing_buffer_mask;
 
+/* Define which cpu buffers are currently read in trace_pipe */
+static cpumask_var_t                   tracing_reader_cpumask;
+
 #define for_each_tracing_cpu(cpu)      \
        for_each_cpu(cpu, tracing_buffer_mask)
 
@@ -109,14 +118,19 @@ static cpumask_var_t __read_mostly        tracing_buffer_mask;
  */
 int ftrace_dump_on_oops;
 
-static int tracing_set_tracer(char *buf);
+static int tracing_set_tracer(const char *buf);
+
+#define BOOTUP_TRACER_SIZE             100
+static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-       tracing_set_tracer(str);
+       strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+       default_bootup_tracer = bootup_tracer_buf;
        return 1;
 }
-__setup("ftrace", set_ftrace);
+__setup("ftrace=", set_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -186,9 +200,6 @@ int tracing_is_enabled(void)
        return tracer_enabled;
 }
 
-/* function tracing enabled */
-int                            ftrace_function_enabled;
-
 /*
  * trace_buf_size is the size in bytes that is allocated
  * for a buffer. Note, the number of bytes is always rounded
@@ -229,7 +240,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-       TRACE_ITER_ANNOTATE;
+       TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -280,13 +291,15 @@ static const char *trace_options[] = {
        "block",
        "stacktrace",
        "sched-tree",
-       "ftrace_printk",
+       "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
+       "context-info",
+       "latency-format",
        NULL
 };
 
@@ -326,146 +339,37 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        data->rt_priority = tsk->rt_priority;
 
        /* record this tasks comm */
-       tracing_record_cmdline(current);
+       tracing_record_cmdline(tsk);
 }
 
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
-       int len = (PAGE_SIZE - 1) - s->len;
-       va_list ap;
+       int len;
        int ret;
 
-       if (!len)
-               return 0;
-
-       va_start(ap, fmt);
-       ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
-       va_end(ap);
-
-       /* If we can't write it all, don't bother writing anything */
-       if (ret >= len)
-               return 0;
-
-       s->len += ret;
-
-       return len;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-static int
-trace_seq_puts(struct trace_seq *s, const char *str)
-{
-       int len = strlen(str);
-
-       if (len > ((PAGE_SIZE - 1) - s->len))
-               return 0;
-
-       memcpy(s->buffer + s->len, str, len);
-       s->len += len;
-
-       return len;
-}
-
-static int
-trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
-       if (s->len >= (PAGE_SIZE - 1))
-               return 0;
-
-       s->buffer[s->len++] = c;
-
-       return 1;
-}
-
-static int
-trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
-{
-       if (len > ((PAGE_SIZE - 1) - s->len))
+       if (!cnt)
                return 0;
 
-       memcpy(s->buffer + s->len, mem, len);
-       s->len += len;
-
-       return len;
-}
-
-#define MAX_MEMHEX_BYTES       8
-#define HEX_CHARS              (MAX_MEMHEX_BYTES*2 + 1)
-
-static int
-trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
-{
-       unsigned char hex[HEX_CHARS];
-       unsigned char *data = mem;
-       int i, j;
-
-#ifdef __BIG_ENDIAN
-       for (i = 0, j = 0; i < len; i++) {
-#else
-       for (i = len-1, j = 0; i >= 0; i--) {
-#endif
-               hex[j++] = hex_asc_hi(data[i]);
-               hex[j++] = hex_asc_lo(data[i]);
-       }
-       hex[j++] = ' ';
-
-       return trace_seq_putmem(s, hex, j);
-}
-
-static int
-trace_seq_path(struct trace_seq *s, struct path *path)
-{
-       unsigned char *p;
+       if (s->len <= s->readpos)
+               return -EBUSY;
 
-       if (s->len >= (PAGE_SIZE - 1))
-               return 0;
-       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-       if (!IS_ERR(p)) {
-               p = mangle_path(s->buffer + s->len, p, "\n");
-               if (p) {
-                       s->len = p - s->buffer;
-                       return 1;
-               }
-       } else {
-               s->buffer[s->len++] = '?';
-               return 1;
-       }
+       len = s->len - s->readpos;
+       if (cnt > len)
+               cnt = len;
+       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+       if (ret == cnt)
+               return -EFAULT;
 
-       return 0;
-}
+       cnt -= ret;
 
-static void
-trace_seq_reset(struct trace_seq *s)
-{
-       s->len = 0;
-       s->readpos = 0;
+       s->readpos += cnt;
+       return cnt;
 }
 
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
+ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
        int len;
-       int ret;
+       void *ret;
 
        if (s->len <= s->readpos)
                return -EBUSY;
@@ -473,11 +377,11 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
-       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-       if (ret)
+       ret = memcpy(buf, s->buffer + s->readpos, cnt);
+       if (!ret)
                return -EFAULT;
 
-       s->readpos += len;
+       s->readpos += cnt;
        return cnt;
 }
 
@@ -489,7 +393,7 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
        s->buffer[len] = 0;
        seq_puts(m, s->buffer);
 
-       trace_seq_reset(s);
+       trace_seq_init(s);
 }
 
 /**
@@ -543,7 +447,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        ftrace_enable_cpu();
 
-       WARN_ON_ONCE(ret);
+       WARN_ON_ONCE(ret && ret != -EAGAIN);
 
        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
@@ -556,6 +460,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
+__releases(kernel_lock)
+__acquires(kernel_lock)
 {
        struct tracer *t;
        int len;
@@ -594,9 +500,12 @@ int register_tracer(struct tracer *type)
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
+       if (!type->wait_pipe)
+               type->wait_pipe = default_wait_pipe;
+
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
-       if (type->selftest) {
+       if (type->selftest && !tracing_selftest_disabled) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;
                int i;
@@ -638,8 +547,26 @@ int register_tracer(struct tracer *type)
  out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);
-       lock_kernel();
 
+       if (ret || !default_bootup_tracer)
+               goto out_unlock;
+
+       if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+               goto out_unlock;
+
+       printk(KERN_INFO "Starting tracer '%s'\n", type->name);
+       /* Do we want this tracer to start on bootup? */
+       tracing_set_tracer(type->name);
+       default_bootup_tracer = NULL;
+       /* disable other selftests, since this will break it. */
+       tracing_selftest_disabled = 1;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+       printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
+              type->name);
+#endif
+
+ out_unlock:
+       lock_kernel();
        return ret;
 }
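[Editor's note: one consequence of the bootup-tracer handling added above is that a tracer named via the 'ftrace=' command-line parameter (parsed earlier in this file) is started as soon as it registers, and the remaining selftests are disabled because a tracer is already live. Illustrative example, assuming the named tracer is built in:

        ftrace=function_graph

on the kernel command line selects the function_graph tracer at boot instead of waiting for userspace to write to current_tracer.]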
 
@@ -658,6 +585,15 @@ void unregister_tracer(struct tracer *type)
 
  found:
        *t = (*t)->next;
+
+       if (type == current_trace && tracer_enabled) {
+               tracer_enabled = 0;
+               tracing_stop();
+               if (current_trace->stop)
+                       current_trace->stop(&global_trace);
+               current_trace = &nop_trace;
+       }
+
        if (strlen(type->name) != max_tracer_type_len)
                goto out;
 
@@ -693,10 +629,10 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
-atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
@@ -738,13 +674,12 @@ void tracing_start(void)
                return;
 
        spin_lock_irqsave(&tracing_start_lock, flags);
-       if (--trace_stop_count)
-               goto out;
-
-       if (trace_stop_count < 0) {
-               /* Someone screwed up their debugging */
-               WARN_ON_ONCE(1);
-               trace_stop_count = 0;
+       if (--trace_stop_count) {
+               if (trace_stop_count < 0) {
+                       /* Someone screwed up their debugging */
+                       WARN_ON_ONCE(1);
+                       trace_stop_count = 0;
+               }
                goto out;
        }
 
@@ -806,7 +741,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
-       if (!spin_trylock(&trace_cmdline_lock))
+       if (!__raw_spin_trylock(&trace_cmdline_lock))
                return;
 
        idx = map_pid_to_cmdline[tsk->pid];
@@ -824,7 +759,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-       spin_unlock(&trace_cmdline_lock);
+       __raw_spin_unlock(&trace_cmdline_lock);
 }
 
 char *trace_find_cmdline(int pid)
@@ -876,78 +811,114 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
 
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+                                                   unsigned char type,
+                                                   unsigned long len,
+                                                   unsigned long flags, int pc)
+{
+       struct ring_buffer_event *event;
+
+       event = ring_buffer_lock_reserve(tr->buffer, len);
+       if (event != NULL) {
+               struct trace_entry *ent = ring_buffer_event_data(event);
+
+               tracing_generic_entry_update(ent, flags, pc);
+               ent->type = type;
+       }
+
+       return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+                              unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+                                  unsigned long flags, int pc);
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+                               struct ring_buffer_event *event,
+                               unsigned long flags, int pc)
+{
+       ring_buffer_unlock_commit(tr->buffer, event);
+
+       ftrace_trace_stack(tr, flags, 6, pc);
+       ftrace_trace_userstack(tr, flags, pc);
+       trace_wake_up();
+}
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+                                 unsigned long flags, int pc)
+{
+       return trace_buffer_lock_reserve(&global_trace,
+                                        type, len, flags, pc);
+}
+
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+                                       unsigned long flags, int pc)
+{
+       return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+}
+
 void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
 {
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
-       unsigned long irq_flags;
 
        /* If we are reading the ring buffer, don't trace */
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+                                         flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type                 = TRACE_FN;
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void __trace_graph_entry(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
 {
        struct ring_buffer_event *event;
        struct ftrace_graph_ent_entry *entry;
-       unsigned long irq_flags;
 
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
 
-       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type                 = TRACE_GRAPH_ENT;
        entry->graph_ent                        = *trace;
-       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+       ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 
 static void __trace_graph_return(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
 {
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *entry;
-       unsigned long irq_flags;
 
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
 
-       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type                 = TRACE_GRAPH_RET;
        entry->ret                              = *trace;
-       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+       ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -957,31 +928,23 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        int pc)
 {
        if (likely(!atomic_read(&data->disabled)))
-               trace_function(tr, data, ip, parent_ip, flags, pc);
+               trace_function(tr, ip, parent_ip, flags, pc);
 }
 
-static void ftrace_trace_stack(struct trace_array *tr,
-                              struct trace_array_cpu *data,
-                              unsigned long flags,
-                              int skip, int pc)
+static void __ftrace_trace_stack(struct trace_array *tr,
+                                unsigned long flags,
+                                int skip, int pc)
 {
 #ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
-       unsigned long irq_flags;
-
-       if (!(trace_flags & TRACE_ITER_STACKTRACE))
-               return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type         = TRACE_STACK;
-
        memset(&entry->caller, 0, sizeof(entry->caller));
 
        trace.nr_entries        = 0;
@@ -990,38 +953,43 @@ static void ftrace_trace_stack(struct trace_array *tr,
        trace.entries           = entry->caller;
 
        save_stack_trace(&trace);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
+static void ftrace_trace_stack(struct trace_array *tr,
+                              unsigned long flags,
+                              int skip, int pc)
+{
+       if (!(trace_flags & TRACE_ITER_STACKTRACE))
+               return;
+
+       __ftrace_trace_stack(tr, flags, skip, pc);
+}
+
 void __trace_stack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
                   unsigned long flags,
-                  int skip)
+                  int skip, int pc)
 {
-       ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+       __ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
-                  unsigned long flags, int pc)
+                                  unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
        struct stack_trace trace;
-       unsigned long irq_flags;
 
        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type         = TRACE_USER_STACK;
 
        memset(&entry->caller, 0, sizeof(entry->caller));
 
@@ -1031,70 +999,58 @@ static void ftrace_trace_userstack(struct trace_array *tr,
        trace.entries           = entry->caller;
 
        save_stack_trace_user(&trace);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
-void __trace_userstack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
-                  unsigned long flags)
+#ifdef UNUSED
+static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
-       ftrace_trace_userstack(tr, data, flags, preempt_count());
+       ftrace_trace_userstack(tr, flags, preempt_count());
 }
+#endif /* UNUSED */
 
 static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
                     int pc)
 {
        struct ring_buffer_event *event;
-       struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct special_entry *entry;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+                                         sizeof(*entry), 0, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, pc);
-       entry->ent.type                 = TRACE_SPECIAL;
        entry->arg1                     = arg1;
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, data, irq_flags, 4, pc);
-       ftrace_trace_userstack(tr, data, irq_flags, pc);
-
-       trace_wake_up();
+       trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void
 __trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-       ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+       ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
-                          struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
 {
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type                 = TRACE_CTX;
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
@@ -1102,29 +1058,23 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, data, flags, 5, pc);
-       ftrace_trace_userstack(tr, data, flags, pc);
+       trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
 tracing_sched_wakeup_trace(struct trace_array *tr,
-                          struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
 {
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type                 = TRACE_WAKE;
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
@@ -1132,11 +1082,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, data, flags, 6, pc);
-       ftrace_trace_userstack(tr, data, flags, pc);
 
-       trace_wake_up();
+       ring_buffer_unlock_commit(tr->buffer, event);
+       ftrace_trace_stack(tr, flags, 6, pc);
+       ftrace_trace_userstack(tr, flags, pc);
 }
 
 void
@@ -1157,66 +1106,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
        data = tr->data[cpu];
 
        if (likely(atomic_inc_return(&data->disabled) == 1))
-               ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
-
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
-}
-
-#ifdef CONFIG_FUNCTION_TRACER
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
-{
-       struct trace_array *tr = &global_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
-       int cpu, resched;
-       int pc;
-
-       if (unlikely(!ftrace_function_enabled))
-               return;
-
-       pc = preempt_count();
-       resched = ftrace_preempt_disable();
-       local_save_flags(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1))
-               trace_function(tr, data, ip, parent_ip, flags, pc);
-
-       atomic_dec(&data->disabled);
-       ftrace_preempt_enable(resched);
-}
-
-static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
-       struct trace_array *tr = &global_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
-       int cpu;
-       int pc;
-
-       if (unlikely(!ftrace_function_enabled))
-               return;
-
-       /*
-        * Need to use raw, since this must be called before the
-        * recursive protection is performed.
-        */
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1)) {
-               pc = preempt_count();
-               trace_function(tr, data, ip, parent_ip, flags, pc);
-       }
+               ftrace_trace_special(tr, arg1, arg2, arg3, pc);
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
@@ -1244,7 +1134,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               __trace_graph_entry(tr, data, trace, flags, pc);
+               __trace_graph_entry(tr, trace, flags, pc);
        }
        /* Only do the atomic if it is not already set */
        if (!test_tsk_trace_graph(current))
@@ -1270,7 +1160,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               __trace_graph_return(tr, data, trace, flags, pc);
+               __trace_graph_return(tr, trace, flags, pc);
        }
        if (!trace->depth)
                clear_tsk_trace_graph(current);
@@ -1279,30 +1169,66 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-       .func = function_trace_call,
-};
 
-void tracing_start_function_trace(void)
+/**
+ * trace_vprintk - write a binary message to the tracing buffer

+ *
+ */
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-       ftrace_function_enabled = 0;
+       static DEFINE_SPINLOCK(trace_buf_lock);
+       static u32 trace_buf[TRACE_BUF_SIZE];
 
-       if (trace_flags & TRACE_ITER_PREEMPTONLY)
-               trace_ops.func = function_trace_call_preempt_only;
-       else
-               trace_ops.func = function_trace_call;
+       struct ring_buffer_event *event;
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       struct print_entry *entry;
+       unsigned long flags;
+       int resched;
+       int cpu, len = 0, size, pc;
 
-       register_ftrace_function(&trace_ops);
-       ftrace_function_enabled = 1;
-}
+       if (unlikely(tracing_selftest_running || tracing_disabled))
+               return 0;
 
-void tracing_stop_function_trace(void)
-{
-       ftrace_function_enabled = 0;
-       unregister_ftrace_function(&trace_ops);
+       /* Don't pollute graph traces with trace_vprintk internals */
+       pause_graph_tracing();
+
+       pc = preempt_count();
+       resched = ftrace_preempt_disable();
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+
+       if (unlikely(atomic_read(&data->disabled)))
+               goto out;
+
+       spin_lock_irqsave(&trace_buf_lock, flags);
+       len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+       if (len > TRACE_BUF_SIZE || len < 0)
+               goto out_unlock;
+
+       size = sizeof(*entry) + sizeof(u32) * len;
+       event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc);
+       if (!event)
+               goto out_unlock;
+       entry = ring_buffer_event_data(event);
+       entry->ip                       = ip;
+       entry->depth                    = depth;
+       entry->fmt                      = fmt;
+
+       memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+       ring_buffer_unlock_commit(tr->buffer, event);
+
+out_unlock:
+       spin_unlock_irqrestore(&trace_buf_lock, flags);
+
+out:
+       ftrace_preempt_enable(resched);
+       unpause_graph_tracing();
+
+       return len;
 }
-#endif
+EXPORT_SYMBOL_GPL(trace_vprintk);
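trace_vprintk() takes a va_list, so callers typically wrap it. A hedged sketch of such a wrapper (the wrapper name is made up; only trace_vprintk() itself comes from the code above):

	/* Sketch only: example_trace_printk() is a made-up caller-side wrapper. */
	static int example_trace_printk(unsigned long ip, const char *fmt, ...)
	{
		va_list args;
		int len;

		va_start(args, fmt);
		/* depth 0: not nested inside the function graph tracer */
		len = trace_vprintk(ip, 0, fmt, args);
		va_end(args);

		return len;
	}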
 
 enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
@@ -1345,10 +1271,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 {
        struct ring_buffer *buffer = iter->tr->buffer;
        struct trace_entry *ent, *next = NULL;
+       int cpu_file = iter->cpu_file;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
        int cpu;
 
+       /*
+        * If we are in a per_cpu trace file, don't bother iterating over
+        * all the cpus; peek directly at that cpu's buffer.
+        */
+       if (cpu_file > TRACE_PIPE_ALL_CPU) {
+               if (ring_buffer_empty_cpu(buffer, cpu_file))
+                       return NULL;
+               ent = peek_next_entry(iter, cpu_file, ent_ts);
+               if (ent_cpu)
+                       *ent_cpu = cpu_file;
+
+               return ent;
+       }
+
        for_each_tracing_cpu(cpu) {
 
                if (ring_buffer_empty_cpu(buffer, cpu))
@@ -1376,8 +1317,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 }
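The per-cpu shortcut above relies on per-cpu trace files carrying their cpu number in inode->i_private; the open callbacks further down read it back as iter->cpu_file. A rough sketch of creating such a file (the helper name is made up; only the i_private convention comes from this patch):

	/*
	 * Sketch only: example_create_cpu_file() is hypothetical.  The cpu
	 * number is stashed in debugfs i_private and read back as
	 * (long) inode->i_private by __tracing_open() and tracing_open_pipe().
	 */
	static void example_create_cpu_file(const char *name, struct dentry *parent,
					    long cpu,
					    const struct file_operations *fops)
	{
		debugfs_create_file(name, 0444, parent, (void *)cpu, fops);
	}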
 
 /* Find the next real entry, without updating the iterator itself */
-static struct trace_entry *
-find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+                                         int *ent_cpu, u64 *ent_ts)
 {
        return __find_next_entry(iter, ent_cpu, ent_ts);
 }
@@ -1426,19 +1367,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
        return ent;
 }
 
+/*
+ * No locking is necessary here. The worst that can
+ * happen is losing events consumed at the same time
+ * by a trace_pipe reader.
+ * Other than that, we don't risk crashing the ring buffer
+ * because it serializes the readers.
+ *
+ * The current tracer is copied to avoid global locking
+ * all around.
+ */
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
        struct trace_iterator *iter = m->private;
+       static struct tracer *old_tracer;
+       int cpu_file = iter->cpu_file;
        void *p = NULL;
        loff_t l = 0;
        int cpu;
 
+       /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
-
-       if (!current_trace || current_trace != iter->trace) {
-               mutex_unlock(&trace_types_lock);
-               return NULL;
+       if (unlikely(old_tracer != current_trace && current_trace)) {
+               old_tracer = current_trace;
+               *iter->trace = *current_trace;
        }
+       mutex_unlock(&trace_types_lock);
 
        atomic_inc(&trace_record_cmdline_disabled);
 
@@ -1449,9 +1403,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
                ftrace_disable_cpu();
 
-               for_each_tracing_cpu(cpu) {
-                       ring_buffer_iter_reset(iter->buffer_iter[cpu]);
-               }
+               if (cpu_file == TRACE_PIPE_ALL_CPU) {
+                       for_each_tracing_cpu(cpu)
+                               ring_buffer_iter_reset(iter->buffer_iter[cpu]);
+               } else
+                       ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
+
 
                ftrace_enable_cpu();
 
@@ -1469,158 +1426,9 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 static void s_stop(struct seq_file *m, void *p)
 {
        atomic_dec(&trace_record_cmdline_disabled);
-       mutex_unlock(&trace_types_lock);
 }
 
-#ifdef CONFIG_KRETPROBES
-static inline const char *kretprobed(const char *name)
-{
-       static const char tramp_name[] = "kretprobe_trampoline";
-       int size = sizeof(tramp_name);
-
-       if (strncmp(tramp_name, name, size) == 0)
-               return "[unknown/kretprobe'd]";
-       return name;
-}
-#else
-static inline const char *kretprobed(const char *name)
-{
-       return name;
-}
-#endif /* CONFIG_KRETPROBES */
-
-static int
-seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
-       char str[KSYM_SYMBOL_LEN];
-       const char *name;
-
-       kallsyms_lookup(address, NULL, NULL, NULL, str);
-
-       name = kretprobed(str);
-
-       return trace_seq_printf(s, fmt, name);
-#endif
-       return 1;
-}
-
-static int
-seq_print_sym_offset(struct trace_seq *s, const char *fmt,
-                    unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
-       char str[KSYM_SYMBOL_LEN];
-       const char *name;
-
-       sprint_symbol(str, address);
-       name = kretprobed(str);
-
-       return trace_seq_printf(s, fmt, name);
-#endif
-       return 1;
-}
-
-#ifndef CONFIG_64BIT
-# define IP_FMT "%08lx"
-#else
-# define IP_FMT "%016lx"
-#endif
-
-int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
-{
-       int ret;
-
-       if (!ip)
-               return trace_seq_printf(s, "0");
-
-       if (sym_flags & TRACE_ITER_SYM_OFFSET)
-               ret = seq_print_sym_offset(s, "%s", ip);
-       else
-               ret = seq_print_sym_short(s, "%s", ip);
-
-       if (!ret)
-               return 0;
-
-       if (sym_flags & TRACE_ITER_SYM_ADDR)
-               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
-       return ret;
-}
-
-static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
-                                   unsigned long ip, unsigned long sym_flags)
-{
-       struct file *file = NULL;
-       unsigned long vmstart = 0;
-       int ret = 1;
-
-       if (mm) {
-               const struct vm_area_struct *vma;
-
-               down_read(&mm->mmap_sem);
-               vma = find_vma(mm, ip);
-               if (vma) {
-                       file = vma->vm_file;
-                       vmstart = vma->vm_start;
-               }
-               if (file) {
-                       ret = trace_seq_path(s, &file->f_path);
-                       if (ret)
-                               ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
-               }
-               up_read(&mm->mmap_sem);
-       }
-       if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
-               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
-       return ret;
-}
-
-static int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
-                     unsigned long sym_flags)
-{
-       struct mm_struct *mm = NULL;
-       int ret = 1;
-       unsigned int i;
-
-       if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
-               struct task_struct *task;
-               /*
-                * we do the lookup on the thread group leader,
-                * since individual threads might have already quit!
-                */
-               rcu_read_lock();
-               task = find_task_by_vpid(entry->ent.tgid);
-               if (task)
-                       mm = get_task_mm(task);
-               rcu_read_unlock();
-       }
-
-       for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-               unsigned long ip = entry->caller[i];
-
-               if (ip == ULONG_MAX || !ret)
-                       break;
-               if (i && ret)
-                       ret = trace_seq_puts(s, " <- ");
-               if (!ip) {
-                       if (ret)
-                               ret = trace_seq_puts(s, "??");
-                       continue;
-               }
-               if (!ret)
-                       break;
-               if (ret)
-                       ret = seq_print_user_ip(s, mm, ip, sym_flags);
-       }
-
-       if (mm)
-               mmput(mm);
-       return ret;
-}
-
-static void print_lat_help_header(struct seq_file *m)
+static void print_lat_help_header(struct seq_file *m)
 {
        seq_puts(m, "#                  _------=> CPU#            \n");
        seq_puts(m, "#                 / _-----=> irqs-off        \n");
@@ -1658,11 +1466,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
        total = entries +
                ring_buffer_overruns(iter->tr->buffer);
 
-       seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+       seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
-       seq_puts(m, "-----------------------------------"
+       seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
-       seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+       seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
@@ -1684,121 +1492,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 #else
        seq_puts(m, ")\n");
 #endif
-       seq_puts(m, "    -----------------\n");
-       seq_printf(m, "    | task: %.16s-%d "
+       seq_puts(m, "#    -----------------\n");
+       seq_printf(m, "#    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
-       seq_puts(m, "    -----------------\n");
+       seq_puts(m, "#    -----------------\n");
 
        if (data->critical_start) {
-               seq_puts(m, " => started at: ");
+               seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
-               seq_puts(m, "\n => ended at:   ");
+               seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
-               seq_puts(m, "\n");
-       }
-
-       seq_puts(m, "\n");
-}
-
-static void
-lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
-{
-       int hardirq, softirq;
-       char *comm;
-
-       comm = trace_find_cmdline(entry->pid);
-
-       trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
-       trace_seq_printf(s, "%3d", cpu);
-       trace_seq_printf(s, "%c%c",
-                       (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
-                        (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
-                       ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
-
-       hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
-       softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
-       if (hardirq && softirq) {
-               trace_seq_putc(s, 'H');
-       } else {
-               if (hardirq) {
-                       trace_seq_putc(s, 'h');
-               } else {
-                       if (softirq)
-                               trace_seq_putc(s, 's');
-                       else
-                               trace_seq_putc(s, '.');
-               }
-       }
-
-       if (entry->preempt_count)
-               trace_seq_printf(s, "%x", entry->preempt_count);
-       else
-               trace_seq_puts(s, ".");
-}
-
-unsigned long preempt_mark_thresh = 100;
-
-static void
-lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
-                   unsigned long rel_usecs)
-{
-       trace_seq_printf(s, " %4lldus", abs_usecs);
-       if (rel_usecs > preempt_mark_thresh)
-               trace_seq_puts(s, "!: ");
-       else if (rel_usecs > 1)
-               trace_seq_puts(s, "+: ");
-       else
-               trace_seq_puts(s, " : ");
-}
-
-static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
-
-static int task_state_char(unsigned long state)
-{
-       int bit = state ? __ffs(state) + 1 : 0;
-
-       return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
-/*
- * The message is supposed to contain an ending newline.
- * If the printing stops prematurely, try to add a newline of our own.
- */
-void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
-{
-       struct trace_entry *ent;
-       struct trace_field_cont *cont;
-       bool ok = true;
-
-       ent = peek_next_entry(iter, iter->cpu, NULL);
-       if (!ent || ent->type != TRACE_CONT) {
-               trace_seq_putc(s, '\n');
-               return;
+               seq_puts(m, "#\n");
        }
 
-       do {
-               cont = (struct trace_field_cont *)ent;
-               if (ok)
-                       ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
-
-               ftrace_disable_cpu();
-
-               if (iter->buffer_iter[iter->cpu])
-                       ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-               else
-                       ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-
-               ftrace_enable_cpu();
-
-               ent = peek_next_entry(iter, iter->cpu, NULL);
-       } while (ent && ent->type == TRACE_CONT);
-
-       if (!ok)
-               trace_seq_putc(s, '\n');
+       seq_puts(m, "#\n");
 }
 
 static void test_cpu_buff_start(struct trace_iterator *iter)
@@ -1818,452 +1529,88 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
        trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
-static enum print_line_t
-print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
-{
-       struct trace_seq *s = &iter->seq;
-       unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
-       struct trace_entry *next_entry;
-       unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
-       struct trace_entry *entry = iter->ent;
-       unsigned long abs_usecs;
-       unsigned long rel_usecs;
-       u64 next_ts;
-       char *comm;
-       int S, T;
-       int i;
-
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
-
-       test_cpu_buff_start(iter);
-
-       next_entry = find_next_entry(iter, NULL, &next_ts);
-       if (!next_entry)
-               next_ts = iter->ts;
-       rel_usecs = ns2usecs(next_ts - iter->ts);
-       abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
-
-       if (verbose) {
-               comm = trace_find_cmdline(entry->pid);
-               trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
-                                " %ld.%03ldms (+%ld.%03ldms): ",
-                                comm,
-                                entry->pid, cpu, entry->flags,
-                                entry->preempt_count, trace_idx,
-                                ns2usecs(iter->ts),
-                                abs_usecs/1000,
-                                abs_usecs % 1000, rel_usecs/1000,
-                                rel_usecs % 1000);
-       } else {
-               lat_print_generic(s, entry, cpu);
-               lat_print_timestamp(s, abs_usecs, rel_usecs);
-       }
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-
-               trace_assign_type(field, entry);
-
-               seq_print_ip_sym(s, field->ip, sym_flags);
-               trace_seq_puts(s, " (");
-               seq_print_ip_sym(s, field->parent_ip, sym_flags);
-               trace_seq_puts(s, ")\n");
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-
-               trace_assign_type(field, entry);
-
-               T = task_state_char(field->next_state);
-               S = task_state_char(field->prev_state);
-               comm = trace_find_cmdline(field->next_pid);
-               trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
-                                field->prev_pid,
-                                field->prev_prio,
-                                S, entry->type == TRACE_CTX ? "==>" : "  +",
-                                field->next_cpu,
-                                field->next_pid,
-                                field->next_prio,
-                                T, comm);
-               break;
-       }
-       case TRACE_SPECIAL: {
-               struct special_entry *field;
-
-               trace_assign_type(field, entry);
-
-               trace_seq_printf(s, "# %ld %ld %ld\n",
-                                field->arg1,
-                                field->arg2,
-                                field->arg3);
-               break;
-       }
-       case TRACE_STACK: {
-               struct stack_entry *field;
-
-               trace_assign_type(field, entry);
-
-               for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-                       if (i)
-                               trace_seq_puts(s, " <= ");
-                       seq_print_ip_sym(s, field->caller[i], sym_flags);
-               }
-               trace_seq_puts(s, "\n");
-               break;
-       }
-       case TRACE_PRINT: {
-               struct print_entry *field;
-
-               trace_assign_type(field, entry);
-
-               seq_print_ip_sym(s, field->ip, sym_flags);
-               trace_seq_printf(s, ": %s", field->buf);
-               if (entry->flags & TRACE_FLAG_CONT)
-                       trace_seq_print_cont(s, iter);
-               break;
-       }
-       case TRACE_BRANCH: {
-               struct trace_branch *field;
-
-               trace_assign_type(field, entry);
-
-               trace_seq_printf(s, "[%s] %s:%s:%d\n",
-                                field->correct ? "  ok  " : " MISS ",
-                                field->func,
-                                field->file,
-                                field->line);
-               break;
-       }
-       case TRACE_USER_STACK: {
-               struct userstack_entry *field;
-
-               trace_assign_type(field, entry);
-
-               seq_print_userip_objs(field, s, sym_flags);
-               trace_seq_putc(s, '\n');
-               break;
-       }
-       default:
-               trace_seq_printf(s, "Unknown type %d\n", entry->type);
-       }
-       return TRACE_TYPE_HANDLED;
-}
-
 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
-       unsigned long usec_rem;
-       unsigned long long t;
-       unsigned long secs;
-       char *comm;
-       int ret;
-       int S, T;
-       int i;
+       struct trace_event *event;
 
        entry = iter->ent;
 
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
-
        test_cpu_buff_start(iter);
 
-       comm = trace_find_cmdline(iter->ent->pid);
-
-       t = ns2usecs(iter->ts);
-       usec_rem = do_div(t, 1000000ULL);
-       secs = (unsigned long)t;
-
-       ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-       ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-       ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-
-               trace_assign_type(field, entry);
-
-               ret = seq_print_ip_sym(s, field->ip, sym_flags);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
-                                               field->parent_ip) {
-                       ret = trace_seq_printf(s, " <-");
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-                       ret = seq_print_ip_sym(s,
-                                              field->parent_ip,
-                                              sym_flags);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
-               ret = trace_seq_printf(s, "\n");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-
-               trace_assign_type(field, entry);
-
-               T = task_state_char(field->next_state);
-               S = task_state_char(field->prev_state);
-               ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
-                                      field->prev_pid,
-                                      field->prev_prio,
-                                      S,
-                                      entry->type == TRACE_CTX ? "==>" : "  +",
-                                      field->next_cpu,
-                                      field->next_pid,
-                                      field->next_prio,
-                                      T);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_SPECIAL: {
-               struct special_entry *field;
-
-               trace_assign_type(field, entry);
-
-               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-                                field->arg1,
-                                field->arg2,
-                                field->arg3);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_STACK: {
-               struct stack_entry *field;
-
-               trace_assign_type(field, entry);
+       event = ftrace_find_event(entry->type);
 
-               for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-                       if (i) {
-                               ret = trace_seq_puts(s, " <= ");
-                               if (!ret)
-                                       return TRACE_TYPE_PARTIAL_LINE;
-                       }
-                       ret = seq_print_ip_sym(s, field->caller[i],
-                                              sym_flags);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
+       if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+               if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+                       if (!trace_print_lat_context(iter))
+                               goto partial;
+               } else {
+                       if (!trace_print_context(iter))
+                               goto partial;
                }
-               ret = trace_seq_puts(s, "\n");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_PRINT: {
-               struct print_entry *field;
-
-               trace_assign_type(field, entry);
-
-               seq_print_ip_sym(s, field->ip, sym_flags);
-               trace_seq_printf(s, ": %s", field->buf);
-               if (entry->flags & TRACE_FLAG_CONT)
-                       trace_seq_print_cont(s, iter);
-               break;
-       }
-       case TRACE_GRAPH_RET: {
-               return print_graph_function(iter);
        }
-       case TRACE_GRAPH_ENT: {
-               return print_graph_function(iter);
-       }
-       case TRACE_BRANCH: {
-               struct trace_branch *field;
-
-               trace_assign_type(field, entry);
 
-               trace_seq_printf(s, "[%s] %s:%s:%d\n",
-                                field->correct ? "  ok  " : " MISS ",
-                                field->func,
-                                field->file,
-                                field->line);
-               break;
-       }
-       case TRACE_USER_STACK: {
-               struct userstack_entry *field;
+       if (event)
+               return event->trace(iter, sym_flags);
 
-               trace_assign_type(field, entry);
+       if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
+               goto partial;
 
-               ret = seq_print_userip_objs(field, s, sym_flags);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               ret = trace_seq_putc(s, '\n');
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       }
        return TRACE_TYPE_HANDLED;
+partial:
+       return TRACE_TYPE_PARTIAL_LINE;
 }
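The printers now dispatch through struct trace_event callbacks looked up by entry type via ftrace_find_event(). A minimal sketch of such an event for the hypothetical example_entry from the earlier sketch (everything named example_* is made up; the registration helper is assumed to be register_ftrace_event() from trace_output.c):

	static enum print_line_t example_trace_output(struct trace_iterator *iter,
						      int flags)
	{
		struct trace_seq *s = &iter->seq;
		struct example_entry *field;

		trace_assign_type(field, iter->ent);

		if (!trace_seq_printf(s, "example: %lu\n", field->value))
			return TRACE_TYPE_PARTIAL_LINE;

		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event example_trace_event = {
		.type	= TRACE_EXAMPLE,	/* hypothetical type id */
		.trace	= example_trace_output,
		/* .raw, .hex and .binary would be filled in the same way */
	};

	/* registered once at init time: register_ftrace_event(&example_trace_event); */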
 
 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
-       int ret;
-       int S, T;
+       struct trace_event *event;
 
        entry = iter->ent;
 
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
-
-       ret = trace_seq_printf(s, "%d %d %llu ",
-               entry->pid, iter->cpu, iter->ts);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-
-               trace_assign_type(field, entry);
-
-               ret = trace_seq_printf(s, "%x %x\n",
-                                       field->ip,
-                                       field->parent_ip);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-
-               trace_assign_type(field, entry);
-
-               T = task_state_char(field->next_state);
-               S = entry->type == TRACE_WAKE ? '+' :
-                       task_state_char(field->prev_state);
-               ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
-                                      field->prev_pid,
-                                      field->prev_prio,
-                                      S,
-                                      field->next_cpu,
-                                      field->next_pid,
-                                      field->next_prio,
-                                      T);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
+       if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+               if (!trace_seq_printf(s, "%d %d %llu ",
+                                     entry->pid, iter->cpu, iter->ts))
+                       goto partial;
        }
-       case TRACE_SPECIAL:
-       case TRACE_USER_STACK:
-       case TRACE_STACK: {
-               struct special_entry *field;
-
-               trace_assign_type(field, entry);
 
-               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-                                field->arg1,
-                                field->arg2,
-                                field->arg3);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_PRINT: {
-               struct print_entry *field;
+       event = ftrace_find_event(entry->type);
+       if (event)
+               return event->raw(iter, 0);
 
-               trace_assign_type(field, entry);
+       if (!trace_seq_printf(s, "%d ?\n", entry->type))
+               goto partial;
 
-               trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
-               if (entry->flags & TRACE_FLAG_CONT)
-                       trace_seq_print_cont(s, iter);
-               break;
-       }
-       }
        return TRACE_TYPE_HANDLED;
+partial:
+       return TRACE_TYPE_PARTIAL_LINE;
 }
 
-#define SEQ_PUT_FIELD_RET(s, x)                                \
-do {                                                   \
-       if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
-               return 0;                               \
-} while (0)
-
-#define SEQ_PUT_HEX_FIELD_RET(s, x)                    \
-do {                                                   \
-       BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);     \
-       if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
-               return 0;                               \
-} while (0)
-
 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
        unsigned char newline = '\n';
        struct trace_entry *entry;
-       int S, T;
+       struct trace_event *event;
 
        entry = iter->ent;
 
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
-
-       SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
-       SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
-       SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
-
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-
-               trace_assign_type(field, entry);
-
-               SEQ_PUT_HEX_FIELD_RET(s, field->ip);
-               SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-
-               trace_assign_type(field, entry);
-
-               T = task_state_char(field->next_state);
-               S = entry->type == TRACE_WAKE ? '+' :
-                       task_state_char(field->prev_state);
-               SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
-               SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
-               SEQ_PUT_HEX_FIELD_RET(s, S);
-               SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
-               SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
-               SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
-               SEQ_PUT_HEX_FIELD_RET(s, T);
-               break;
+       if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+               SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
+               SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
+               SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
        }
-       case TRACE_SPECIAL:
-       case TRACE_USER_STACK:
-       case TRACE_STACK: {
-               struct special_entry *field;
 
-               trace_assign_type(field, entry);
-
-               SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
-               SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
-               SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
-               break;
-       }
+       event = ftrace_find_event(entry->type);
+       if (event) {
+               enum print_line_t ret = event->hex(iter, 0);
+               if (ret != TRACE_TYPE_HANDLED)
+                       return ret;
        }
+
        SEQ_PUT_FIELD_RET(s, newline);
 
        return TRACE_TYPE_HANDLED;
@@ -2278,13 +1625,10 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 
        trace_assign_type(field, entry);
 
-       ret = trace_seq_printf(s, field->buf);
+       ret = trace_seq_bprintf(s, field->fmt, field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
-       if (entry->flags & TRACE_FLAG_CONT)
-               trace_seq_print_cont(s, iter);
-
        return TRACE_TYPE_HANDLED;
 }
 
@@ -2292,53 +1636,18 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
+       struct trace_event *event;
 
        entry = iter->ent;
 
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
-
-       SEQ_PUT_FIELD_RET(s, entry->pid);
-       SEQ_PUT_FIELD_RET(s, entry->cpu);
-       SEQ_PUT_FIELD_RET(s, iter->ts);
-
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-
-               trace_assign_type(field, entry);
-
-               SEQ_PUT_FIELD_RET(s, field->ip);
-               SEQ_PUT_FIELD_RET(s, field->parent_ip);
-               break;
-       }
-       case TRACE_CTX: {
-               struct ctx_switch_entry *field;
-
-               trace_assign_type(field, entry);
-
-               SEQ_PUT_FIELD_RET(s, field->prev_pid);
-               SEQ_PUT_FIELD_RET(s, field->prev_prio);
-               SEQ_PUT_FIELD_RET(s, field->prev_state);
-               SEQ_PUT_FIELD_RET(s, field->next_pid);
-               SEQ_PUT_FIELD_RET(s, field->next_prio);
-               SEQ_PUT_FIELD_RET(s, field->next_state);
-               break;
+       if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+               SEQ_PUT_FIELD_RET(s, entry->pid);
+               SEQ_PUT_FIELD_RET(s, iter->cpu);
+               SEQ_PUT_FIELD_RET(s, iter->ts);
        }
-       case TRACE_SPECIAL:
-       case TRACE_USER_STACK:
-       case TRACE_STACK: {
-               struct special_entry *field;
-
-               trace_assign_type(field, entry);
 
-               SEQ_PUT_FIELD_RET(s, field->arg1);
-               SEQ_PUT_FIELD_RET(s, field->arg2);
-               SEQ_PUT_FIELD_RET(s, field->arg3);
-               break;
-       }
-       }
-       return 1;
+       event = ftrace_find_event(entry->type);
+       return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
 }
 
 static int trace_empty(struct trace_iterator *iter)
@@ -2382,9 +1691,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
        if (trace_flags & TRACE_ITER_RAW)
                return print_raw_fmt(iter);
 
-       if (iter->iter_flags & TRACE_FILE_LAT_FMT)
-               return print_lat_fmt(iter, iter->idx, iter->cpu);
-
        return print_trace_fmt(iter);
 }
 
@@ -2426,30 +1732,40 @@ static struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file, int *ret)
+__tracing_open(struct inode *inode, struct file *file)
 {
+       long cpu_file = (long) inode->i_private;
+       void *fail_ret = ERR_PTR(-ENOMEM);
        struct trace_iterator *iter;
        struct seq_file *m;
-       int cpu;
+       int cpu, ret;
 
-       if (tracing_disabled) {
-               *ret = -ENODEV;
-               return NULL;
-       }
+       if (tracing_disabled)
+               return ERR_PTR(-ENODEV);
 
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-       if (!iter) {
-               *ret = -ENOMEM;
-               goto out;
-       }
+       if (!iter)
+               return ERR_PTR(-ENOMEM);
 
+       /*
+        * We make a copy of the current tracer to avoid concurrent
+        * changes to it while we are reading.
+        */
        mutex_lock(&trace_types_lock);
+       iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
+       if (!iter->trace)
+               goto fail;
+
+       if (current_trace)
+               *iter->trace = *current_trace;
+
        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else
-               iter->tr = inode->i_private;
-       iter->trace = current_trace;
+               iter->tr = &global_trace;
        iter->pos = -1;
+       mutex_init(&iter->mutex);
+       iter->cpu_file = cpu_file;
 
        /* Notify the tracer early; before we stop tracing. */
        if (iter->trace && iter->trace->open)
@@ -2459,20 +1775,30 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
        if (ring_buffer_overruns(iter->tr->buffer))
                iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
+       if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
+               for_each_tracing_cpu(cpu) {
 
-       for_each_tracing_cpu(cpu) {
+                       iter->buffer_iter[cpu] =
+                               ring_buffer_read_start(iter->tr->buffer, cpu);
 
+                       if (!iter->buffer_iter[cpu])
+                               goto fail_buffer;
+               }
+       } else {
+               cpu = iter->cpu_file;
                iter->buffer_iter[cpu] =
-                       ring_buffer_read_start(iter->tr->buffer, cpu);
+                               ring_buffer_read_start(iter->tr->buffer, cpu);
 
                if (!iter->buffer_iter[cpu])
-                       goto fail_buffer;
+                       goto fail;
        }
 
        /* TODO stop tracer */
-       *ret = seq_open(file, &tracer_seq_ops);
-       if (*ret)
+       ret = seq_open(file, &tracer_seq_ops);
+       if (ret < 0) {
+               fail_ret = ERR_PTR(ret);
                goto fail_buffer;
+       }
 
        m = file->private_data;
        m->private = iter;
@@ -2482,7 +1808,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 
        mutex_unlock(&trace_types_lock);
 
- out:
        return iter;
 
  fail_buffer:
@@ -2490,10 +1815,12 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
                if (iter->buffer_iter[cpu])
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
        }
+ fail:
        mutex_unlock(&trace_types_lock);
+       kfree(iter->trace);
        kfree(iter);
 
-       return ERR_PTR(-ENOMEM);
+       return fail_ret;
 }
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2505,7 +1832,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
        return 0;
 }
 
-int tracing_release(struct inode *inode, struct file *file)
+static int tracing_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct trace_iterator *iter = m->private;
@@ -2525,35 +1852,28 @@ int tracing_release(struct inode *inode, struct file *file)
        mutex_unlock(&trace_types_lock);
 
        seq_release(inode, file);
+       mutex_destroy(&iter->mutex);
+       kfree(iter->trace);
        kfree(iter);
        return 0;
 }
 
 static int tracing_open(struct inode *inode, struct file *file)
 {
-       int ret;
+       struct trace_iterator *iter;
+       int ret = 0;
 
-       __tracing_open(inode, file, &ret);
+       iter = __tracing_open(inode, file);
+       if (IS_ERR(iter))
+               ret = PTR_ERR(iter);
+       else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+               iter->iter_flags |= TRACE_FILE_LAT_FMT;
 
        return ret;
 }
 
-static int tracing_lt_open(struct inode *inode, struct file *file)
-{
-       struct trace_iterator *iter;
-       int ret;
-
-       iter = __tracing_open(inode, file, &ret);
-
-       if (!ret)
-               iter->iter_flags |= TRACE_FILE_LAT_FMT;
-
-       return ret;
-}
-
-
-static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
 {
        struct tracer *t = m->private;
 
@@ -2623,21 +1943,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
        return ret;
 }
 
-static struct file_operations tracing_fops = {
+static const struct file_operations tracing_fops = {
        .open           = tracing_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = tracing_release,
 };
 
-static struct file_operations tracing_lt_fops = {
-       .open           = tracing_lt_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = tracing_release,
-};
-
-static struct file_operations show_traces_fops = {
+static const struct file_operations show_traces_fops = {
        .open           = show_traces_open,
        .read           = seq_read,
        .release        = seq_release,
@@ -2730,7 +2043,7 @@ err_unlock:
        return err;
 }
 
-static struct file_operations tracing_cpumask_fops = {
+static const struct file_operations tracing_cpumask_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_cpumask_read,
        .write          = tracing_cpumask_write,
@@ -2740,57 +2053,62 @@ static ssize_t
 tracing_trace_options_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
-       int i;
+       struct tracer_opt *trace_opts;
+       u32 tracer_flags;
+       int len = 0;
        char *buf;
        int r = 0;
-       int len = 0;
-       u32 tracer_flags = current_trace->flags->val;
-       struct tracer_opt *trace_opts = current_trace->flags->opts;
+       int i;
 
 
-       /* calulate max size */
+       /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
                len += strlen(trace_options[i]);
-               len += 3; /* "no" and space */
+               len += 3; /* "no" and newline */
        }
 
+       mutex_lock(&trace_types_lock);
+       tracer_flags = current_trace->flags->val;
+       trace_opts = current_trace->flags->opts;
+
        /*
         * Increase the size with names of options specific
         * of the current tracer.
         */
        for (i = 0; trace_opts[i].name; i++) {
                len += strlen(trace_opts[i].name);
-               len += 3; /* "no" and space */
+               len += 3; /* "no" and newline */
        }
 
        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
-       if (!buf)
+       if (!buf) {
+               mutex_unlock(&trace_types_lock);
                return -ENOMEM;
+       }
 
        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
-                       r += sprintf(buf + r, "%s ", trace_options[i]);
+                       r += sprintf(buf + r, "%s\n", trace_options[i]);
                else
-                       r += sprintf(buf + r, "no%s ", trace_options[i]);
+                       r += sprintf(buf + r, "no%s\n", trace_options[i]);
        }
 
        for (i = 0; trace_opts[i].name; i++) {
                if (tracer_flags & trace_opts[i].bit)
-                       r += sprintf(buf + r, "%s ",
+                       r += sprintf(buf + r, "%s\n",
                                trace_opts[i].name);
                else
-                       r += sprintf(buf + r, "no%s ",
+                       r += sprintf(buf + r, "no%s\n",
                                trace_opts[i].name);
        }
+       mutex_unlock(&trace_types_lock);
 
-       r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);
 
        r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 
        kfree(buf);
-
        return r;
 }
 
@@ -2865,7 +2183,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
        /* If no option could be set, test the specific tracer options */
        if (!trace_options[i]) {
+               mutex_lock(&trace_types_lock);
                ret = set_tracer_option(current_trace, cmp, neg);
+               mutex_unlock(&trace_types_lock);
                if (ret)
                        return ret;
        }
@@ -2875,7 +2195,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static struct file_operations tracing_iter_fops = {
+static const struct file_operations tracing_iter_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_trace_options_read,
        .write          = tracing_trace_options_write,
@@ -2908,7 +2228,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
                                        readme_msg, strlen(readme_msg));
 }
 
-static struct file_operations tracing_readme_fops = {
+static const struct file_operations tracing_readme_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_readme_read,
 };
@@ -2930,7 +2250,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 {
        struct trace_array *tr = filp->private_data;
        char buf[64];
-       long val;
+       unsigned long val;
        int ret;
 
        if (cnt >= sizeof(buf))
@@ -2985,8 +2305,23 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static int tracing_set_tracer(char *buf)
+int tracer_init(struct tracer *t, struct trace_array *tr)
+{
+       tracing_reset_online_cpus(tr);
+       return t->init(tr);
+}
+
+struct trace_option_dentry;
+
+static struct trace_option_dentry *
+create_trace_option_files(struct tracer *tracer);
+
+static void
+destroy_trace_option_files(struct trace_option_dentry *topts);
+
+static int tracing_set_tracer(const char *buf)
 {
+       static struct trace_option_dentry *topts;
        struct trace_array *tr = &global_trace;
        struct tracer *t;
        int ret = 0;
@@ -3007,9 +2342,14 @@ static int tracing_set_tracer(char *buf)
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
 
+       destroy_trace_option_files(topts);
+
        current_trace = t;
+
+       topts = create_trace_option_files(current_trace);
+
        if (t->init) {
-               ret = t->init(tr);
+               ret = tracer_init(t, tr);
                if (ret)
                        goto out;
        }
@@ -3072,9 +2412,9 @@ static ssize_t
 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
-       long *ptr = filp->private_data;
+       unsigned long *ptr = filp->private_data;
        char buf[64];
-       long val;
+       unsigned long val;
        int ret;
 
        if (cnt >= sizeof(buf))
@@ -3094,54 +2434,96 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static atomic_t tracing_reader;
-
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
+       long cpu_file = (long) inode->i_private;
        struct trace_iterator *iter;
+       int ret = 0;
 
        if (tracing_disabled)
                return -ENODEV;
 
-       /* We only allow for reader of the pipe */
-       if (atomic_inc_return(&tracing_reader) != 1) {
-               atomic_dec(&tracing_reader);
-               return -EBUSY;
+       mutex_lock(&trace_types_lock);
+
+       /* We only allow one reader per cpu */
+       if (cpu_file == TRACE_PIPE_ALL_CPU) {
+               if (!cpumask_empty(tracing_reader_cpumask)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+               cpumask_setall(tracing_reader_cpumask);
+       } else {
+               if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
+                       cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
+               else {
+                       ret = -EBUSY;
+                       goto out;
+               }
        }
 
        /* create a buffer to store the information to pass to userspace */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-       if (!iter)
-               return -ENOMEM;
+       if (!iter) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
-       if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
-               kfree(iter);
-               return -ENOMEM;
+       /*
+        * We make a copy of the current tracer to avoid concurrent
+        * changes to it while we are reading.
+        */
+       iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
+       if (!iter->trace) {
+               ret = -ENOMEM;
+               goto fail;
        }
+       if (current_trace)
+               *iter->trace = *current_trace;
 
-       mutex_lock(&trace_types_lock);
+       if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto fail;
+       }
 
        /* trace pipe does not show start of buffer */
        cpumask_setall(iter->started);
 
+       iter->cpu_file = cpu_file;
        iter->tr = &global_trace;
-       iter->trace = current_trace;
+       mutex_init(&iter->mutex);
        filp->private_data = iter;
 
        if (iter->trace->pipe_open)
                iter->trace->pipe_open(iter);
+
+out:
        mutex_unlock(&trace_types_lock);
+       return ret;
 
-       return 0;
+fail:
+       kfree(iter->trace);
+       kfree(iter);
+       mutex_unlock(&trace_types_lock);
+       return ret;
 }
 
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
        struct trace_iterator *iter = file->private_data;
 
+       mutex_lock(&trace_types_lock);
+
+       if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
+               cpumask_clear(tracing_reader_cpumask);
+       else
+               cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
+
+       mutex_unlock(&trace_types_lock);
+
        free_cpumask_var(iter->started);
+       mutex_destroy(&iter->mutex);
+       kfree(iter->trace);
        kfree(iter);
-       atomic_dec(&tracing_reader);
 
        return 0;
 }
@@ -3167,67 +2549,57 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
        }
 }
 
-/*
- * Consumer reader.
- */
-static ssize_t
-tracing_read_pipe(struct file *filp, char __user *ubuf,
-                 size_t cnt, loff_t *ppos)
+
+void default_wait_pipe(struct trace_iterator *iter)
 {
-       struct trace_iterator *iter = filp->private_data;
-       ssize_t sret;
+       DEFINE_WAIT(wait);
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
+       prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
 
-       trace_seq_reset(&iter->seq);
+       if (trace_empty(iter))
+               schedule();
 
-       mutex_lock(&trace_types_lock);
-       if (iter->trace->read) {
-               sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
-               if (sret)
-                       goto out;
-       }
+       finish_wait(&trace_wait, &wait);
+}
+
+/*
+ * This is a makeshift waitqueue.
+ * A tracer might use this callback in some rare cases:
+ *
+ *  1) the current tracer might hold the runqueue lock when it wakes up
+ *     a reader, hence a deadlock (sched, function, and function graph tracers)
+ *  2) the function tracers trace all functions, and we don't want
+ *     the overhead of calling wake_up and friends
+ *     (and of tracing them too)
+ *
+ *     Anyway, this is a very primitive wakeup.
+ */
+void poll_wait_pipe(struct trace_iterator *iter)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+       /* sleep for 100 msecs, and try again. */
+       schedule_timeout(HZ / 10);
+}
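A tracer selects its waiting strategy through the wait_pipe callback of its struct tracer (tracing_wait_pipe() below calls iter->trace->wait_pipe()). A hedged sketch with a made-up tracer that opts into the polling wait above:

	/* Sketch only: everything named example_* is made up. */
	static int example_tracer_init(struct trace_array *tr)
	{
		return 0;
	}

	static void example_tracer_reset(struct trace_array *tr)
	{
	}

	static struct tracer example_tracer __read_mostly = {
		.name		= "example",
		.init		= example_tracer_init,
		.reset		= example_tracer_reset,
		.wait_pipe	= poll_wait_pipe,	/* or default_wait_pipe */
	};

	/* registered via register_tracer(&example_tracer) at init time */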
+
+/* Must be called with iter->mutex held. */
+static int tracing_wait_pipe(struct file *filp)
+{
+       struct trace_iterator *iter = filp->private_data;
 
-waitagain:
-       sret = 0;
        while (trace_empty(iter)) {
 
                if ((filp->f_flags & O_NONBLOCK)) {
-                       sret = -EAGAIN;
-                       goto out;
+                       return -EAGAIN;
                }
 
-               /*
-                * This is a make-shift waitqueue. The reason we don't use
-                * an actual wait queue is because:
-                *  1) we only ever have one waiter
-                *  2) the tracing, traces all functions, we don't want
-                *     the overhead of calling wake_up and friends
-                *     (and tracing them too)
-                *     Anyway, this is really very primitive wakeup.
-                */
-               set_current_state(TASK_INTERRUPTIBLE);
-               iter->tr->waiter = current;
-
-               mutex_unlock(&trace_types_lock);
-
-               /* sleep for 100 msecs, and try again. */
-               schedule_timeout(HZ/10);
+               mutex_unlock(&iter->mutex);
 
-               mutex_lock(&trace_types_lock);
-
-               iter->tr->waiter = NULL;
+               iter->trace->wait_pipe(iter);
 
-               if (signal_pending(current)) {
-                       sret = -EINTR;
-                       goto out;
-               }
+               mutex_lock(&iter->mutex);
 
-               if (iter->trace != current_trace)
-                       goto out;
+               if (signal_pending(current))
+                       return -EINTR;
 
                /*
                 * We block until we read something and tracing is disabled.
@@ -3240,13 +2612,59 @@ waitagain:
                 */
                if (!tracer_enabled && iter->pos)
                        break;
+       }
+
+       return 1;
+}
+
+/*
+ * Consumer reader.
+ */
+static ssize_t
+tracing_read_pipe(struct file *filp, char __user *ubuf,
+                 size_t cnt, loff_t *ppos)
+{
+       struct trace_iterator *iter = filp->private_data;
+       static struct tracer *old_tracer;
+       ssize_t sret;
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               return sret;
+
+       trace_seq_init(&iter->seq);
+
+       /* copy the tracer to avoid using a global lock all around */
+       mutex_lock(&trace_types_lock);
+       if (unlikely(old_tracer != current_trace && current_trace)) {
+               old_tracer = current_trace;
+               *iter->trace = *current_trace;
+       }
+       mutex_unlock(&trace_types_lock);
 
-               continue;
+       /*
+        * Avoid more than one consumer on a single file descriptor.
+        * This is just a matter of trace coherency; the ring buffer itself
+        * is protected.
+        */
+       mutex_lock(&iter->mutex);
+       if (iter->trace->read) {
+               sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+               if (sret)
+                       goto out;
        }
 
+waitagain:
+       sret = tracing_wait_pipe(filp);
+       if (sret <= 0)
+               goto out;
+
        /* stop when tracing is finished */
-       if (trace_empty(iter))
+       if (trace_empty(iter)) {
+               sret = 0;
                goto out;
+       }
 
        if (cnt >= PAGE_SIZE)
                cnt = PAGE_SIZE - 1;
@@ -3267,8 +2685,8 @@ waitagain:
                        iter->seq.len = len;
                        break;
                }
-
-               trace_consume(iter);
+               if (ret != TRACE_TYPE_NO_CONSUME)
+                       trace_consume(iter);
 
                if (iter->seq.len >= cnt)
                        break;
@@ -3277,7 +2695,7 @@ waitagain:
        /* Now copy what we have to the user */
        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
        if (iter->seq.readpos >= iter->seq.len)
-               trace_seq_reset(&iter->seq);
+               trace_seq_init(&iter->seq);
 
        /*
         * If there was nothing to send to the user, in spite of consuming trace
@@ -3287,11 +2705,148 @@ waitagain:
                goto waitagain;
 
 out:
-       mutex_unlock(&trace_types_lock);
+       mutex_unlock(&iter->mutex);
 
        return sret;
 }
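For reference, a minimal user-space sketch of the consumer protocol implemented above: a blocking read() of trace_pipe sleeps until entries arrive, while an O_NONBLOCK descriptor gets EAGAIN on an empty buffer. The /sys/kernel/debug mount point is an assumption, and error handling is reduced to the bare minimum.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		/* Adding O_NONBLOCK would make read() fail with EAGAIN when empty. */
		int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

		if (fd < 0)
			return 1;

		while ((n = read(fd, buf, sizeof(buf))) > 0)	/* consumes entries */
			fwrite(buf, 1, n, stdout);

		close(fd);
		return 0;
	}
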
 
+static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
+                                    struct pipe_buffer *buf)
+{
+       __free_page(buf->page);
+}
+
+static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
+                                    unsigned int idx)
+{
+       __free_page(spd->pages[idx]);
+}
+
+static struct pipe_buf_operations tracing_pipe_buf_ops = {
+       .can_merge              = 0,
+       .map                    = generic_pipe_buf_map,
+       .unmap                  = generic_pipe_buf_unmap,
+       .confirm                = generic_pipe_buf_confirm,
+       .release                = tracing_pipe_buf_release,
+       .steal                  = generic_pipe_buf_steal,
+       .get                    = generic_pipe_buf_get,
+};
+
+static size_t
+tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
+{
+       size_t count;
+       int ret;
+
+       /* Seq buffer is page-sized, exactly what we need. */
+       for (;;) {
+               count = iter->seq.len;
+               ret = print_trace_line(iter);
+               count = iter->seq.len - count;
+               if (rem < count) {
+                       rem = 0;
+                       iter->seq.len -= count;
+                       break;
+               }
+               if (ret == TRACE_TYPE_PARTIAL_LINE) {
+                       iter->seq.len -= count;
+                       break;
+               }
+
+               trace_consume(iter);
+               rem -= count;
+               if (!find_next_entry_inc(iter)) {
+                       rem = 0;
+                       iter->ent = NULL;
+                       break;
+               }
+       }
+
+       return rem;
+}
+
+static ssize_t tracing_splice_read_pipe(struct file *filp,
+                                       loff_t *ppos,
+                                       struct pipe_inode_info *pipe,
+                                       size_t len,
+                                       unsigned int flags)
+{
+       struct page *pages[PIPE_BUFFERS];
+       struct partial_page partial[PIPE_BUFFERS];
+       struct trace_iterator *iter = filp->private_data;
+       struct splice_pipe_desc spd = {
+               .pages          = pages,
+               .partial        = partial,
+               .nr_pages       = 0, /* This gets updated below. */
+               .flags          = flags,
+               .ops            = &tracing_pipe_buf_ops,
+               .spd_release    = tracing_spd_release_pipe,
+       };
+       static struct tracer *old_tracer;
+       ssize_t ret;
+       size_t rem;
+       unsigned int i;
+
+       /* copy the tracer to avoid using a global lock all around */
+       mutex_lock(&trace_types_lock);
+       if (unlikely(old_tracer != current_trace && current_trace)) {
+               old_tracer = current_trace;
+               *iter->trace = *current_trace;
+       }
+       mutex_unlock(&trace_types_lock);
+
+       mutex_lock(&iter->mutex);
+
+       if (iter->trace->splice_read) {
+               ret = iter->trace->splice_read(iter, filp,
+                                              ppos, pipe, len, flags);
+               if (ret)
+                       goto out_err;
+       }
+
+       ret = tracing_wait_pipe(filp);
+       if (ret <= 0)
+               goto out_err;
+
+       if (!iter->ent && !find_next_entry_inc(iter)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Fill as many pages as possible. */
+       for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
+               pages[i] = alloc_page(GFP_KERNEL);
+               if (!pages[i])
+                       break;
+
+               rem = tracing_fill_pipe_page(rem, iter);
+
+               /* Copy the data into the page, so we can start over. */
+               ret = trace_seq_to_buffer(&iter->seq,
+                                         page_address(pages[i]),
+                                         iter->seq.len);
+               if (ret < 0) {
+                       __free_page(pages[i]);
+                       break;
+               }
+               partial[i].offset = 0;
+               partial[i].len = iter->seq.len;
+
+               trace_seq_init(&iter->seq);
+       }
+
+       mutex_unlock(&iter->mutex);
+
+       spd.nr_pages = i;
+
+       return splice_to_pipe(pipe, &spd);
+
+out_err:
+       mutex_unlock(&iter->mutex);
+
+       return ret;
+}
+
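A hedged user-space sketch of the new splice path: data is spliced from trace_pipe into an anonymous pipe and from there into a file, so the trace text never passes through a user buffer. The debugfs path is an assumption and error handling is minimal.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int pfd[2];
		int in = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
		int out = open("trace.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		ssize_t n;

		if (in < 0 || out < 0 || pipe(pfd) < 0)
			return 1;

		/* trace_pipe -> pipe (uses tracing_splice_read_pipe), then pipe -> file */
		while ((n = splice(in, NULL, pfd[1], NULL, 65536, 0)) > 0)
			splice(pfd[0], NULL, out, NULL, n, 0);

		return 0;
	}
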
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
@@ -3433,78 +2988,324 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static struct file_operations tracing_max_lat_fops = {
+static const struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
        .write          = tracing_max_lat_write,
 };
 
-static struct file_operations tracing_ctrl_fops = {
+static const struct file_operations tracing_ctrl_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_ctrl_read,
        .write          = tracing_ctrl_write,
 };
 
-static struct file_operations set_tracer_fops = {
+static const struct file_operations set_tracer_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_set_trace_read,
        .write          = tracing_set_trace_write,
 };
 
-static struct file_operations tracing_pipe_fops = {
+static const struct file_operations tracing_pipe_fops = {
        .open           = tracing_open_pipe,
        .poll           = tracing_poll_pipe,
        .read           = tracing_read_pipe,
+       .splice_read    = tracing_splice_read_pipe,
        .release        = tracing_release_pipe,
 };
 
-static struct file_operations tracing_entries_fops = {
+static const struct file_operations tracing_entries_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_entries_read,
        .write          = tracing_entries_write,
 };
 
-static struct file_operations tracing_mark_fops = {
+static const struct file_operations tracing_mark_fops = {
        .open           = tracing_open_generic,
        .write          = tracing_mark_write,
 };
 
-#ifdef CONFIG_DYNAMIC_FTRACE
+struct ftrace_buffer_info {
+       struct trace_array      *tr;
+       void                    *spare;
+       int                     cpu;
+       unsigned int            read;
+};
 
-int __weak ftrace_arch_read_dyn_info(char *buf, int size)
+static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
+       int cpu = (int)(long)inode->i_private;
+       struct ftrace_buffer_info *info;
+
+       if (tracing_disabled)
+               return -ENODEV;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->tr        = &global_trace;
+       info->cpu       = cpu;
+       info->spare     = ring_buffer_alloc_read_page(info->tr->buffer);
+       /* Force reading ring buffer for first read */
+       info->read      = (unsigned int)-1;
+       if (!info->spare)
+               goto out;
+
+       filp->private_data = info;
+
        return 0;
+
+ out:
+       kfree(info);
+       return -ENOMEM;
 }
 
 static ssize_t
-tracing_read_dyn_info(struct file *filp, char __user *ubuf,
-                 size_t cnt, loff_t *ppos)
+tracing_buffers_read(struct file *filp, char __user *ubuf,
+                    size_t count, loff_t *ppos)
 {
-       static char ftrace_dyn_info_buffer[1024];
-       static DEFINE_MUTEX(dyn_info_mutex);
-       unsigned long *p = filp->private_data;
-       char *buf = ftrace_dyn_info_buffer;
-       int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
-       int r;
+       struct ftrace_buffer_info *info = filp->private_data;
+       unsigned int pos;
+       ssize_t ret;
+       size_t size;
 
-       mutex_lock(&dyn_info_mutex);
-       r = sprintf(buf, "%ld ", *p);
+       if (!count)
+               return 0;
 
-       r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
-       buf[r++] = '\n';
+       /* Do we have previous read data to read? */
+       if (info->read < PAGE_SIZE)
+               goto read;
 
-       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       info->read = 0;
 
-       mutex_unlock(&dyn_info_mutex);
+       ret = ring_buffer_read_page(info->tr->buffer,
+                                   &info->spare,
+                                   count,
+                                   info->cpu, 0);
+       if (ret < 0)
+               return 0;
 
-       return r;
-}
+       pos = ring_buffer_page_len(info->spare);
 
-static struct file_operations tracing_dyn_info_fops = {
-       .open           = tracing_open_generic,
-       .read           = tracing_read_dyn_info,
-};
-#endif
+       if (pos < PAGE_SIZE)
+               memset(info->spare + pos, 0, PAGE_SIZE - pos);
+
+read:
+       size = PAGE_SIZE - info->read;
+       if (size > count)
+               size = count;
+
+       ret = copy_to_user(ubuf, info->spare + info->read, size);
+       if (ret == size)
+               return -EFAULT;
+       size -= ret;
+
+       *ppos += size;
+       info->read += size;
+
+       return size;
+}
+
+static int tracing_buffers_release(struct inode *inode, struct file *file)
+{
+       struct ftrace_buffer_info *info = file->private_data;
+
+       ring_buffer_free_read_page(info->tr->buffer, info->spare);
+       kfree(info);
+
+       return 0;
+}
+
+struct buffer_ref {
+       struct ring_buffer      *buffer;
+       void                    *page;
+       int                     ref;
+};
+
+static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
+                                   struct pipe_buffer *buf)
+{
+       struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+       if (--ref->ref)
+               return;
+
+       ring_buffer_free_read_page(ref->buffer, ref->page);
+       kfree(ref);
+       buf->private = 0;
+}
+
+static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
+                                struct pipe_buffer *buf)
+{
+       return 1;
+}
+
+static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+                               struct pipe_buffer *buf)
+{
+       struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+       ref->ref++;
+}
+
+/* Pipe buffer operations for a ring buffer page reference (struct buffer_ref). */
+static struct pipe_buf_operations buffer_pipe_buf_ops = {
+       .can_merge              = 0,
+       .map                    = generic_pipe_buf_map,
+       .unmap                  = generic_pipe_buf_unmap,
+       .confirm                = generic_pipe_buf_confirm,
+       .release                = buffer_pipe_buf_release,
+       .steal                  = buffer_pipe_buf_steal,
+       .get                    = buffer_pipe_buf_get,
+};
+
+/*
+ * Callback from splice_to_pipe(); we may need to release some pages
+ * at the end of the spd in case we errored out while filling the pipe.
+ */
+static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+       struct buffer_ref *ref =
+               (struct buffer_ref *)spd->partial[i].private;
+
+       if (--ref->ref)
+               return;
+
+       ring_buffer_free_read_page(ref->buffer, ref->page);
+       kfree(ref);
+       spd->partial[i].private = 0;
+}
+
+static ssize_t
+tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+                           struct pipe_inode_info *pipe, size_t len,
+                           unsigned int flags)
+{
+       struct ftrace_buffer_info *info = file->private_data;
+       struct partial_page partial[PIPE_BUFFERS];
+       struct page *pages[PIPE_BUFFERS];
+       struct splice_pipe_desc spd = {
+               .pages          = pages,
+               .partial        = partial,
+               .flags          = flags,
+               .ops            = &buffer_pipe_buf_ops,
+               .spd_release    = buffer_spd_release,
+       };
+       struct buffer_ref *ref;
+       int size, i;
+       size_t ret;
+
+       /*
+        * We can't seek on a buffer input.
+        */
+       if (unlikely(*ppos))
+               return -ESPIPE;
+
+       for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
+               struct page *page;
+               int r;
+
+               ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+               if (!ref)
+                       break;
+
+               ref->buffer = info->tr->buffer;
+               ref->page = ring_buffer_alloc_read_page(ref->buffer);
+               if (!ref->page) {
+                       kfree(ref);
+                       break;
+               }
+
+               r = ring_buffer_read_page(ref->buffer, &ref->page,
+                                         len, info->cpu, 0);
+               if (r < 0) {
+                       ring_buffer_free_read_page(ref->buffer,
+                                                  ref->page);
+                       kfree(ref);
+                       break;
+               }
+
+               /*
+                * Zero out any leftover data; this is going to
+                * user land.
+                */
+               size = ring_buffer_page_len(ref->page);
+               if (size < PAGE_SIZE)
+                       memset(ref->page + size, 0, PAGE_SIZE - size);
+
+               page = virt_to_page(ref->page);
+
+               spd.pages[i] = page;
+               spd.partial[i].len = PAGE_SIZE;
+               spd.partial[i].offset = 0;
+               spd.partial[i].private = (unsigned long)ref;
+               spd.nr_pages++;
+       }
+
+       spd.nr_pages = i;
+
+       /* did we read anything? */
+       if (!spd.nr_pages) {
+               if (flags & SPLICE_F_NONBLOCK)
+                       ret = -EAGAIN;
+               else
+                       ret = 0;
+               /* TODO: block */
+               return ret;
+       }
+
+       ret = splice_to_pipe(pipe, &spd);
+
+       return ret;
+}
+
+static const struct file_operations tracing_buffers_fops = {
+       .open           = tracing_buffers_open,
+       .read           = tracing_buffers_read,
+       .release        = tracing_buffers_release,
+       .splice_read    = tracing_buffers_splice_read,
+       .llseek         = no_llseek,
+};
+
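A hedged user-space sketch for the per-cpu binary_buffers/<N> files created later in this patch: tracing_buffers_read() hands out raw ring-buffer pages, so a reader consumes page-sized chunks and leaves decoding to a tool that understands the page format. The 4K page size and the debugfs path are assumptions.

	#include <fcntl.h>
	#include <unistd.h>

	#define PAGE_SZ 4096	/* assumption: 4K pages, matching PAGE_SIZE above */

	int main(void)
	{
		char page[PAGE_SZ];
		ssize_t n;
		int fd = open("/sys/kernel/debug/tracing/binary_buffers/0", O_RDONLY);

		if (fd < 0)
			return 1;

		/* Each read returns at most one ring-buffer page worth of data. */
		while ((n = read(fd, page, sizeof(page))) > 0)
			write(STDOUT_FILENO, page, n);	/* raw binary output */

		close(fd);
		return 0;
	}
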
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+int __weak ftrace_arch_read_dyn_info(char *buf, int size)
+{
+       return 0;
+}
+
+static ssize_t
+tracing_read_dyn_info(struct file *filp, char __user *ubuf,
+                 size_t cnt, loff_t *ppos)
+{
+       static char ftrace_dyn_info_buffer[1024];
+       static DEFINE_MUTEX(dyn_info_mutex);
+       unsigned long *p = filp->private_data;
+       char *buf = ftrace_dyn_info_buffer;
+       int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
+       int r;
+
+       mutex_lock(&dyn_info_mutex);
+       r = sprintf(buf, "%ld ", *p);
+
+       r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
+       buf[r++] = '\n';
+
+       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+       mutex_unlock(&dyn_info_mutex);
+
+       return r;
+}
+
+static const struct file_operations tracing_dyn_info_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_read_dyn_info,
+};
+#endif
 
 static struct dentry *d_tracer;
 
@@ -3526,15 +3327,346 @@ struct dentry *tracing_init_dentry(void)
        return d_tracer;
 }
 
+static struct dentry *d_percpu;
+
+struct dentry *tracing_dentry_percpu(void)
+{
+       static int once;
+       struct dentry *d_tracer;
+
+       if (d_percpu)
+               return d_percpu;
+
+       d_tracer = tracing_init_dentry();
+
+       if (!d_tracer)
+               return NULL;
+
+       d_percpu = debugfs_create_dir("per_cpu", d_tracer);
+
+       if (!d_percpu && !once) {
+               once = 1;
+               pr_warning("Could not create debugfs directory 'per_cpu'\n");
+               return NULL;
+       }
+
+       return d_percpu;
+}
+
+static void tracing_init_debugfs_percpu(long cpu)
+{
+       struct dentry *d_percpu = tracing_dentry_percpu();
+       struct dentry *entry, *d_cpu;
+       /* strlen("cpu") + MAX(log10(cpu)) + '\0' */
+       char cpu_dir[7];
+
+       if (cpu > 999 || cpu < 0)
+               return;
+
+       sprintf(cpu_dir, "cpu%ld", cpu);
+       d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
+       if (!d_cpu) {
+               pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
+               return;
+       }
+
+       /* per cpu trace_pipe */
+       entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
+                               (void *) cpu, &tracing_pipe_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs 'trace_pipe' entry\n");
+
+       /* per cpu trace */
+       entry = debugfs_create_file("trace", 0444, d_cpu,
+                               (void *) cpu, &tracing_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs 'trace' entry\n");
+}
+
 #ifdef CONFIG_FTRACE_SELFTEST
 /* Let selftest have access to static functions in this file */
 #include "trace_selftest.c"
 #endif
 
+struct trace_option_dentry {
+       struct tracer_opt               *opt;
+       struct tracer_flags             *flags;
+       struct dentry                   *entry;
+};
+
+static ssize_t
+trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
+                       loff_t *ppos)
+{
+       struct trace_option_dentry *topt = filp->private_data;
+       char *buf;
+
+       if (topt->flags->val & topt->opt->bit)
+               buf = "1\n";
+       else
+               buf = "0\n";
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                        loff_t *ppos)
+{
+       struct trace_option_dentry *topt = filp->private_data;
+       unsigned long val;
+       char buf[64];
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       ret = 0;
+       switch (val) {
+       case 0:
+               /* do nothing if already cleared */
+               if (!(topt->flags->val & topt->opt->bit))
+                       break;
+
+               mutex_lock(&trace_types_lock);
+               if (current_trace->set_flag)
+                       ret = current_trace->set_flag(topt->flags->val,
+                                                     topt->opt->bit, 0);
+               mutex_unlock(&trace_types_lock);
+               if (ret)
+                       return ret;
+               topt->flags->val &= ~topt->opt->bit;
+               break;
+       case 1:
+               /* do nothing if already set */
+               if (topt->flags->val & topt->opt->bit)
+                       break;
+
+               mutex_lock(&trace_types_lock);
+               if (current_trace->set_flag)
+                       ret = current_trace->set_flag(topt->flags->val,
+                                                     topt->opt->bit, 1);
+               mutex_unlock(&trace_types_lock);
+               if (ret)
+                       return ret;
+               topt->flags->val |= topt->opt->bit;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       *ppos += cnt;
+
+       return cnt;
+}
+
+static const struct file_operations trace_options_fops = {
+       .open = tracing_open_generic,
+       .read = trace_options_read,
+       .write = trace_options_write,
+};
+
+static ssize_t
+trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
+                       loff_t *ppos)
+{
+       long index = (long)filp->private_data;
+       char *buf;
+
+       if (trace_flags & (1 << index))
+               buf = "1\n";
+       else
+               buf = "0\n";
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                        loff_t *ppos)
+{
+       long index = (long)filp->private_data;
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       switch (val) {
+       case 0:
+               trace_flags &= ~(1 << index);
+               break;
+       case 1:
+               trace_flags |= 1 << index;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       *ppos += cnt;
+
+       return cnt;
+}
+
+static const struct file_operations trace_options_core_fops = {
+       .open = tracing_open_generic,
+       .read = trace_options_core_read,
+       .write = trace_options_core_write,
+};
+
+static struct dentry *trace_options_init_dentry(void)
+{
+       struct dentry *d_tracer;
+       static struct dentry *t_options;
+
+       if (t_options)
+               return t_options;
+
+       d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return NULL;
+
+       t_options = debugfs_create_dir("options", d_tracer);
+       if (!t_options) {
+               pr_warning("Could not create debugfs directory 'options'\n");
+               return NULL;
+       }
+
+       return t_options;
+}
+
+static void
+create_trace_option_file(struct trace_option_dentry *topt,
+                        struct tracer_flags *flags,
+                        struct tracer_opt *opt)
+{
+       struct dentry *t_options;
+       struct dentry *entry;
+
+       t_options = trace_options_init_dentry();
+       if (!t_options)
+               return;
+
+       topt->flags = flags;
+       topt->opt = opt;
+
+       entry = debugfs_create_file(opt->name, 0644, t_options, topt,
+                                   &trace_options_fops);
+
+       topt->entry = entry;
+
+}
+
+static struct trace_option_dentry *
+create_trace_option_files(struct tracer *tracer)
+{
+       struct trace_option_dentry *topts;
+       struct tracer_flags *flags;
+       struct tracer_opt *opts;
+       int cnt;
+
+       if (!tracer)
+               return NULL;
+
+       flags = tracer->flags;
+
+       if (!flags || !flags->opts)
+               return NULL;
+
+       opts = flags->opts;
+
+       for (cnt = 0; opts[cnt].name; cnt++)
+               ;
+
+       topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
+       if (!topts)
+               return NULL;
+
+       for (cnt = 0; opts[cnt].name; cnt++)
+               create_trace_option_file(&topts[cnt], flags,
+                                        &opts[cnt]);
+
+       return topts;
+}
+
+static void
+destroy_trace_option_files(struct trace_option_dentry *topts)
+{
+       int cnt;
+
+       if (!topts)
+               return;
+
+       for (cnt = 0; topts[cnt].opt; cnt++) {
+               if (topts[cnt].entry)
+                       debugfs_remove(topts[cnt].entry);
+       }
+
+       kfree(topts);
+}
+
+static struct dentry *
+create_trace_option_core_file(const char *option, long index)
+{
+       struct dentry *t_options;
+       struct dentry *entry;
+
+       t_options = trace_options_init_dentry();
+       if (!t_options)
+               return NULL;
+
+       entry = debugfs_create_file(option, 0644, t_options, (void *)index,
+                                   &trace_options_core_fops);
+
+       return entry;
+}
+
+static __init void create_trace_options_dir(void)
+{
+       struct dentry *t_options;
+       struct dentry *entry;
+       int i;
+
+       t_options = trace_options_init_dentry();
+       if (!t_options)
+               return;
+
+       for (i = 0; trace_options[i]; i++) {
+               entry = create_trace_option_core_file(trace_options[i], i);
+               if (!entry)
+                       pr_warning("Could not create debugfs %s entry\n",
+                                  trace_options[i]);
+       }
+}
+
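A hedged sketch of the "0"/"1" protocol enforced by trace_options_write() and trace_options_core_write() above: anything else is rejected with -EINVAL. The debugfs path and the sample option name are assumptions.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Enable or disable one entry under tracing/options/, e.g. "sym-offset". */
	int set_trace_option(const char *name, int on)
	{
		char path[256];
		int fd, ret;

		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/tracing/options/%s", name);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;

		ret = (write(fd, on ? "1" : "0", 1) == 1) ? 0 : -1;
		close(fd);
		return ret;
	}
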
 static __init int tracer_init_debugfs(void)
 {
        struct dentry *d_tracer;
+       struct dentry *buffers;
        struct dentry *entry;
+       int cpu;
 
        d_tracer = tracing_init_dentry();
 
@@ -3548,18 +3680,15 @@ static __init int tracer_init_debugfs(void)
        if (!entry)
                pr_warning("Could not create debugfs 'trace_options' entry\n");
 
+       create_trace_options_dir();
+
        entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
                                    NULL, &tracing_cpumask_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
 
-       entry = debugfs_create_file("latency_trace", 0444, d_tracer,
-                                   &global_trace, &tracing_lt_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs 'latency_trace' entry\n");
-
        entry = debugfs_create_file("trace", 0444, d_tracer,
-                                   &global_trace, &tracing_fops);
+                                (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'trace' entry\n");
 
@@ -3590,8 +3719,8 @@ static __init int tracer_init_debugfs(void)
        if (!entry)
                pr_warning("Could not create debugfs 'README' entry\n");
 
-       entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
-                                   NULL, &tracing_pipe_fops);
+       entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
+                       (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'trace_pipe' entry\n");
@@ -3608,6 +3737,26 @@ static __init int tracer_init_debugfs(void)
                pr_warning("Could not create debugfs "
                           "'trace_marker' entry\n");
 
+       buffers = debugfs_create_dir("binary_buffers", d_tracer);
+
+       if (!buffers)
+               pr_warning("Could not create buffers directory\n");
+       else {
+               int cpu;
+               char buf[64];
+
+               for_each_tracing_cpu(cpu) {
+                       sprintf(buf, "%d", cpu);
+
+                       entry = debugfs_create_file(buf, 0444, buffers,
+                                                   (void *)(long)cpu,
+                                                   &tracing_buffers_fops);
+                       if (!entry)
+                               pr_warning("Could not create debugfs buffers "
+                                          "'%s' entry\n", buf);
+               }
+       }
+
 #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
@@ -3619,77 +3768,12 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_SYSPROF_TRACER
        init_tracer_sysprof_debugfs(d_tracer);
 #endif
-       return 0;
-}
-
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
-{
-       static DEFINE_SPINLOCK(trace_buf_lock);
-       static char trace_buf[TRACE_BUF_SIZE];
-
-       struct ring_buffer_event *event;
-       struct trace_array *tr = &global_trace;
-       struct trace_array_cpu *data;
-       int cpu, len = 0, size, pc;
-       struct print_entry *entry;
-       unsigned long irq_flags;
-
-       if (tracing_disabled || tracing_selftest_running)
-               return 0;
 
-       pc = preempt_count();
-       preempt_disable_notrace();
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-
-       if (unlikely(atomic_read(&data->disabled)))
-               goto out;
-
-       pause_graph_tracing();
-       spin_lock_irqsave(&trace_buf_lock, irq_flags);
-       len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-       len = min(len, TRACE_BUF_SIZE-1);
-       trace_buf[len] = 0;
-
-       size = sizeof(*entry) + len + 1;
-       event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
-       if (!event)
-               goto out_unlock;
-       entry = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, irq_flags, pc);
-       entry->ent.type                 = TRACE_PRINT;
-       entry->ip                       = ip;
-       entry->depth                    = depth;
-
-       memcpy(&entry->buf, trace_buf, len);
-       entry->buf[len] = 0;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- out_unlock:
-       spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
-       unpause_graph_tracing();
- out:
-       preempt_enable_notrace();
-
-       return len;
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-
-int __ftrace_printk(unsigned long ip, const char *fmt, ...)
-{
-       int ret;
-       va_list ap;
-
-       if (!(trace_flags & TRACE_ITER_PRINTK))
-               return 0;
+       for_each_tracing_cpu(cpu)
+               tracing_init_debugfs_percpu(cpu);
 
-       va_start(ap, fmt);
-       ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-       va_end(ap);
-       return ret;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(__ftrace_printk);
 
 static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
@@ -3750,7 +3834,7 @@ trace_printk_seq(struct trace_seq *s)
 
        printk(KERN_TRACE "%s", s->buffer);
 
-       trace_seq_reset(s);
+       trace_seq_init(s);
 }
 
 void ftrace_dump(void)
@@ -3782,8 +3866,10 @@ void ftrace_dump(void)
 
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+       /* Simulate the iterator */
        iter.tr = &global_trace;
        iter.trace = current_trace;
+       iter.cpu_file = TRACE_PIPE_ALL_CPU;
 
        /*
         * We need to stop all tracing on all CPUS to read the
@@ -3835,8 +3921,12 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
 
+       if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+               goto out_free_tracing_cpumask;
+
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
+       cpumask_clear(tracing_reader_cpumask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(trace_buf_size,
@@ -3871,14 +3961,10 @@ __init static int tracer_alloc_buffers(void)
        trace_init_cmdlines();
 
        register_tracer(&nop_trace);
+       current_trace = &nop_trace;
 #ifdef CONFIG_BOOT_TRACER
        register_tracer(&boot_tracer);
-       current_trace = &boot_tracer;
-       current_trace->init(&global_trace);
-#else
-       current_trace = &nop_trace;
 #endif
-
        /* All seems OK, enable tracing */
        tracing_disabled = 0;
 
@@ -3889,11 +3975,34 @@ __init static int tracer_alloc_buffers(void)
        ret = 0;
 
 out_free_cpumask:
+       free_cpumask_var(tracing_reader_cpumask);
+out_free_tracing_cpumask:
        free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
 out:
        return ret;
 }
+
+__init static int clear_boot_tracer(void)
+{
+       /*
+        * The default bootup tracer name is stored in an init-section
+        * buffer. This function is called from a late initcall. If we
+        * did not find the boot tracer, then clear it out, to prevent
+        * later registration from accessing the buffer that is
+        * about to be freed.
+        */
+       if (!default_bootup_tracer)
+               return 0;
+
+       printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
+              default_bootup_tracer);
+       default_bootup_tracer = NULL;
+
+       return 0;
+}
+
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
+late_initcall(clear_boot_tracer);
index 4d3d381..c5e1d88 100644 (file)
@@ -9,6 +9,8 @@
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
+#include <trace/kmemtrace.h>
+#include <trace/power.h>
 
 enum trace_type {
        __TRACE_FIRST_TYPE = 0,
@@ -16,7 +18,6 @@ enum trace_type {
        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
-       TRACE_CONT,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_SPECIAL,
@@ -29,9 +30,12 @@ enum trace_type {
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_HW_BRANCHES,
+       TRACE_KMEM_ALLOC,
+       TRACE_KMEM_FREE,
        TRACE_POWER,
+       TRACE_BLK,
 
-       __TRACE_LAST_TYPE
+       __TRACE_LAST_TYPE,
 };
 
 /*
@@ -42,7 +46,6 @@ enum trace_type {
  */
 struct trace_entry {
        unsigned char           type;
-       unsigned char           cpu;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
@@ -60,13 +63,13 @@ struct ftrace_entry {
 
 /* Function call entry */
 struct ftrace_graph_ent_entry {
-       struct trace_entry                      ent;
+       struct trace_entry              ent;
        struct ftrace_graph_ent         graph_ent;
 };
 
 /* Function return entry */
 struct ftrace_graph_ret_entry {
-       struct trace_entry                      ent;
+       struct trace_entry              ent;
        struct ftrace_graph_ret         ret;
 };
 extern struct tracer boot_tracer;
@@ -112,13 +115,14 @@ struct userstack_entry {
 };
 
 /*
- * ftrace_printk entry:
+ * trace_printk entry:
  */
 struct print_entry {
        struct trace_entry      ent;
        unsigned long           ip;
        int                     depth;
-       char                    buf[];
+       const char              *fmt;
+       u32                     buf[];
 };
 
 #define TRACE_OLD_SIZE         88
@@ -170,15 +174,32 @@ struct trace_power {
        struct power_trace      state_data;
 };
 
+struct kmemtrace_alloc_entry {
+       struct trace_entry      ent;
+       enum kmemtrace_type_id type_id;
+       unsigned long call_site;
+       const void *ptr;
+       size_t bytes_req;
+       size_t bytes_alloc;
+       gfp_t gfp_flags;
+       int node;
+};
+
+struct kmemtrace_free_entry {
+       struct trace_entry      ent;
+       enum kmemtrace_type_id type_id;
+       unsigned long call_site;
+       const void *ptr;
+};
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF           - interrupts were disabled
- *  IRQS_NOSUPPORT     - arch does not support irqs_disabled_flags
+ *  IRQS_NOSUPPORT     - arch does not support irqs_disabled_flags
 *  NEED_RESCHED               - reschedule is requested
  *  HARDIRQ            - inside an interrupt handler
  *  SOFTIRQ            - inside a softirq handler
- *  CONT               - multiple entries hold the trace item
  */
 enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
@@ -186,7 +207,6 @@ enum trace_flag_type {
        TRACE_FLAG_NEED_RESCHED         = 0x04,
        TRACE_FLAG_HARDIRQ              = 0x08,
        TRACE_FLAG_SOFTIRQ              = 0x10,
-       TRACE_FLAG_CONT                 = 0x20,
 };
 
 #define TRACE_BUF_SIZE         1024
@@ -198,6 +218,7 @@ enum trace_flag_type {
  */
 struct trace_array_cpu {
        atomic_t                disabled;
+       void                    *buffer_page;   /* ring buffer spare */
 
        /* these fields get copied into max-trace: */
        unsigned long           trace_idx;
@@ -262,7 +283,6 @@ extern void __ftrace_bad_type(void);
        do {                                                            \
                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
-               IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
@@ -279,7 +299,11 @@ extern void __ftrace_bad_type(void);
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
                          TRACE_GRAPH_RET);             \
                IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
-               IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+               IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+               IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,       \
+                         TRACE_KMEM_ALLOC);    \
+               IF_ASSIGN(var, ent, struct kmemtrace_free_entry,        \
+                         TRACE_KMEM_FREE);     \
                __ftrace_bad_type();                                    \
        } while (0)
 
@@ -287,7 +311,8 @@ extern void __ftrace_bad_type(void);
 enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
-       TRACE_TYPE_UNHANDLED    = 2     /* Relay to other output functions */
+       TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
+       TRACE_TYPE_NO_CONSUME   = 3     /* Handled but do not consume the entry */
 };
 
 
@@ -297,8 +322,8 @@ enum print_line_t {
  * flags value in struct tracer_flags.
  */
 struct tracer_opt {
-       const char      *name; /* Will appear on the trace_options file */
-       u32             bit; /* Mask assigned in val field in tracer_flags */
+       const char      *name; /* Will appear on the trace_options file */
+       u32             bit; /* Mask assigned in val field in tracer_flags */
 };
 
 /*
@@ -307,28 +332,51 @@ struct tracer_opt {
  */
 struct tracer_flags {
        u32                     val;
-       struct tracer_opt       *opts;
+       struct tracer_opt       *opts;
 };
 
 /* Makes more easy to define a tracer opt */
 #define TRACER_OPT(s, b)       .name = #s, .bit = b
 
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
  */
 struct tracer {
        const char              *name;
-       /* Your tracer should raise a warning if init fails */
        int                     (*init)(struct trace_array *tr);
        void                    (*reset)(struct trace_array *tr);
        void                    (*start)(struct trace_array *tr);
        void                    (*stop)(struct trace_array *tr);
        void                    (*open)(struct trace_iterator *iter);
        void                    (*pipe_open)(struct trace_iterator *iter);
+       void                    (*wait_pipe)(struct trace_iterator *iter);
        void                    (*close)(struct trace_iterator *iter);
        ssize_t                 (*read)(struct trace_iterator *iter,
                                        struct file *filp, char __user *ubuf,
                                        size_t cnt, loff_t *ppos);
+       ssize_t                 (*splice_read)(struct trace_iterator *iter,
+                                              struct file *filp,
+                                              loff_t *ppos,
+                                              struct pipe_inode_info *pipe,
+                                              size_t len,
+                                              unsigned int flags);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
        int                     (*selftest)(struct tracer *trace,
                                            struct trace_array *tr);
@@ -339,7 +387,8 @@ struct tracer {
        int                     (*set_flag)(u32 old_flags, u32 bit, int set);
        struct tracer           *next;
        int                     print_max;
-       struct tracer_flags     *flags;
+       struct tracer_flags     *flags;
+       struct tracer_stat      *stats;
 };
 
 struct trace_seq {
@@ -348,6 +397,16 @@ struct trace_seq {
        unsigned int            readpos;
 };
 
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+       s->len = 0;
+       s->readpos = 0;
+}
+
+
+#define TRACE_PIPE_ALL_CPU     -1
+
 /*
  * Trace iterator - used by printout routines who present trace
  * results to users and which routines might sleep, etc:
@@ -356,6 +415,8 @@ struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        void                    *private;
+       int                     cpu_file;
+       struct mutex            mutex;
        struct ring_buffer_iter *buffer_iter[NR_CPUS];
 
        /* The below is zeroed out in pipe_read */
@@ -371,6 +432,7 @@ struct trace_iterator {
        cpumask_var_t           started;
 };
 
+int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +441,48 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+                                                   unsigned char type,
+                                                   unsigned long len,
+                                                   unsigned long flags,
+                                                   int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+                               struct ring_buffer_event *event,
+                               unsigned long flags, int pc);
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+                                 unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+                                       unsigned long flags, int pc);
+
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                                struct trace_array_cpu *data);
+
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+                                         int *ent_cpu, u64 *ent_ts);
+
 void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);
 
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
+
 void ftrace(struct trace_array *tr,
                            struct trace_array_cpu *data,
                            unsigned long ip,
                            unsigned long parent_ip,
                            unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-                               struct trace_array_cpu *data,
                                struct task_struct *wakee,
                                struct task_struct *cur,
                                unsigned long flags, int pc);
@@ -408,14 +492,12 @@ void trace_special(struct trace_array *tr,
                   unsigned long arg2,
                   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-                   struct trace_array_cpu *data,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -434,15 +516,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);
 
-extern cycle_t ftrace_now(int cpu);
+void __trace_stack(struct trace_array *tr,
+                  unsigned long flags,
+                  int skip, int pc);
 
-#ifdef CONFIG_FUNCTION_TRACER
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
-#else
-# define tracing_start_function_trace()                do { } while (0)
-# define tracing_stop_function_trace()         do { } while (0)
-#endif
+extern cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
@@ -456,10 +534,10 @@ struct tracer_switch_ops {
        void                            *private;
        struct tracer_switch_ops        *next;
 };
-
-char *trace_find_cmdline(int pid);
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
+extern char *trace_find_cmdline(int pid);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@ -469,6 +547,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
                                           struct trace_array *tr);
+extern int trace_selftest_startup_function_graph(struct tracer *trace,
+                                                struct trace_array *tr);
 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
                                          struct trace_array *tr);
 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,15 +568,6 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
-                                struct trace_iterator *iter);
-
-extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
-               unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-                                size_t cnt);
 extern long ns2usecs(cycle_t nsec);
 extern int
 trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
@@ -580,7 +651,9 @@ enum trace_iterator_flags {
        TRACE_ITER_ANNOTATE             = 0x2000,
        TRACE_ITER_USERSTACKTRACE       = 0x4000,
        TRACE_ITER_SYM_USEROBJ          = 0x8000,
-       TRACE_ITER_PRINTK_MSGONLY       = 0x10000
+       TRACE_ITER_PRINTK_MSGONLY       = 0x10000,
+       TRACE_ITER_CONTEXT_INFO         = 0x20000, /* Print pid/cpu/time */
+       TRACE_ITER_LATENCY_FMT          = 0x40000,
 };
 
 /*
@@ -601,12 +674,12 @@ extern struct tracer nop_trace;
  * preempt_enable (after a disable), a schedule might take place
  * causing an infinite recursion.
  *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
  * disabling preemption. When we want to enable preemption we
  * check the flag, if it is set, then we call preempt_enable_no_resched.
  * Otherwise, we call preempt_enable.
  *
- * The rational for doing the above is that if need resched is set
+ * The rationale for doing the above is that if need_resched is set
  * and we have yet to reschedule, we are either in an atomic location
  * (where we do not need to check for scheduling) or we are inside
  * the scheduler and do not want to resched.
@@ -627,7 +700,7 @@ static inline int ftrace_preempt_disable(void)
  *
  * This is a scheduler safe way to enable preemption and not miss
  * any preemption checks. The disable call saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
  * are inside the scheduler (we would have already scheduled
  * otherwise). In this case, we do not want to call normal
  * preempt_enable, but preempt_enable_no_resched instead.
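A short sketch of the pairing described in the two comments above, assuming the enable side is ftrace_preempt_enable(int resched), the counterpart of ftrace_preempt_disable() in this header.

	static void example_trace_event(void)
	{
		int resched;

		/* Save whether a reschedule was already pending. */
		resched = ftrace_preempt_disable();

		/* ... record a trace entry; must not recurse into the scheduler ... */

		/* Re-enable preemption without triggering a nested schedule. */
		ftrace_preempt_enable(resched);
	}
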
@@ -664,4 +737,26 @@ static inline void trace_branch_disable(void)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
+/* trace event type bit fields, not numeric */
+enum {
+       TRACE_EVENT_TYPE_PRINTF         = 1,
+       TRACE_EVENT_TYPE_RAW            = 2,
+};
+
+struct ftrace_event_call {
+       char            *name;
+       char            *system;
+       struct dentry   *dir;
+       int             enabled;
+       int             (*regfunc)(void);
+       void            (*unregfunc)(void);
+       int             id;
+       int             (*raw_init)(void);
+       int             (*show_format)(struct trace_seq *s);
+};
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
 #endif /* _LINUX_KERNEL_TRACE_H */
index 366c8c3..7a30fc4 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kallsyms.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 static struct trace_array *boot_trace;
 static bool pre_initcalls_finished;
@@ -27,13 +28,13 @@ void start_boot_trace(void)
 
 void enable_boot_trace(void)
 {
-       if (pre_initcalls_finished)
+       if (boot_trace && pre_initcalls_finished)
                tracing_start_sched_switch_record();
 }
 
 void disable_boot_trace(void)
 {
-       if (pre_initcalls_finished)
+       if (boot_trace && pre_initcalls_finished)
                tracing_stop_sched_switch_record();
 }
 
@@ -42,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr)
        int cpu;
        boot_trace = tr;
 
+       if (!tr)
+               return 0;
+
        for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);
 
@@ -128,10 +132,9 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 {
        struct ring_buffer_event *event;
        struct trace_boot_call *entry;
-       unsigned long irq_flags;
        struct trace_array *tr = boot_trace;
 
-       if (!pre_initcalls_finished)
+       if (!tr || !pre_initcalls_finished)
                return;
 
        /* Get its name now since this function could
@@ -140,18 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+                                         sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, 0);
-       entry->ent.type = TRACE_BOOT_CALL;
        entry->boot_call = *bt;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
-       trace_wake_up();
-
+       trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
        preempt_enable();
 }
@@ -160,27 +158,21 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 {
        struct ring_buffer_event *event;
        struct trace_boot_ret *entry;
-       unsigned long irq_flags;
        struct trace_array *tr = boot_trace;
 
-       if (!pre_initcalls_finished)
+       if (!tr || !pre_initcalls_finished)
                return;
 
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+                                         sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, 0);
-       entry->ent.type = TRACE_BOOT_RET;
        entry->boot_ret = *bt;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
-       trace_wake_up();
-
+       trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
        preempt_enable();
 }
index 6c00feb..aaa0755 100644 (file)
 #include <linux/hash.h>
 #include <linux/fs.h>
 #include <asm/local.h>
+
 #include "trace.h"
+#include "trace_stat.h"
+#include "trace_output.h"
 
 #ifdef CONFIG_BRANCH_TRACER
 
+static struct tracer branch_trace;
 static int branch_tracing_enabled __read_mostly;
 static DEFINE_MUTEX(branch_tracing_mutex);
+
 static struct trace_array *branch_tracer;
 
 static void
@@ -28,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
-       unsigned long flags, irq_flags;
+       unsigned long flags;
        int cpu, pc;
        const char *p;
 
@@ -47,15 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       pc = preempt_count();
+       event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+                                         sizeof(*entry), flags, pc);
        if (!event)
                goto out;
 
-       pc = preempt_count();
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type         = TRACE_BRANCH;
 
        /* Strip off the path, only save the file */
        p = f->file + strlen(f->file);
@@ -70,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        entry->line = f->line;
        entry->correct = val == expect;
 
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
@@ -88,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
 int enable_branch_tracing(struct trace_array *tr)
 {
-       int ret = 0;
-
        mutex_lock(&branch_tracing_mutex);
        branch_tracer = tr;
        /*
@@ -100,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
        branch_tracing_enabled++;
        mutex_unlock(&branch_tracing_mutex);
 
-       return ret;
+       return 0;
 }
 
 void disable_branch_tracing(void)
@@ -128,11 +129,6 @@ static void stop_branch_trace(struct trace_array *tr)
 
 static int branch_trace_init(struct trace_array *tr)
 {
-       int cpu;
-
-       for_each_online_cpu(cpu)
-               tracing_reset(tr, cpu);
-
        start_branch_trace(tr);
        return 0;
 }
@@ -142,22 +138,53 @@ static void branch_trace_reset(struct trace_array *tr)
        stop_branch_trace(tr);
 }
 
-struct tracer branch_trace __read_mostly =
+static enum print_line_t trace_branch_print(struct trace_iterator *iter,
+                                           int flags)
+{
+       struct trace_branch *field;
+
+       trace_assign_type(field, iter->ent);
+
+       if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
+                            field->correct ? "  ok  " : " MISS ",
+                            field->func,
+                            field->file,
+                            field->line))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+
+static struct trace_event trace_branch_event = {
+       .type           = TRACE_BRANCH,
+       .trace          = trace_branch_print,
+};
+
+static struct tracer branch_trace __read_mostly =
 {
        .name           = "branch",
        .init           = branch_trace_init,
        .reset          = branch_trace_reset,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_branch,
-#endif
+#endif /* CONFIG_FTRACE_SELFTEST */
 };
 
-__init static int init_branch_trace(void)
+__init static int init_branch_tracer(void)
 {
+       int ret;
+
+       ret = register_ftrace_event(&trace_branch_event);
+       if (!ret) {
+               printk(KERN_WARNING "Warning: could not register "
+                                   "branch events\n");
+               return 1;
+       }
        return register_tracer(&branch_trace);
 }
+device_initcall(init_branch_tracer);
 
-device_initcall(init_branch_trace);
 #else
 static inline
 void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
@@ -183,66 +210,39 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
 }
 EXPORT_SYMBOL(ftrace_likely_update);
 
-struct ftrace_pointer {
-       void            *start;
-       void            *stop;
-       int             hit;
-};
+extern unsigned long __start_annotated_branch_profile[];
+extern unsigned long __stop_annotated_branch_profile[];
 
-static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+static int annotated_branch_stat_headers(struct seq_file *m)
 {
-       const struct ftrace_pointer *f = m->private;
-       struct ftrace_branch_data *p = v;
-
-       (*pos)++;
-
-       if (v == (void *)1)
-               return f->start;
-
-       ++p;
-
-       if ((void *)p >= (void *)f->stop)
-               return NULL;
-
-       return p;
+       seq_printf(m, " correct incorrect  %% ");
+       seq_printf(m, "       Function                "
+                             "  File              Line\n"
+                             " ------- ---------  - "
+                             "       --------                "
+                             "  ----              ----\n");
+       return 0;
 }
 
-static void *t_start(struct seq_file *m, loff_t *pos)
+static inline long get_incorrect_percent(struct ftrace_branch_data *p)
 {
-       void *t = (void *)1;
-       loff_t l = 0;
-
-       for (; t && l < *pos; t = t_next(m, t, &l))
-               ;
+       long percent;
 
-       return t;
-}
+       if (p->correct) {
+               percent = p->incorrect * 100;
+               percent /= p->correct + p->incorrect;
+       } else
+               percent = p->incorrect ? 100 : -1;
 
-static void t_stop(struct seq_file *m, void *p)
-{
+       return percent;
 }
 
-static int t_show(struct seq_file *m, void *v)
+static int branch_stat_show(struct seq_file *m, void *v)
 {
-       const struct ftrace_pointer *fp = m->private;
        struct ftrace_branch_data *p = v;
        const char *f;
        long percent;
 
-       if (v == (void *)1) {
-               if (fp->hit)
-                       seq_printf(m, "   miss      hit    %% ");
-               else
-                       seq_printf(m, " correct incorrect  %% ");
-               seq_printf(m, "       Function                "
-                             "  File              Line\n"
-                             " ------- ---------  - "
-                             "       --------                "
-                             "  ----              ----\n");
-               return 0;
-       }
-
        /* Only print the file, not the path */
        f = p->file + strlen(p->file);
        while (f >= p->file && *f != '/')
@@ -252,11 +252,7 @@ static int t_show(struct seq_file *m, void *v)
        /*
         * The miss is overlayed on correct, and hit on incorrect.
         */
-       if (p->correct) {
-               percent = p->incorrect * 100;
-               percent /= p->correct + p->incorrect;
-       } else
-               percent = p->incorrect ? 100 : -1;
+       percent = get_incorrect_percent(p);
 
        seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
        if (percent < 0)
@@ -267,76 +263,118 @@ static int t_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static struct seq_operations tracing_likely_seq_ops = {
-       .start          = t_start,
-       .next           = t_next,
-       .stop           = t_stop,
-       .show           = t_show,
+static void *annotated_branch_stat_start(void)
+{
+       return __start_annotated_branch_profile;
+}
+
+static void *
+annotated_branch_stat_next(void *v, int idx)
+{
+       struct ftrace_branch_data *p = v;
+
+       ++p;
+
+       if ((void *)p >= (void *)__stop_annotated_branch_profile)
+               return NULL;
+
+       return p;
+}
+
+static int annotated_branch_stat_cmp(void *p1, void *p2)
+{
+       struct ftrace_branch_data *a = p1;
+       struct ftrace_branch_data *b = p2;
+
+       long percent_a, percent_b;
+
+       percent_a = get_incorrect_percent(a);
+       percent_b = get_incorrect_percent(b);
+
+       if (percent_a < percent_b)
+               return -1;
+       if (percent_a > percent_b)
+               return 1;
+       else
+               return 0;
+}
+
+static struct tracer_stat annotated_branch_stats = {
+       .name = "branch_annotated",
+       .stat_start = annotated_branch_stat_start,
+       .stat_next = annotated_branch_stat_next,
+       .stat_cmp = annotated_branch_stat_cmp,
+       .stat_headers = annotated_branch_stat_headers,
+       .stat_show = branch_stat_show
 };
 
-static int tracing_branch_open(struct inode *inode, struct file *file)
+__init static int init_annotated_branch_stats(void)
 {
        int ret;
 
-       ret = seq_open(file, &tracing_likely_seq_ops);
+       ret = register_stat_tracer(&annotated_branch_stats);
        if (!ret) {
-               struct seq_file *m = file->private_data;
-               m->private = (void *)inode->i_private;
+               printk(KERN_WARNING "Warning: could not register "
+                                   "annotated branches stats\n");
+               return 1;
        }
-
-       return ret;
+       return 0;
 }
-
-static const struct file_operations tracing_branch_fops = {
-       .open           = tracing_branch_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-};
+fs_initcall(init_annotated_branch_stats);
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
+
 extern unsigned long __start_branch_profile[];
 extern unsigned long __stop_branch_profile[];
 
-static const struct ftrace_pointer ftrace_branch_pos = {
-       .start                  = __start_branch_profile,
-       .stop                   = __stop_branch_profile,
-       .hit                    = 1,
-};
+static int all_branch_stat_headers(struct seq_file *m)
+{
+       seq_printf(m, "   miss      hit    %% ");
+       seq_printf(m, "       Function                "
+                             "  File              Line\n"
+                             " ------- ---------  - "
+                             "       --------                "
+                             "  ----              ----\n");
+       return 0;
+}
 
-#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+static void *all_branch_stat_start(void)
+{
+       return __start_branch_profile;
+}
 
-extern unsigned long __start_annotated_branch_profile[];
-extern unsigned long __stop_annotated_branch_profile[];
+static void *
+all_branch_stat_next(void *v, int idx)
+{
+       struct ftrace_branch_data *p = v;
 
-static const struct ftrace_pointer ftrace_annotated_branch_pos = {
-       .start                  = __start_annotated_branch_profile,
-       .stop                   = __stop_annotated_branch_profile,
-};
+       ++p;
 
-static __init int ftrace_branch_init(void)
-{
-       struct dentry *d_tracer;
-       struct dentry *entry;
+       if ((void *)p >= (void *)__stop_branch_profile)
+               return NULL;
 
-       d_tracer = tracing_init_dentry();
+       return p;
+}
 
-       entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
-                                   (void *)&ftrace_annotated_branch_pos,
-                                   &tracing_branch_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'profile_annotatet_branch' entry\n");
+static struct tracer_stat all_branch_stats = {
+       .name = "branch_all",
+       .stat_start = all_branch_stat_start,
+       .stat_next = all_branch_stat_next,
+       .stat_headers = all_branch_stat_headers,
+       .stat_show = branch_stat_show
+};
 
-#ifdef CONFIG_PROFILE_ALL_BRANCHES
-       entry = debugfs_create_file("profile_branch", 0444, d_tracer,
-                                   (void *)&ftrace_branch_pos,
-                                   &tracing_branch_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs"
-                          " 'profile_branch' entry\n");
-#endif
+__init static int all_annotated_branch_stats(void)
+{
+       int ret;
 
+       ret = register_stat_tracer(&all_branch_stats);
+       if (!ret) {
+               printk(KERN_WARNING "Warning: could not register "
+                                   "all branches stats\n");
+               return 1;
+       }
        return 0;
 }
-
-device_initcall(ftrace_branch_init);
+fs_initcall(all_annotated_branch_stats);
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
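For reference (not part of the diff): with the profile_annotated_branch and profile_branch debugfs files removed above, the same tables should now be reachable through the stat tracer, presumably as trace_stat/branch_annotated and, with CONFIG_PROFILE_ALL_BRANCHES, trace_stat/branch_all under the tracing debugfs directory, following the .name fields registered here. The annotated table is additionally sorted by incorrect-prediction percentage via annotated_branch_stat_cmp(), which the old seq_file output never did.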
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
new file mode 100644
index 0000000..2d4953f
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * tracing clocks
+ *
+ *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Implements 3 trace clock variants, with differing scalability/precision
+ * tradeoffs:
+ *
+ *  -   local: CPU-local trace clock
+ *  -  medium: scalable global clock with some jitter
+ *  -  global: globally monotonic, serialized clock
+ *
+ * Tracer plugins will choose a default from these clocks.
+ */
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/ktime.h>
+
+/*
+ * trace_clock_local(): the simplest and least coherent tracing clock.
+ *
+ * Useful for tracing that does not cross to other CPUs nor
+ * does it go through idle events.
+ */
+u64 notrace trace_clock_local(void)
+{
+       /*
+        * sched_clock() is an architecture implemented, fast, scalable,
+        * lockless clock. It is not guaranteed to be coherent across
+        * CPUs, nor across CPU idle events.
+        */
+       return sched_clock();
+}
+
+/*
+ * trace_clock(): 'inbetween' trace clock. Not completely serialized,
+ * but not completely incorrect when crossing CPUs either.
+ *
+ * This is based on cpu_clock(), which will allow at most ~1 jiffy of
+ * jitter between CPUs. So it's a pretty scalable clock, but there
+ * can be offsets in the trace data.
+ */
+u64 notrace trace_clock(void)
+{
+       return cpu_clock(raw_smp_processor_id());
+}
+
+
+/*
+ * trace_clock_global(): special globally coherent trace clock
+ *
+ * It has higher overhead than the other trace clocks but is still
+ * an order of magnitude faster than GTOD derived hardware clocks.
+ *
+ * Used by plugins that need globally coherent timestamps.
+ */
+
+static u64 prev_trace_clock_time;
+
+static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
+       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+u64 notrace trace_clock_global(void)
+{
+       unsigned long flags;
+       int this_cpu;
+       u64 now;
+
+       raw_local_irq_save(flags);
+
+       this_cpu = raw_smp_processor_id();
+       now = cpu_clock(this_cpu);
+       /*
+        * If in an NMI context then don't risk lockups and return the
+        * cpu_clock() time:
+        */
+       if (unlikely(in_nmi()))
+               goto out;
+
+       __raw_spin_lock(&trace_clock_lock);
+
+       /*
+        * TODO: if this happens often then maybe we should reset
+        * my_scd->clock to prev_trace_clock_time+1, to make sure
+        * we start ticking with the local clock from now on?
+        */
+       if ((s64)(now - prev_trace_clock_time) < 0)
+               now = prev_trace_clock_time + 1;
+
+       prev_trace_clock_time = now;
+
+       __raw_spin_unlock(&trace_clock_lock);
+
+ out:
+       raw_local_irq_restore(flags);
+
+       return now;
+}
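A minimal sketch of how a caller might pick between the three variants above; example_timestamp() is a made-up helper, and the include assumes the prototypes are exported through the matching linux/trace_clock.h header added alongside this file:

    #include <linux/types.h>
    #include <linux/trace_clock.h>

    static u64 example_timestamp(bool strict_ordering)
    {
            /*
             * trace_clock_global() is globally monotonic but serialized,
             * trace_clock() scales better at the cost of ~1 jiffy of
             * cross-CPU jitter, and trace_clock_local() would be the
             * cheapest choice when events never cross CPUs.
             */
            return strict_ordering ? trace_clock_global() : trace_clock();
    }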
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
new file mode 100644
index 0000000..5cca4c9
--- /dev/null
@@ -0,0 +1,166 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM   ftrace
+
+/*
+ * We cheat and use the proto type field as the ID
+ * and args as the entry type (minus 'struct')
+ */
+TRACE_EVENT_FORMAT(function, TRACE_FN, ftrace_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, ip, ip)
+               TRACE_FIELD(unsigned long, parent_ip, parent_ip)
+       ),
+       TP_RAW_FMT(" %lx <-- %lx")
+);
+
+TRACE_EVENT_FORMAT(funcgraph_entry, TRACE_GRAPH_ENT,
+                  ftrace_graph_ent_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, graph_ent.func, func)
+               TRACE_FIELD(int, graph_ent.depth, depth)
+       ),
+       TP_RAW_FMT("--> %lx (%d)")
+);
+
+TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
+                  ftrace_graph_ret_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, ret.func, func)
+               TRACE_FIELD(int, ret.depth, depth)
+       ),
+       TP_RAW_FMT("<-- %lx (%d)")
+);
+
+TRACE_EVENT_FORMAT(wakeup, TRACE_WAKE, ctx_switch_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned int, prev_pid, prev_pid)
+               TRACE_FIELD(unsigned char, prev_prio, prev_prio)
+               TRACE_FIELD(unsigned char, prev_state, prev_state)
+               TRACE_FIELD(unsigned int, next_pid, next_pid)
+               TRACE_FIELD(unsigned char, next_prio, next_prio)
+               TRACE_FIELD(unsigned char, next_state, next_state)
+               TRACE_FIELD(unsigned int, next_cpu, next_cpu)
+       ),
+       TP_RAW_FMT("%u:%u:%u  ==+ %u:%u:%u [%03u]")
+);
+
+TRACE_EVENT_FORMAT(context_switch, TRACE_CTX, ctx_switch_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned int, prev_pid, prev_pid)
+               TRACE_FIELD(unsigned char, prev_prio, prev_prio)
+               TRACE_FIELD(unsigned char, prev_state, prev_state)
+               TRACE_FIELD(unsigned int, next_pid, next_pid)
+               TRACE_FIELD(unsigned char, next_prio, next_prio)
+               TRACE_FIELD(unsigned char, next_state, next_state)
+               TRACE_FIELD(unsigned int, next_cpu, next_cpu)
+       ),
+       TP_RAW_FMT("%u:%u:%u  ==+ %u:%u:%u [%03u]")
+);
+
+TRACE_EVENT_FORMAT(special, TRACE_SPECIAL, special_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, arg1, arg1)
+               TRACE_FIELD(unsigned long, arg2, arg2)
+               TRACE_FIELD(unsigned long, arg3, arg3)
+       ),
+       TP_RAW_FMT("(%08lx) (%08lx) (%08lx)")
+);
+
+/*
+ * Stack-trace entry:
+ */
+
+/* #define FTRACE_STACK_ENTRIES   8 */
+
+TRACE_EVENT_FORMAT(kernel_stack, TRACE_STACK, stack_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, caller[0], stack0)
+               TRACE_FIELD(unsigned long, caller[1], stack1)
+               TRACE_FIELD(unsigned long, caller[2], stack2)
+               TRACE_FIELD(unsigned long, caller[3], stack3)
+               TRACE_FIELD(unsigned long, caller[4], stack4)
+               TRACE_FIELD(unsigned long, caller[5], stack5)
+               TRACE_FIELD(unsigned long, caller[6], stack6)
+               TRACE_FIELD(unsigned long, caller[7], stack7)
+       ),
+       TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
+                "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
+);
+
+TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, caller[0], stack0)
+               TRACE_FIELD(unsigned long, caller[1], stack1)
+               TRACE_FIELD(unsigned long, caller[2], stack2)
+               TRACE_FIELD(unsigned long, caller[3], stack3)
+               TRACE_FIELD(unsigned long, caller[4], stack4)
+               TRACE_FIELD(unsigned long, caller[5], stack5)
+               TRACE_FIELD(unsigned long, caller[6], stack6)
+               TRACE_FIELD(unsigned long, caller[7], stack7)
+       ),
+       TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
+                "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
+);
+
+TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned long, ip, ip)
+               TRACE_FIELD(unsigned int, depth, depth)
+               TRACE_FIELD(char *, fmt, fmt)
+               TRACE_FIELD_ZERO_CHAR(buf)
+       ),
+       TP_RAW_FMT("%08lx (%d) fmt:%p %s")
+);
+
+TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(unsigned int, line, line)
+               TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, func)
+               TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, file)
+               TRACE_FIELD(char, correct, correct)
+       ),
+       TP_RAW_FMT("%u:%s:%s (%u)")
+);
+
+TRACE_EVENT_FORMAT(hw_branch, TRACE_HW_BRANCHES, hw_branch_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(u64, from, from)
+               TRACE_FIELD(u64, to, to)
+       ),
+       TP_RAW_FMT("from: %llx to: %llx")
+);
+
+TRACE_EVENT_FORMAT(power, TRACE_POWER, trace_power, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(ktime_t, state_data.stamp, stamp)
+               TRACE_FIELD(ktime_t, state_data.end, end)
+               TRACE_FIELD(int, state_data.type, type)
+               TRACE_FIELD(int, state_data.state, state)
+       ),
+       TP_RAW_FMT("%llx->%llx type:%u state:%u")
+);
+
+TRACE_EVENT_FORMAT(kmem_alloc, TRACE_KMEM_ALLOC, kmemtrace_alloc_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
+               TRACE_FIELD(unsigned long, call_site, call_site)
+               TRACE_FIELD(const void *, ptr, ptr)
+               TRACE_FIELD(size_t, bytes_req, bytes_req)
+               TRACE_FIELD(size_t, bytes_alloc, bytes_alloc)
+               TRACE_FIELD(gfp_t, gfp_flags, gfp_flags)
+               TRACE_FIELD(int, node, node)
+       ),
+       TP_RAW_FMT("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
+                " flags:%x node:%d")
+);
+
+TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore,
+       TRACE_STRUCT(
+               TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
+               TRACE_FIELD(unsigned long, call_site, call_site)
+               TRACE_FIELD(const void *, ptr, ptr)
+       ),
+       TP_RAW_FMT("type:%u call_site:%lx ptr:%p")
+);
+
+#undef TRACE_SYSTEM
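To make the "cheat" from the header comment concrete: the second argument of each entry (for instance TRACE_FN) is consumed as the event ID and the third (ftrace_entry) as the entry structure, minus the 'struct' keyword. Going by the trace_export.c expansion further down in this patch, the first entry above should end up roughly as:

    static struct ftrace_event_call __used
    __attribute__((__aligned__(4)))
    __attribute__((section("_ftrace_events"))) event_function = {
            .name           = "function",
            .id             = TRACE_FN,                     /* the "proto" slot */
            .system         = __stringify(TRACE_SYSTEM),    /* "ftrace" here */
            .show_format    = ftrace_format_function,       /* walks struct ftrace_entry */
    };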
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
new file mode 100644
index 0000000..1880a64
--- /dev/null
@@ -0,0 +1,581 @@
+/*
+ * event tracer
+ *
+ * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ *  - Added format output of fields of the trace point.
+ *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include "trace_output.h"
+
+#define TRACE_SYSTEM "TRACE_SYSTEM"
+
+static DEFINE_MUTEX(event_mutex);
+
+#define events_for_each(event)                                         \
+       for (event = __start_ftrace_events;                             \
+            (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+            event++)
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       tracing_record_cmdline(current);
+       trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+       va_end(ap);
+}
+
+static void ftrace_clear_events(void)
+{
+       struct ftrace_event_call *call = (void *)__start_ftrace_events;
+
+
+       while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
+
+               if (call->enabled) {
+                       call->enabled = 0;
+                       call->unregfunc();
+               }
+               call++;
+       }
+}
+
+static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+                                       int enable)
+{
+
+       switch (enable) {
+       case 0:
+               if (call->enabled) {
+                       call->enabled = 0;
+                       call->unregfunc();
+               }
+               break;
+       case 1:
+               if (!call->enabled) {
+                       call->enabled = 1;
+                       call->regfunc();
+               }
+               break;
+       }
+}
+
+static int ftrace_set_clr_event(char *buf, int set)
+{
+       struct ftrace_event_call *call = __start_ftrace_events;
+       char *event = NULL, *sub = NULL, *match;
+       int ret = -EINVAL;
+
+       /*
+        * The buf format can be <subsystem>:<event-name>
+        *  *:<event-name> means any event by that name.
+        *  :<event-name> is the same.
+        *
+        *  <subsystem>:* means all events in that subsystem
+        *  <subsystem>: means the same.
+        *
+        *  <name> (no ':') means all events in a subsystem with
+        *  the name <name> or any event that matches <name>
+        */
+
+       match = strsep(&buf, ":");
+       if (buf) {
+               sub = match;
+               event = buf;
+               match = NULL;
+
+               if (!strlen(sub) || strcmp(sub, "*") == 0)
+                       sub = NULL;
+               if (!strlen(event) || strcmp(event, "*") == 0)
+                       event = NULL;
+       }
+
+       mutex_lock(&event_mutex);
+       events_for_each(call) {
+
+               if (!call->name)
+                       continue;
+
+               if (match &&
+                   strcmp(match, call->name) != 0 &&
+                   strcmp(match, call->system) != 0)
+                       continue;
+
+               if (sub && strcmp(sub, call->system) != 0)
+                       continue;
+
+               if (event && strcmp(event, call->name) != 0)
+                       continue;
+
+               ftrace_event_enable_disable(call, set);
+
+               ret = 0;
+       }
+       mutex_unlock(&event_mutex);
+
+       return ret;
+}
+
+/* 128 should be much more than enough */
+#define EVENT_BUF_SIZE         127
+
+static ssize_t
+ftrace_event_write(struct file *file, const char __user *ubuf,
+                  size_t cnt, loff_t *ppos)
+{
+       size_t read = 0;
+       int i, set = 1;
+       ssize_t ret;
+       char *buf;
+       char ch;
+
+       if (!cnt || cnt < 0)
+               return 0;
+
+       ret = get_user(ch, ubuf++);
+       if (ret)
+               return ret;
+       read++;
+       cnt--;
+
+       /* skip white space */
+       while (cnt && isspace(ch)) {
+               ret = get_user(ch, ubuf++);
+               if (ret)
+                       return ret;
+               read++;
+               cnt--;
+       }
+
+       /* Only white space found? */
+       if (isspace(ch)) {
+               file->f_pos += read;
+               ret = read;
+               return ret;
+       }
+
+       buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       if (cnt > EVENT_BUF_SIZE)
+               cnt = EVENT_BUF_SIZE;
+
+       i = 0;
+       while (cnt && !isspace(ch)) {
+               if (!i && ch == '!')
+                       set = 0;
+               else
+                       buf[i++] = ch;
+
+               ret = get_user(ch, ubuf++);
+               if (ret)
+                       goto out_free;
+               read++;
+               cnt--;
+       }
+       buf[i] = 0;
+
+       file->f_pos += read;
+
+       ret = ftrace_set_clr_event(buf, set);
+       if (ret)
+               goto out_free;
+
+       ret = read;
+
+ out_free:
+       kfree(buf);
+
+       return ret;
+}
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_call *next = call;
+
+       (*pos)++;
+
+       if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+               return NULL;
+
+       m->private = ++next;
+
+       return call;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+       return t_next(m, NULL, pos);
+}
+
+static void *
+s_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_call *next;
+
+       (*pos)++;
+
+ retry:
+       if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+               return NULL;
+
+       if (!call->enabled) {
+               call++;
+               goto retry;
+       }
+
+       next = call;
+       m->private = ++next;
+
+       return call;
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+       return s_next(m, NULL, pos);
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+       struct ftrace_event_call *call = v;
+
+       if (strcmp(call->system, TRACE_SYSTEM) != 0)
+               seq_printf(m, "%s:", call->system);
+       seq_printf(m, "%s\n", call->name);
+
+       return 0;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int
+ftrace_event_seq_open(struct inode *inode, struct file *file)
+{
+       int ret;
+       const struct seq_operations *seq_ops;
+
+       if ((file->f_mode & FMODE_WRITE) &&
+           !(file->f_flags & O_APPEND))
+               ftrace_clear_events();
+
+       seq_ops = inode->i_private;
+       ret = seq_open(file, seq_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+
+               m->private = __start_ftrace_events;
+       }
+       return ret;
+}
+
+static ssize_t
+event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+                 loff_t *ppos)
+{
+       struct ftrace_event_call *call = filp->private_data;
+       char *buf;
+
+       if (call->enabled)
+               buf = "1\n";
+       else
+               buf = "0\n";
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                  loff_t *ppos)
+{
+       struct ftrace_event_call *call = filp->private_data;
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       switch (val) {
+       case 0:
+       case 1:
+               mutex_lock(&event_mutex);
+               ftrace_event_enable_disable(call, val);
+               mutex_unlock(&event_mutex);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       *ppos += cnt;
+
+       return cnt;
+}
+
+#undef FIELD
+#define FIELD(type, name)                                              \
+       #type, #name, (unsigned int)offsetof(typeof(field), name),      \
+               (unsigned int)sizeof(field.name)
+
+static int trace_write_header(struct trace_seq *s)
+{
+       struct trace_entry field;
+
+       /* struct trace_entry */
+       return trace_seq_printf(s,
+                               "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
+                               "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
+                               "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
+                               "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
+                               "\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
+                               "\n",
+                               FIELD(unsigned char, type),
+                               FIELD(unsigned char, flags),
+                               FIELD(unsigned char, preempt_count),
+                               FIELD(int, pid),
+                               FIELD(int, tgid));
+}
+
+static ssize_t
+event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
+                 loff_t *ppos)
+{
+       struct ftrace_event_call *call = filp->private_data;
+       struct trace_seq *s;
+       char *buf;
+       int r;
+
+       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       trace_seq_init(s);
+
+       if (*ppos)
+               return 0;
+
+       /* If any of the first writes fail, so will the show_format. */
+
+       trace_seq_printf(s, "name: %s\n", call->name);
+       trace_seq_printf(s, "ID: %d\n", call->id);
+       trace_seq_printf(s, "format:\n");
+       trace_write_header(s);
+
+       r = call->show_format(s);
+       if (!r) {
+               /*
+                * ug!  The format output is bigger than a PAGE!!
+                */
+               buf = "FORMAT TOO BIG\n";
+               r = simple_read_from_buffer(ubuf, cnt, ppos,
+                                             buf, strlen(buf));
+               goto out;
+       }
+
+       r = simple_read_from_buffer(ubuf, cnt, ppos,
+                                   s->buffer, s->len);
+ out:
+       kfree(s);
+       return r;
+}
+
+static const struct seq_operations show_event_seq_ops = {
+       .start = t_start,
+       .next = t_next,
+       .show = t_show,
+       .stop = t_stop,
+};
+
+static const struct seq_operations show_set_event_seq_ops = {
+       .start = s_start,
+       .next = s_next,
+       .show = t_show,
+       .stop = t_stop,
+};
+
+static const struct file_operations ftrace_set_event_fops = {
+       .open = ftrace_event_seq_open,
+       .read = seq_read,
+       .write = ftrace_event_write,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static const struct file_operations ftrace_enable_fops = {
+       .open = tracing_open_generic,
+       .read = event_enable_read,
+       .write = event_enable_write,
+};
+
+static const struct file_operations ftrace_event_format_fops = {
+       .open = tracing_open_generic,
+       .read = event_format_read,
+};
+
+static struct dentry *event_trace_events_dir(void)
+{
+       static struct dentry *d_tracer;
+       static struct dentry *d_events;
+
+       if (d_events)
+               return d_events;
+
+       d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return NULL;
+
+       d_events = debugfs_create_dir("events", d_tracer);
+       if (!d_events)
+               pr_warning("Could not create debugfs "
+                          "'events' directory\n");
+
+       return d_events;
+}
+
+struct event_subsystem {
+       struct list_head        list;
+       const char              *name;
+       struct dentry           *entry;
+};
+
+static LIST_HEAD(event_subsystems);
+
+static struct dentry *
+event_subsystem_dir(const char *name, struct dentry *d_events)
+{
+       struct event_subsystem *system;
+
+       /* First see if we did not already create this dir */
+       list_for_each_entry(system, &event_subsystems, list) {
+               if (strcmp(system->name, name) == 0)
+                       return system->entry;
+       }
+
+       /* need to create new entry */
+       system = kmalloc(sizeof(*system), GFP_KERNEL);
+       if (!system) {
+               pr_warning("No memory to create event subsystem %s\n",
+                          name);
+               return d_events;
+       }
+
+       system->entry = debugfs_create_dir(name, d_events);
+       if (!system->entry) {
+               pr_warning("Could not create event subsystem %s\n",
+                          name);
+               kfree(system);
+               return d_events;
+       }
+
+       system->name = name;
+       list_add(&system->list, &event_subsystems);
+
+       return system->entry;
+}
+
+static int
+event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
+{
+       struct dentry *entry;
+       int ret;
+
+       /*
+        * If the trace point header did not define TRACE_SYSTEM
+        * then the system would be called "TRACE_SYSTEM".
+        */
+       if (strcmp(call->system, "TRACE_SYSTEM") != 0)
+               d_events = event_subsystem_dir(call->system, d_events);
+
+       if (call->raw_init) {
+               ret = call->raw_init();
+               if (ret < 0) {
+                       pr_warning("Could not initialize trace point"
+                                  " events/%s\n", call->name);
+                       return ret;
+               }
+       }
+
+       call->dir = debugfs_create_dir(call->name, d_events);
+       if (!call->dir) {
+               pr_warning("Could not create debugfs "
+                          "'%s' directory\n", call->name);
+               return -1;
+       }
+
+       if (call->regfunc) {
+               entry = debugfs_create_file("enable", 0644, call->dir, call,
+                                           &ftrace_enable_fops);
+               if (!entry)
+                       pr_warning("Could not create debugfs "
+                                  "'%s/enable' entry\n", call->name);
+       }
+
+       /* A trace may not want to export its format */
+       if (!call->show_format)
+               return 0;
+
+       entry = debugfs_create_file("format", 0444, call->dir, call,
+                                   &ftrace_event_format_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'%s/format' entry\n", call->name);
+
+       return 0;
+}
+
+static __init int event_trace_init(void)
+{
+       struct ftrace_event_call *call = __start_ftrace_events;
+       struct dentry *d_tracer;
+       struct dentry *entry;
+       struct dentry *d_events;
+
+       d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return 0;
+
+       entry = debugfs_create_file("set_event", 0644, d_tracer,
+                                   (void *)&show_set_event_seq_ops,
+                                   &ftrace_set_event_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'set_event' entry\n");
+
+       d_events = event_trace_events_dir();
+       if (!d_events)
+               return 0;
+
+       events_for_each(call) {
+               /* The linker may leave blanks */
+               if (!call->name)
+                       continue;
+               event_create_dir(call, d_events);
+       }
+
+       return 0;
+}
+fs_initcall(event_trace_init);
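Illustrative usage of the interface created above (not part of the patch; debugfs assumed mounted at /sys/kernel/debug, and the subsys/event names are placeholders since the available events depend on which TRACE_EVENT/TRACE_FORMAT users are built in):

    cd /sys/kernel/debug/tracing
    echo 'subsys:event'   > set_event    # one event in one subsystem
    echo 'subsys:'       >> set_event    # every event in that subsystem
    echo '!subsys:event' >> set_event    # a leading '!' disables again
    cat set_event                        # s_start()/s_next() list enabled events only
    echo 1 > events/subsys/event/enable  # per-event file, created when ->regfunc exists
    cat events/ftrace/function/format    # field layout printed via ->show_format

Note that ftrace_event_seq_open() clears every enabled event when set_event is opened for writing without O_APPEND, which is why the follow-up writes above append rather than truncate.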
diff --git a/kernel/trace/trace_events_stage_1.h b/kernel/trace/trace_events_stage_1.h
new file mode 100644
index 0000000..15e9bf9
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Stage 1 of the trace events.
+ *
+ * Override the macros in <trace/trace_event_types.h> to include the following:
+ *
+ * struct ftrace_raw_<call> {
+ *     struct trace_entry              ent;
+ *     <type>                          <item>;
+ *     [...]
+ * };
+ *
+ * The <type> <item> is created by the TRACE_FIELD(type, item, assign)
+ * macro. We simply do "type item;", and that will create the fields
+ * in the structure.
+ */
+
+#undef TRACE_FORMAT
+#define TRACE_FORMAT(call, proto, args, fmt)
+
+#undef __array
+#define __array(type, item, len)       type    item[len];
+
+#undef __field
+#define __field(type, item)            type    item;
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, print, assign) \
+       struct ftrace_raw_##name {                              \
+               struct trace_entry      ent;                    \
+               tstruct                                         \
+       };                                                      \
+       static struct ftrace_event_call event_##name
+
+#include <trace/trace_event_types.h>
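A concrete, hypothetical instance may help: for a tracepoint declared as TRACE_EVENT(foo, ...) whose TP_STRUCT__entry lists __field(int, bar) and __array(char, comm, 16), the overrides above expand to roughly:

    struct ftrace_raw_foo {
            struct trace_entry      ent;
            int                     bar;
            char                    comm[16];
    };
    static struct ftrace_event_call event_foo;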
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
new file mode 100644
index 0000000..d91bf4c
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Stage 2 of the trace events.
+ *
+ * Override the macros in <trace/trace_event_types.h> to include the following:
+ *
+ * enum print_line_t
+ * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * {
+ *     struct trace_seq *s = &iter->seq;
+ *     struct ftrace_raw_<call> *field; <-- defined in stage 1
+ *     struct trace_entry *entry;
+ *     int ret;
+ *
+ *     entry = iter->ent;
+ *
+ *     if (entry->type != event_<call>.id) {
+ *             WARN_ON_ONCE(1);
+ *             return TRACE_TYPE_UNHANDLED;
+ *     }
+ *
+ *     field = (typeof(field))entry;
+ *
+ *     ret = trace_seq_printf(s, <TP_RAW_FMT> "%s", <ARGS> "\n");
+ *     if (!ret)
+ *             return TRACE_TYPE_PARTIAL_LINE;
+ *
+ *     return TRACE_TYPE_HANDLED;
+ * }
+ *
+ * This is the method used to print the raw event to the trace
+ * output format. Note, this is not needed if the data is read
+ * in binary.
+ */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, print, assign)         \
+enum print_line_t                                                      \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags)       \
+{                                                                      \
+       struct trace_seq *s = &iter->seq;                               \
+       struct ftrace_raw_##call *field;                                \
+       struct trace_entry *entry;                                      \
+       int ret;                                                        \
+                                                                       \
+       entry = iter->ent;                                              \
+                                                                       \
+       if (entry->type != event_##call.id) {                           \
+               WARN_ON_ONCE(1);                                        \
+               return TRACE_TYPE_UNHANDLED;                            \
+       }                                                               \
+                                                                       \
+       field = (typeof(field))entry;                                   \
+                                                                       \
+       ret = trace_seq_printf(s, print);                               \
+       if (!ret)                                                       \
+               return TRACE_TYPE_PARTIAL_LINE;                         \
+                                                                       \
+       return TRACE_TYPE_HANDLED;                                      \
+}
+       
+#include <trace/trace_event_types.h>
+
+/*
+ * Setup the showing format of trace point.
+ *
+ * int
+ * ftrace_format_##call(struct trace_seq *s)
+ * {
+ *     struct ftrace_raw_##call field;
+ *     int ret;
+ *
+ *     ret = trace_seq_printf(s, #type " " #item ";"
+ *                            " size:%d; offset:%d;\n",
+ *                            sizeof(field.type),
+ *                            offsetof(struct ftrace_raw_##call,
+ *                                     item));
+ *
+ * }
+ */
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef __field
+#define __field(type, item)                                    \
+       ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"      \
+                              "offset:%u;\tsize:%u;\n",                \
+                              (unsigned int)offsetof(typeof(field), item), \
+                              (unsigned int)sizeof(field.item));       \
+       if (!ret)                                                       \
+               return 0;
+
+#undef __array
+#define __array(type, item, len)                                               \
+       ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"    \
+                              "offset:%u;\tsize:%u;\n",                \
+                              (unsigned int)offsetof(typeof(field), item), \
+                              (unsigned int)sizeof(field.item));       \
+       if (!ret)                                                       \
+               return 0;
+
+#undef __entry
+#define __entry "REC"
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, print, func)           \
+static int                                                             \
+ftrace_format_##call(struct trace_seq *s)                              \
+{                                                                      \
+       struct ftrace_raw_##call field;                                 \
+       int ret;                                                        \
+                                                                       \
+       tstruct;                                                        \
+                                                                       \
+       trace_seq_printf(s, "\nprint fmt: " print);                     \
+                                                                       \
+       return ret;                                                     \
+}
+
+#include <trace/trace_event_types.h>
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
new file mode 100644
index 0000000..3ba55d4
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Stage 3 of the trace events.
+ *
+ * Override the macros in <trace/trace_event_types.h> to include the following:
+ *
+ * static void ftrace_event_<call>(proto)
+ * {
+ *     event_trace_printk(_RET_IP_, "<call>: " <fmt>);
+ * }
+ *
+ * static int ftrace_reg_event_<call>(void)
+ * {
+ *     int ret;
+ *
+ *     ret = register_trace_<call>(ftrace_event_<call>);
+ *     if (!ret)
+ *     if (ret)
+ *             pr_info("event trace: Could not activate trace point "
+ *                     "probe to <call>");
+ * }
+ *
+ * static void ftrace_unreg_event_<call>(void)
+ * {
+ *     unregister_trace_<call>(ftrace_event_<call>);
+ * }
+ *
+ * For those macros defined with TRACE_FORMAT:
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ *     .name                   = "<call>",
+ *     .regfunc                = ftrace_reg_event_<call>,
+ *     .unregfunc              = ftrace_unreg_event_<call>,
+ * }
+ *
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct ftrace_event_call event_<call>;
+ *
+ * static void ftrace_raw_event_<call>(proto)
+ * {
+ *     struct ring_buffer_event *event;
+ *     struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *     unsigned long irq_flags;
+ *     int pc;
+ *
+ *     local_save_flags(irq_flags);
+ *     pc = preempt_count();
+ *
+ *     event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *                               sizeof(struct ftrace_raw_<call>),
+ *                               irq_flags, pc);
+ *     if (!event)
+ *             return;
+ *     entry   = ring_buffer_event_data(event);
+ *
+ *     <tstruct>;  <-- Here we assign the entries by the TRACE_FIELD.
+ *
+ *     trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ * }
+ *
+ * static int ftrace_raw_reg_event_<call>(void)
+ * {
+ *     int ret;
+ *
+ *     ret = register_trace_<call>(ftrace_raw_event_<call>);
+ *     if (ret)
+ *             pr_info("event trace: Could not activate trace point "
+ *                     "probe to <call>");
+ *     return ret;
+ * }
+ *
+ * static void ftrace_unreg_event_<call>(void)
+ * {
+ *     unregister_trace_<call>(ftrace_raw_event_<call>);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ *     .trace                  = ftrace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static int ftrace_raw_init_event_<call>(void)
+ * {
+ *     int id;
+ *
+ *     id = register_ftrace_event(&ftrace_event_type_<call>);
+ *     if (!id)
+ *             return -ENODEV;
+ *     event_<call>.id = id;
+ *     return 0;
+ * }
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ *     .name                   = "<call>",
+ *     .system                 = __stringify(TRACE_SYSTEM),
+ *     .raw_init               = ftrace_raw_init_event_<call>,
+ *     .regfunc                = ftrace_raw_reg_event_<call>,
+ *     .unregfunc              = ftrace_raw_unreg_event_<call>,
+ *     .show_format            = ftrace_format_<call>,
+ * }
+ *
+ */
+
+#undef TP_FMT
+#define TP_FMT(fmt, args...)   fmt "\n", ##args
+
+#define _TRACE_FORMAT(call, proto, args, fmt)                          \
+static void ftrace_event_##call(proto)                                 \
+{                                                                      \
+       event_trace_printk(_RET_IP_, #call ": " fmt);                   \
+}                                                                      \
+                                                                       \
+static int ftrace_reg_event_##call(void)                               \
+{                                                                      \
+       int ret;                                                        \
+                                                                       \
+       ret = register_trace_##call(ftrace_event_##call);               \
+       if (ret)                                                        \
+               pr_info("event trace: Could not activate trace point "  \
+                       "probe to " #call "\n");                        \
+       return ret;                                                     \
+}                                                                      \
+                                                                       \
+static void ftrace_unreg_event_##call(void)                            \
+{                                                                      \
+       unregister_trace_##call(ftrace_event_##call);                   \
+}                                                                      \
+
+
+#undef TRACE_FORMAT
+#define TRACE_FORMAT(call, proto, args, fmt)                           \
+_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt))          \
+static struct ftrace_event_call __used                                 \
+__attribute__((__aligned__(4)))                                                \
+__attribute__((section("_ftrace_events"))) event_##call = {            \
+       .name                   = #call,                                \
+       .system                 = __stringify(TRACE_SYSTEM),            \
+       .regfunc                = ftrace_reg_event_##call,              \
+       .unregfunc              = ftrace_unreg_event_##call,            \
+}
+
+#undef __entry
+#define __entry entry
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, print, assign)         \
+                                                                       \
+static struct ftrace_event_call event_##call;                          \
+                                                                       \
+static void ftrace_raw_event_##call(proto)                             \
+{                                                                      \
+       struct ring_buffer_event *event;                                \
+       struct ftrace_raw_##call *entry;                                \
+       unsigned long irq_flags;                                        \
+       int pc;                                                         \
+                                                                       \
+       local_save_flags(irq_flags);                                    \
+       pc = preempt_count();                                           \
+                                                                       \
+       event = trace_current_buffer_lock_reserve(event_##call.id,      \
+                                 sizeof(struct ftrace_raw_##call),     \
+                                 irq_flags, pc);                       \
+       if (!event)                                                     \
+               return;                                                 \
+       entry   = ring_buffer_event_data(event);                        \
+                                                                       \
+       assign;                                                         \
+                                                                       \
+       trace_current_buffer_unlock_commit(event, irq_flags, pc);       \
+}                                                                      \
+                                                                       \
+static int ftrace_raw_reg_event_##call(void)                           \
+{                                                                      \
+       int ret;                                                        \
+                                                                       \
+       ret = register_trace_##call(ftrace_raw_event_##call);           \
+       if (ret)                                                        \
+               pr_info("event trace: Could not activate trace point "  \
+                       "probe to " #call "\n");                        \
+       return ret;                                                     \
+}                                                                      \
+                                                                       \
+static void ftrace_raw_unreg_event_##call(void)                                \
+{                                                                      \
+       unregister_trace_##call(ftrace_raw_event_##call);               \
+}                                                                      \
+                                                                       \
+static struct trace_event ftrace_event_type_##call = {                 \
+       .trace                  = ftrace_raw_output_##call,             \
+};                                                                     \
+                                                                       \
+static int ftrace_raw_init_event_##call(void)                          \
+{                                                                      \
+       int id;                                                         \
+                                                                       \
+       id = register_ftrace_event(&ftrace_event_type_##call);          \
+       if (!id)                                                        \
+               return -ENODEV;                                         \
+       event_##call.id = id;                                           \
+       return 0;                                                       \
+}                                                                      \
+                                                                       \
+static struct ftrace_event_call __used                                 \
+__attribute__((__aligned__(4)))                                                \
+__attribute__((section("_ftrace_events"))) event_##call = {            \
+       .name                   = #call,                                \
+       .system                 = __stringify(TRACE_SYSTEM),            \
+       .raw_init               = ftrace_raw_init_event_##call,         \
+       .regfunc                = ftrace_raw_reg_event_##call,          \
+       .unregfunc              = ftrace_raw_unreg_event_##call,        \
+       .show_format            = ftrace_format_##call,                 \
+}
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
new file mode 100644
index 0000000..23ae784
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * trace_export.c - export basic ftrace utilities to user space
+ *
+ * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
+ */
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+
+
+#undef TRACE_STRUCT
+#define TRACE_STRUCT(args...) args
+
+#undef TRACE_FIELD
+#define TRACE_FIELD(type, item, assign)                                        \
+       ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"      \
+                              "offset:%u;\tsize:%u;\n",                \
+                              (unsigned int)offsetof(typeof(field), item), \
+                              (unsigned int)sizeof(field.item));       \
+       if (!ret)                                                       \
+               return 0;
+
+
+#undef TRACE_FIELD_SPECIAL
+#define TRACE_FIELD_SPECIAL(type_item, item, cmd)                      \
+       ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t"   \
+                              "offset:%u;\tsize:%u;\n",                \
+                              (unsigned int)offsetof(typeof(field), item), \
+                              (unsigned int)sizeof(field.item));       \
+       if (!ret)                                                       \
+               return 0;
+
+#undef TRACE_FIELD_ZERO_CHAR
+#define TRACE_FIELD_ZERO_CHAR(item)                                    \
+       ret = trace_seq_printf(s, "\tfield: char " #item ";\t"          \
+                              "offset:%u;\tsize:0;\n",                 \
+                              (unsigned int)offsetof(typeof(field), item)); \
+       if (!ret)                                                       \
+               return 0;
+
+
+#undef TP_RAW_FMT
+#define TP_RAW_FMT(args...) args
+
+#undef TRACE_EVENT_FORMAT
+#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)     \
+static int                                                             \
+ftrace_format_##call(struct trace_seq *s)                              \
+{                                                                      \
+       struct args field;                                              \
+       int ret;                                                        \
+                                                                       \
+       tstruct;                                                        \
+                                                                       \
+       ret = trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt);      \
+                                                                       \
+       return ret;                                                     \
+}
+
+#include "trace_event_types.h"
+
+#undef TRACE_ZERO_CHAR
+#define TRACE_ZERO_CHAR(arg)
+
+#undef TRACE_FIELD
+#define TRACE_FIELD(type, item, assign)\
+       entry->item = assign;
+
+#undef TP_CMD
+#define TP_CMD(cmd...) cmd
+
+#undef TRACE_ENTRY
+#define TRACE_ENTRY    entry
+
+#undef TRACE_FIELD_SPECIAL
+#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
+       cmd;
+
+#undef TRACE_EVENT_FORMAT
+#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)     \
+                                                                       \
+static struct ftrace_event_call __used                                 \
+__attribute__((__aligned__(4)))                                                \
+__attribute__((section("_ftrace_events"))) event_##call = {            \
+       .name                   = #call,                                \
+       .id                     = proto,                                \
+       .system                 = __stringify(TRACE_SYSTEM),            \
+       .show_format            = ftrace_format_##call,                 \
+}
+#include "trace_event_types.h"
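
trace_export.c leans on the multiple-inclusion trick: the same trace_event_types.h is pulled in twice, each time with TRACE_EVENT_FORMAT redefined, so a single event description expands first into a format-printing function and then into the event descriptor. A stripped-down userspace sketch of that X-macro pattern follows (the event list, field layout and printed text are invented for the example):

  #include <stdio.h>

  /* One central description of the "events" (normally in its own header). */
  #define DEMO_EVENT_LIST(EV)            \
          EV(open,  int,  fd)            \
          EV(write, long, bytes)

  /* Pass 1: expand the list into per-event format-dumping functions. */
  #define EV(name, type, field)                                             \
          static void show_format_##name(void)                              \
          {                                                                 \
                  printf("event %-5s field:" #type " " #field "\n", #name); \
          }
  DEMO_EVENT_LIST(EV)
  #undef EV

  /* Pass 2: expand the same list into a dispatch table. */
  typedef void (*show_fn)(void);
  #define EV(name, type, field) show_format_##name,
  static show_fn show_table[] = { DEMO_EVENT_LIST(EV) };
  #undef EV

  int main(void)
  {
          unsigned i;

          for (i = 0; i < sizeof(show_table) / sizeof(show_table[0]); i++)
                  show_table[i]();
          return 0;
  }

One description, several expansions: that is what lets a new tracepoint get a format file, a registration hook and an ID without duplicating its definition.
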
index 9236d7e..c9a0b7d 100644 (file)
@@ -9,6 +9,7 @@
  *  Copyright (C) 2004-2006 Ingo Molnar
  *  Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 
 #include "trace.h"
 
-static void start_function_trace(struct trace_array *tr)
+/* function tracing enabled */
+static int                     ftrace_function_enabled;
+
+static struct trace_array      *func_trace;
+
+static void tracing_start_function_trace(void);
+static void tracing_stop_function_trace(void);
+
+static int function_trace_init(struct trace_array *tr)
 {
+       func_trace = tr;
        tr->cpu = get_cpu();
-       tracing_reset_online_cpus(tr);
        put_cpu();
 
        tracing_start_cmdline_record();
        tracing_start_function_trace();
+       return 0;
 }
 
-static void stop_function_trace(struct trace_array *tr)
+static void function_trace_reset(struct trace_array *tr)
 {
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
 }
 
-static int function_trace_init(struct trace_array *tr)
+static void function_trace_start(struct trace_array *tr)
 {
-       start_function_trace(tr);
-       return 0;
+       tracing_reset_online_cpus(tr);
 }
 
-static void function_trace_reset(struct trace_array *tr)
+static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = func_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu, resched;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       pc = preempt_count();
+       resched = ftrace_preempt_disable();
+       local_save_flags(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               trace_function(tr, ip, parent_ip, flags, pc);
+
+       atomic_dec(&data->disabled);
+       ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-       stop_function_trace(tr);
+       struct trace_array *tr = func_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       /*
+        * Need to use raw, since this must be called before the
+        * recursive protection is performed.
+        */
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               trace_function(tr, ip, parent_ip, flags, pc);
+       }
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
 }
 
-static void function_trace_start(struct trace_array *tr)
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-       tracing_reset_online_cpus(tr);
+       struct trace_array *tr = func_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       /*
+        * Need to use raw, since this must be called before the
+        * recursive protection is performed.
+        */
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               trace_function(tr, ip, parent_ip, flags, pc);
+               /*
+                * skip over 5 funcs:
+                *    __ftrace_trace_stack,
+                *    __trace_stack,
+                *    function_stack_trace_call
+                *    ftrace_list_func
+                *    ftrace_call
+                */
+               __trace_stack(tr, flags, 5, pc);
+       }
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
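
All three callbacks above use the same guard: bump a per-cpu "disabled" counter and only record when the increment returns 1, so a trace hook that ends up re-entering itself (or racing with a path that disabled the buffer) falls through instead of recursing. A minimal single-process sketch of that guard, using a thread-local counter in place of the per-cpu data (names and the record() body are illustrative only):

  #include <stdatomic.h>
  #include <stdio.h>

  static _Thread_local atomic_long disabled;

  static void trace_hook(const char *what);

  static void record(const char *what)
  {
          printf("trace: %s\n", what);
          /* If recording itself hits an instrumented path, the guard stops it. */
          trace_hook("nested call (suppressed)");
  }

  static void trace_hook(const char *what)
  {
          /* First entry sees 1; any nested call sees >1 and bails out. */
          long nested = atomic_fetch_add(&disabled, 1) + 1;

          if (nested == 1)
                  record(what);

          atomic_fetch_sub(&disabled, 1);
  }

  int main(void)
  {
          trace_hook("top-level call");   /* only this one is recorded */
          return 0;
  }
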
+
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+       .func = function_trace_call,
+};
+
+static struct ftrace_ops trace_stack_ops __read_mostly =
+{
+       .func = function_stack_trace_call,
+};
+
+/* Our two options */
+enum {
+       TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_opt func_opts[] = {
+#ifdef CONFIG_STACKTRACE
+       { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+       { } /* Always set a last empty entry */
+};
+
+static struct tracer_flags func_flags = {
+       .val = 0, /* By default: all flags disabled */
+       .opts = func_opts
+};
+
+static void tracing_start_function_trace(void)
+{
+       ftrace_function_enabled = 0;
+
+       if (trace_flags & TRACE_ITER_PREEMPTONLY)
+               trace_ops.func = function_trace_call_preempt_only;
+       else
+               trace_ops.func = function_trace_call;
+
+       if (func_flags.val & TRACE_FUNC_OPT_STACK)
+               register_ftrace_function(&trace_stack_ops);
+       else
+               register_ftrace_function(&trace_ops);
+
+       ftrace_function_enabled = 1;
+}
+
+static void tracing_stop_function_trace(void)
+{
+       ftrace_function_enabled = 0;
+       /* OK if they are not registered */
+       unregister_ftrace_function(&trace_stack_ops);
+       unregister_ftrace_function(&trace_ops);
+}
+
+static int func_set_flag(u32 old_flags, u32 bit, int set)
+{
+       if (bit == TRACE_FUNC_OPT_STACK) {
+               /* do nothing if already set */
+               if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
+                       return 0;
+
+               if (set) {
+                       unregister_ftrace_function(&trace_ops);
+                       register_ftrace_function(&trace_stack_ops);
+               } else {
+                       unregister_ftrace_function(&trace_stack_ops);
+                       register_ftrace_function(&trace_ops);
+               }
+
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static struct tracer function_trace __read_mostly =
 {
-       .name        = "function",
-       .init        = function_trace_init,
-       .reset       = function_trace_reset,
-       .start       = function_trace_start,
+       .name           = "function",
+       .init           = function_trace_init,
+       .reset          = function_trace_reset,
+       .start          = function_trace_start,
+       .wait_pipe      = poll_wait_pipe,
+       .flags          = &func_flags,
+       .set_flag       = func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
-       .selftest    = trace_selftest_startup_function,
+       .selftest       = trace_selftest_startup_function,
 #endif
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       long *count = (long *)data;
+
+       if (tracing_is_on())
+               return;
+
+       if (!*count)
+               return;
+
+       if (*count != -1)
+               (*count)--;
+
+       tracing_on();
+}
+
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       long *count = (long *)data;
+
+       if (!tracing_is_on())
+               return;
+
+       if (!*count)
+               return;
+
+       if (*count != -1)
+               (*count)--;
+
+       tracing_off();
+}
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+                        struct ftrace_probe_ops *ops, void *data);
+
+static struct ftrace_probe_ops traceon_probe_ops = {
+       .func                   = ftrace_traceon,
+       .print                  = ftrace_trace_onoff_print,
+};
+
+static struct ftrace_probe_ops traceoff_probe_ops = {
+       .func                   = ftrace_traceoff,
+       .print                  = ftrace_trace_onoff_print,
+};
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+                        struct ftrace_probe_ops *ops, void *data)
+{
+       char str[KSYM_SYMBOL_LEN];
+       long count = (long)data;
+
+       kallsyms_lookup(ip, NULL, NULL, NULL, str);
+       seq_printf(m, "%s:", str);
+
+       if (ops == &traceon_probe_ops)
+               seq_printf(m, "traceon");
+       else
+               seq_printf(m, "traceoff");
+
+       if (count == -1)
+               seq_printf(m, ":unlimited");
+       else
+               seq_printf(m, ":count=%ld", count);
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
+static int
+ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+{
+       struct ftrace_probe_ops *ops;
+
+       /* we register both traceon and traceoff to this callback */
+       if (strcmp(cmd, "traceon") == 0)
+               ops = &traceon_probe_ops;
+       else
+               ops = &traceoff_probe_ops;
+
+       unregister_ftrace_function_probe_func(glob, ops);
+
+       return 0;
+}
+
+static int
+ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+{
+       struct ftrace_probe_ops *ops;
+       void *count = (void *)-1;
+       char *number;
+       int ret;
+
+       /* hash funcs only work with set_ftrace_filter */
+       if (!enable)
+               return -EINVAL;
+
+       if (glob[0] == '!')
+               return ftrace_trace_onoff_unreg(glob+1, cmd, param);
+
+       /* we register both traceon and traceoff to this callback */
+       if (strcmp(cmd, "traceon") == 0)
+               ops = &traceon_probe_ops;
+       else
+               ops = &traceoff_probe_ops;
+
+       if (!param)
+               goto out_reg;
+
+       number = strsep(&param, ":");
+
+       if (!strlen(number))
+               goto out_reg;
+
+       /*
+        * We use the callback data field (which is a pointer)
+        * as our counter.
+        */
+       ret = strict_strtoul(number, 0, (unsigned long *)&count);
+       if (ret)
+               return ret;
+
+ out_reg:
+       ret = register_ftrace_function_probe(glob, ops, count);
+
+       return ret;
+}
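
This callback runs when a probe command is written through the function filter, in the usual func:command[:count] form (for example, echo 'schedule:traceoff:5' > set_ftrace_filter should disarm tracing the fifth time schedule() is hit; that syntax follows the ftrace documentation of the time, so treat the exact example as indicative). A userspace sketch of the same colon-separated parse, with strtol standing in for strict_strtoul and invented helper names:

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Parse "command[:count]"; count defaults to -1, meaning "unlimited". */
  static int parse_probe_param(char *param, char **cmd, long *count)
  {
          char *number;

          *count = -1;
          *cmd = strsep(&param, ":");
          if (!*cmd || !**cmd)
                  return -1;

          number = param ? strsep(&param, ":") : NULL;
          if (number && *number)
                  *count = strtol(number, NULL, 0);

          return 0;
  }

  int main(void)
  {
          char buf[] = "traceoff:5";
          char *cmd;
          long count;

          if (!parse_probe_param(buf, &cmd, &count))
                  printf("cmd=%s count=%ld\n", cmd, count);
          return 0;
  }
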
+
+static struct ftrace_func_command ftrace_traceon_cmd = {
+       .name                   = "traceon",
+       .func                   = ftrace_trace_onoff_callback,
+};
+
+static struct ftrace_func_command ftrace_traceoff_cmd = {
+       .name                   = "traceoff",
+       .func                   = ftrace_trace_onoff_callback,
+};
+
+static int __init init_func_cmd_traceon(void)
+{
+       int ret;
+
+       ret = register_ftrace_command(&ftrace_traceoff_cmd);
+       if (ret)
+               return ret;
+
+       ret = register_ftrace_command(&ftrace_traceon_cmd);
+       if (ret)
+               unregister_ftrace_command(&ftrace_traceoff_cmd);
+       return ret;
+}
+#else
+static inline int init_func_cmd_traceon(void)
+{
+       return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 static __init int init_function_trace(void)
 {
+       init_func_cmd_traceon();
        return register_tracer(&function_trace);
 }
-
 device_initcall(init_function_trace);
+
index 930c08e..35257be 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *
  * Function graph tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
  * Mostly borrowed from function tracer which
  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
  *
@@ -12,6 +12,7 @@
 #include <linux/fs.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 #define TRACE_GRAPH_INDENT     2
 
 #define TRACE_GRAPH_PRINT_CPU          0x2
 #define TRACE_GRAPH_PRINT_OVERHEAD     0x4
 #define TRACE_GRAPH_PRINT_PROC         0x8
+#define TRACE_GRAPH_PRINT_DURATION     0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME     0x20
 
 static struct tracer_opt trace_opts[] = {
-       /* Display overruns ? */
+       /* Display overruns? (for self-debugging purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -30,26 +33,101 @@ static struct tracer_opt trace_opts[] = {
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
+       /* Display duration of execution */
+       { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
+       /* Display absolute time of an entry */
+       { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
-       .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
+       .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
+              TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
 };
 
 /* pid on the last trace processed */
-static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
-static int graph_trace_init(struct trace_array *tr)
+
+/* Add a function return address to the trace stack on thread info. */
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                        unsigned long func, int *depth)
+{
+       int index;
+
+       if (!current->ret_stack)
+               return -EBUSY;
+
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+               atomic_inc(&current->trace_overrun);
+               return -EBUSY;
+       }
+
+       index = ++current->curr_ret_stack;
+       barrier();
+       current->ret_stack[index].ret = ret;
+       current->ret_stack[index].func = func;
+       current->ret_stack[index].calltime = time;
+       *depth = index;
+
+       return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+       int index;
+
+       index = current->curr_ret_stack;
+
+       if (unlikely(index < 0)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic, otherwise we have nowhere to go */
+               *ret = (unsigned long)panic;
+               return;
+       }
+
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
+       barrier();
+       current->curr_ret_stack--;
+
+}
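
ftrace_push_return_trace()/ftrace_pop_return_trace() maintain a per-task shadow stack: on function entry the real return address and a timestamp are pushed, and on exit they are popped so the return trampoline knows where to really return and how long the call took. A toy, single-threaded sketch of that shadow stack (fixed depth and field names are invented; the kernel versions live on struct task_struct):

  #include <stdio.h>

  #define RET_DEPTH 32

  struct ret_frame {
          unsigned long ret;              /* saved return address */
          unsigned long long calltime;
  };

  static struct ret_frame ret_stack[RET_DEPTH];
  static int curr_ret = -1;
  static unsigned long overrun;

  static int push_return(unsigned long ret, unsigned long long now)
  {
          if (curr_ret >= RET_DEPTH - 1) {
                  overrun++;
                  return -1;              /* stack full: this call is lost */
          }
          curr_ret++;
          ret_stack[curr_ret].ret = ret;
          ret_stack[curr_ret].calltime = now;
          return 0;
  }

  static unsigned long pop_return(unsigned long long now,
                                  unsigned long long *duration)
  {
          struct ret_frame *f = &ret_stack[curr_ret--];

          *duration = now - f->calltime;
          return f->ret;                  /* where the traced function returns */
  }

  int main(void)
  {
          unsigned long long duration;
          unsigned long ret;

          push_return(0x1000, 100);               /* entry at t=100 */
          ret = pop_return(130, &duration);       /* exit at t=130 */
          printf("return to %#lx, took %llu units\n", ret, duration);
          return 0;
  }
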
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
 {
-       int cpu, ret;
+       struct ftrace_graph_ret trace;
+       unsigned long ret;
+
+       ftrace_pop_return_trace(&trace, &ret);
+       trace.rettime = trace_clock_local();
+       ftrace_graph_return(&trace);
+
+       if (unlikely(!ret)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic. What else to do? */
+               ret = (unsigned long)panic;
+       }
 
-       for_each_online_cpu(cpu)
-               tracing_reset(tr, cpu);
+       return ret;
+}
 
-       ret = register_ftrace_graph(&trace_graph_return,
+static int graph_trace_init(struct trace_array *tr)
+{
+       int ret = register_ftrace_graph(&trace_graph_return,
                                        &trace_graph_entry);
        if (ret)
                return ret;
@@ -153,17 +231,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 {
        pid_t prev_pid;
+       pid_t *last_pid;
        int ret;
 
-       if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+       if (!last_pids_cpu)
+               return TRACE_TYPE_HANDLED;
+
+       last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+
+       if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;
 
-       prev_pid = last_pid[cpu];
-       last_pid[cpu] = pid;
+       prev_pid = *last_pid;
+       *last_pid = pid;
 
+       if (prev_pid == -1)
+               return TRACE_TYPE_HANDLED;
 /*
  * Context-switch trace line:
 
@@ -175,34 +261,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
-               TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_PARTIAL_LINE;
 
        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
-               TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_PARTIAL_LINE;
 
        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
-               TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_PARTIAL_LINE;
 
        ret = trace_seq_printf(s, " => ");
        if (!ret)
-               TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_PARTIAL_LINE;
 
        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
-               TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_PARTIAL_LINE;
 
        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
-               TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_PARTIAL_LINE;
 
-       return ret;
+       return TRACE_TYPE_HANDLED;
 }
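
This hunk also fixes a run of bare TRACE_TYPE_PARTIAL_LINE statements that were missing their return, which is exactly what the trace_seq convention relies on: every write into the bounded per-line buffer reports whether it fit, and the printer bails out with a partial line as soon as one write fails. A bounded sequence buffer with that convention, sketched in userspace C (struct, size and helper names are invented):

  #include <stdarg.h>
  #include <stdio.h>
  #include <string.h>

  struct seq_buf {
          char buf[64];
          size_t len;
  };

  /* Returns 1 if the whole string fit, 0 otherwise (caller must bail out). */
  static int seq_printf(struct seq_buf *s, const char *fmt, ...)
  {
          va_list ap;
          int ret;

          va_start(ap, fmt);
          ret = vsnprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, ap);
          va_end(ap);

          if (ret < 0 || (size_t)ret >= sizeof(s->buf) - s->len)
                  return 0;
          s->len += ret;
          return 1;
  }

  int main(void)
  {
          struct seq_buf s = { .len = 0 };

          if (!seq_printf(&s, " ------------------\n") ||
              !seq_printf(&s, " %d) => %d\n", 1, 42))
                  return 1;       /* would map to TRACE_TYPE_PARTIAL_LINE */
          fputs(s.buf, stdout);
          return 0;
  }
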
 
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
 {
        struct ring_buffer_iter *ring_iter;
@@ -211,65 +297,123 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 
        ring_iter = iter->buffer_iter[iter->cpu];
 
-       if (!ring_iter)
-               return false;
-
-       event = ring_buffer_iter_peek(ring_iter, NULL);
+       /* First peek to compare current entry and the next one */
+       if (ring_iter)
+               event = ring_buffer_iter_peek(ring_iter, NULL);
+       else {
+               /* We need to consume the current entry to see the next one */
+               ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+               event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+                                       NULL);
+       }
 
        if (!event)
-               return false;
+               return NULL;
 
        next = ring_buffer_event_data(event);
 
        if (next->ent.type != TRACE_GRAPH_RET)
-               return false;
+               return NULL;
 
        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
-               return false;
+               return NULL;
+
+       /* this is a leaf, now advance the iterator */
+       if (ring_iter)
+               ring_buffer_read(ring_iter, NULL);
 
-       return true;
+       return next;
+}
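
get_return_for_leaf() peeks at the next event in the ring buffer: if it is the matching return for the current entry (same pid, same function), the two records are folded into one leaf line such as "func();" instead of an open/close pair. A small array-based sketch of that peek-and-merge decision (event layout and names are invented):

  #include <stdio.h>
  #include <string.h>

  enum ev_type { EV_ENTRY, EV_RETURN };

  struct ev {
          enum ev_type type;
          int pid;
          const char *func;
  };

  /* Returns the matching return event if the entry is a leaf, else NULL. */
  static const struct ev *return_for_leaf(const struct ev *cur,
                                          const struct ev *next,
                                          const struct ev *end)
  {
          if (next >= end || next->type != EV_RETURN)
                  return NULL;
          if (next->pid != cur->pid || strcmp(next->func, cur->func))
                  return NULL;
          return next;
  }

  int main(void)
  {
          struct ev evs[] = {
                  { EV_ENTRY,  1, "do_work" },
                  { EV_RETURN, 1, "do_work" },    /* immediate return: leaf */
                  { EV_ENTRY,  1, "outer" },
                  { EV_ENTRY,  1, "inner" },      /* nested: not a leaf yet */
          };
          const struct ev *end = evs + sizeof(evs) / sizeof(evs[0]);
          const struct ev *e;

          for (e = evs; e < end; e++) {
                  if (e->type != EV_ENTRY)
                          continue;
                  if (return_for_leaf(e, e + 1, end))
                          printf("%s();\n", e->func);
                  else
                          printf("%s() {\n", e->func);
          }
          return 0;
  }
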
+
+/* Signal an execution time overhead to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+       /* If duration is disabled, we don't need anything */
+       if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+               return 1;
+
+       /* Non-nested entry or return */
+       if (duration == -1)
+               return trace_seq_printf(s, "  ");
+
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+               /* Duration exceeded 100 msecs */
+               if (duration > 100000ULL)
+                       return trace_seq_printf(s, "! ");
+
+               /* Duration exceeded 10 msecs */
+               if (duration > 10000ULL)
+                       return trace_seq_printf(s, "+ ");
+       }
+
+       return trace_seq_printf(s, "  ");
+}
+
+static int print_graph_abs_time(u64 t, struct trace_seq *s)
+{
+       unsigned long usecs_rem;
+
+       usecs_rem = do_div(t, NSEC_PER_SEC);
+       usecs_rem /= 1000;
+
+       return trace_seq_printf(s, "%5lu.%06lu |  ",
+                       (unsigned long)t, usecs_rem);
 }
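
print_graph_abs_time() turns the raw nanosecond timestamp into the sec.usec column; do_div() is the kernel's 64-by-32 division helper that also hands back the remainder. The same formatting in plain userspace C, with ordinary 64-bit division standing in for do_div (constants and the sample value are illustrative):

  #include <inttypes.h>
  #include <stdio.h>

  #define NSEC_PER_SEC 1000000000ULL

  static void print_abs_time(uint64_t t_ns)
  {
          uint64_t secs  = t_ns / NSEC_PER_SEC;
          uint64_t usecs = (t_ns % NSEC_PER_SEC) / 1000;

          printf("%5" PRIu64 ".%06" PRIu64 " |  ", secs, usecs);
  }

  int main(void)
  {
          print_abs_time(1234567890123ULL);       /* -> " 1234.567890 |  " */
          printf("\n");
          return 0;
  }
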
 
 static enum print_line_t
-print_graph_irq(struct trace_seq *s, unsigned long addr,
-                               enum trace_type type, int cpu, pid_t pid)
+print_graph_irq(struct trace_iterator *iter, unsigned long addr,
+               enum trace_type type, int cpu, pid_t pid)
 {
        int ret;
+       struct trace_seq *s = &iter->seq;
 
        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;
 
-       if (type == TRACE_GRAPH_ENT) {
-               ret = trace_seq_printf(s, "==========> |  ");
-       } else {
-               /* Cpu */
-               if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-                       ret = print_graph_cpu(s, cpu);
-                       if (ret == TRACE_TYPE_PARTIAL_LINE)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
-               /* Proc */
-               if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-                       ret = print_graph_proc(s, pid);
-                       if (ret == TRACE_TYPE_PARTIAL_LINE)
-                               return TRACE_TYPE_PARTIAL_LINE;
+       /* Absolute time */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+               ret = print_graph_abs_time(iter->ts, s);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
 
-                       ret = trace_seq_printf(s, " | ");
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
+       /* Cpu */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+               ret = print_graph_cpu(s, cpu);
+               if (ret == TRACE_TYPE_PARTIAL_LINE)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+       /* Proc */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+               ret = print_graph_proc(s, pid);
+               if (ret == TRACE_TYPE_PARTIAL_LINE)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               ret = trace_seq_printf(s, " | ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
 
-               /* No overhead */
-               if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                       ret = trace_seq_printf(s, "  ");
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
+       /* No overhead */
+       ret = print_graph_overhead(-1, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (type == TRACE_GRAPH_ENT)
+               ret = trace_seq_printf(s, "==========>");
+       else
+               ret = trace_seq_printf(s, "<==========");
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Don't close the duration column if we don't have one */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+               trace_seq_printf(s, " |");
+       ret = trace_seq_printf(s, "\n");
 
-               ret = trace_seq_printf(s, "<========== |\n");
-       }
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
@@ -288,7 +432,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
        sprintf(msecs_str, "%lu", (unsigned long) duration);
 
        /* Print msecs */
-       ret = trace_seq_printf(s, msecs_str);
+       ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
@@ -321,51 +465,33 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
 }
 
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
-{
-       /* Duration exceeded 100 msecs */
-       if (duration > 100000ULL)
-               return trace_seq_printf(s, "! ");
-
-       /* Duration exceeded 10 msecs */
-       if (duration > 10000ULL)
-               return trace_seq_printf(s, "+ ");
-
-       return trace_seq_printf(s, "  ");
-}
-
 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
-               struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+               struct ftrace_graph_ent_entry *entry,
+               struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
-       struct ftrace_graph_ret_entry *ret_entry;
        struct ftrace_graph_ret *graph_ret;
-       struct ring_buffer_event *event;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;
 
-       event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-       ret_entry = ring_buffer_event_data(event);
        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;
 
        /* Overhead */
-       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-               ret = print_graph_overhead(duration, s);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-       }
+       ret = print_graph_overhead(duration, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
 
        /* Duration */
-       ret = print_graph_duration(duration, s);
-       if (ret == TRACE_TYPE_PARTIAL_LINE)
-               return TRACE_TYPE_PARTIAL_LINE;
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+               ret = print_graph_duration(duration, s);
+               if (ret == TRACE_TYPE_PARTIAL_LINE)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
 
        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -394,25 +520,17 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
        struct ftrace_graph_ent *call = &entry->graph_ent;
 
        /* No overhead */
-       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-               ret = trace_seq_printf(s, "  ");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-       }
+       ret = print_graph_overhead(-1, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
 
-       /* Interrupt */
-       ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
-       if (ret == TRACE_TYPE_UNHANDLED) {
-               /* No time */
+       /* No time */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
-       } else {
-               if (ret == TRACE_TYPE_PARTIAL_LINE)
-                       return TRACE_TYPE_PARTIAL_LINE;
        }
 
-
        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
@@ -428,20 +546,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
-       return TRACE_TYPE_HANDLED;
+       /*
+        * we already consumed the current entry to check the next one
+        * and see if this is a leaf.
+        */
+       return TRACE_TYPE_NO_CONSUME;
 }
 
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-                       struct trace_iterator *iter, int cpu)
+                       struct trace_iterator *iter)
 {
        int ret;
+       int cpu = iter->cpu;
+       pid_t *last_entry = iter->private;
        struct trace_entry *ent = iter->ent;
+       struct ftrace_graph_ent *call = &field->graph_ent;
+       struct ftrace_graph_ret_entry *leaf_ret;
 
        /* Pid */
-       if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+       if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Interrupt */
+       ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
+       if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;
 
+       /* Absolute time */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+               ret = print_graph_abs_time(iter->ts, s);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
@@ -460,8 +598,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        return TRACE_TYPE_PARTIAL_LINE;
        }
 
-       if (trace_branch_is_leaf(iter, field))
-               return print_graph_entry_leaf(iter, field, s);
+       leaf_ret = get_return_for_leaf(iter, field);
+       if (leaf_ret)
+               return print_graph_entry_leaf(iter, field, leaf_ret, s);
        else
                return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
 
@@ -469,16 +608,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 
 static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
-                  struct trace_entry *ent, int cpu)
+                  struct trace_entry *ent, struct trace_iterator *iter)
 {
        int i;
        int ret;
+       int cpu = iter->cpu;
+       pid_t *last_pid = iter->private, pid = ent->pid;
        unsigned long long duration = trace->rettime - trace->calltime;
 
        /* Pid */
-       if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+       if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;
 
+       /* Absolute time */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+               ret = print_graph_abs_time(iter->ts, s);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
@@ -498,16 +646,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
        }
 
        /* Overhead */
-       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-               ret = print_graph_overhead(duration, s);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-       }
+       ret = print_graph_overhead(duration, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
 
        /* Duration */
-       ret = print_graph_duration(duration, s);
-       if (ret == TRACE_TYPE_PARTIAL_LINE)
-               return TRACE_TYPE_PARTIAL_LINE;
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+               ret = print_graph_duration(duration, s);
+               if (ret == TRACE_TYPE_PARTIAL_LINE)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
 
        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -528,7 +676,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                        return TRACE_TYPE_PARTIAL_LINE;
        }
 
-       ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
+       ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;
 
@@ -541,14 +689,23 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 {
        int i;
        int ret;
+       int cpu = iter->cpu;
+       pid_t *last_pid = iter->private;
 
        /* Pid */
-       if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
+       if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;
 
+       /* Absolute time */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+               ret = print_graph_abs_time(iter->ts, s);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-               ret = print_graph_cpu(s, iter->cpu);
+               ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
@@ -565,17 +722,17 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
        }
 
        /* No overhead */
-       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-               ret = trace_seq_printf(s, "  ");
+       ret = print_graph_overhead(-1, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* No time */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+               ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
 
-       /* No time */
-       ret = trace_seq_printf(s, "            |  ");
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
        /* Indentation */
        if (trace->depth > 0)
                for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
@@ -585,12 +742,19 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
                }
 
        /* The comment */
-       ret = trace_seq_printf(s, "/* %s", trace->buf);
+       ret = trace_seq_printf(s, "/* ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
-       if (ent->flags & TRACE_FLAG_CONT)
-               trace_seq_print_cont(s, iter);
+       /* Strip ending newline */
+       if (s->buffer[s->len - 1] == '\n') {
+               s->buffer[s->len - 1] = '\0';
+               s->len--;
+       }
 
        ret = trace_seq_printf(s, " */\n");
        if (!ret)
@@ -610,13 +774,12 @@ print_graph_function(struct trace_iterator *iter)
        case TRACE_GRAPH_ENT: {
                struct ftrace_graph_ent_entry *field;
                trace_assign_type(field, entry);
-               return print_graph_entry(field, s, iter,
-                                        iter->cpu);
+               return print_graph_entry(field, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
-               return print_graph_return(&field->ret, s, entry, iter->cpu);
+               return print_graph_return(&field->ret, s, entry, iter);
        }
        case TRACE_PRINT: {
                struct print_entry *field;
@@ -632,33 +795,64 @@ static void print_graph_headers(struct seq_file *s)
 {
        /* 1st line */
        seq_printf(s, "# ");
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+               seq_printf(s, "     TIME       ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-               seq_printf(s, "CPU ");
+               seq_printf(s, "CPU");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-               seq_printf(s, "TASK/PID     ");
-       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
-               seq_printf(s, "OVERHEAD/");
-       seq_printf(s, "DURATION            FUNCTION CALLS\n");
+               seq_printf(s, "  TASK/PID      ");
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+               seq_printf(s, "  DURATION   ");
+       seq_printf(s, "               FUNCTION CALLS\n");
 
        /* 2nd line */
        seq_printf(s, "# ");
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+               seq_printf(s, "      |         ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-               seq_printf(s, "|   ");
+               seq_printf(s, "|  ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-               seq_printf(s, "|      |     ");
-       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-               seq_printf(s, "|        ");
-               seq_printf(s, "|                   |   |   |   |\n");
-       } else
-               seq_printf(s, "    |               |   |   |   |\n");
+               seq_printf(s, "  |    |        ");
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+               seq_printf(s, "   |   |      ");
+       seq_printf(s, "               |   |   |   |\n");
 }
+
+static void graph_trace_open(struct trace_iterator *iter)
+{
+       /* pid on the last trace processed */
+       pid_t *last_pid = alloc_percpu(pid_t);
+       int cpu;
+
+       if (!last_pid)
+               pr_warning("function graph tracer: not enough memory\n");
+       else
+               for_each_possible_cpu(cpu) {
+                       pid_t *pid = per_cpu_ptr(last_pid, cpu);
+                       *pid = -1;
+               }
+
+       iter->private = last_pid;
+}
+
+static void graph_trace_close(struct trace_iterator *iter)
+{
+       free_percpu(iter->private);
+}
+
 static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
+       .open           = graph_trace_open,
+       .close          = graph_trace_close,
+       .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
+#ifdef CONFIG_FTRACE_SELFTEST
+       .selftest       = trace_selftest_startup_function_graph,
+#endif
 };
 
 static __init int init_graph_trace(void)
index 649df22..7bfdf4c 100644 (file)
@@ -1,30 +1,53 @@
 /*
  * h/w branch tracer for x86 based on bts
  *
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
- *
+ * Copyright (C) 2008-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
  */
-
-#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
-#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
 
 #include <asm/ds.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 
 #define SIZEOF_BTS (1 << 13)
 
+/*
+ * The tracer lock protects the per-cpu tracer array below.
+ * It needs to be held to:
+ * - start tracing on all cpus
+ * - stop tracing on all cpus
+ * - start tracing on a single hotplug cpu
+ * - stop tracing on a single hotplug cpu
+ * - read the trace from all cpus
+ * - read the trace from a single cpu
+ */
+static DEFINE_SPINLOCK(bts_tracer_lock);
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
 static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 
 #define this_tracer per_cpu(tracer, smp_processor_id())
 #define this_buffer per_cpu(buffer, smp_processor_id())
 
+static int __read_mostly trace_hw_branches_enabled;
+static struct trace_array *hw_branch_trace __read_mostly;
+
 
+/*
+ * Start tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_lock must be locked.
+ */
 static void bts_trace_start_cpu(void *arg)
 {
        if (this_tracer)
@@ -42,14 +65,20 @@ static void bts_trace_start_cpu(void *arg)
 
 static void bts_trace_start(struct trace_array *tr)
 {
-       int cpu;
+       spin_lock(&bts_tracer_lock);
 
-       tracing_reset_online_cpus(tr);
+       on_each_cpu(bts_trace_start_cpu, NULL, 1);
+       trace_hw_branches_enabled = 1;
 
-       for_each_cpu(cpu, cpu_possible_mask)
-               smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+       spin_unlock(&bts_tracer_lock);
 }
 
+/*
+ * Stop tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_lock must be locked.
+ */
 static void bts_trace_stop_cpu(void *arg)
 {
        if (this_tracer) {
@@ -60,26 +89,60 @@ static void bts_trace_stop_cpu(void *arg)
 
 static void bts_trace_stop(struct trace_array *tr)
 {
-       int cpu;
+       spin_lock(&bts_tracer_lock);
+
+       trace_hw_branches_enabled = 0;
+       on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+
+       spin_unlock(&bts_tracer_lock);
+}
+
+static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
+                                    unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
 
-       for_each_cpu(cpu, cpu_possible_mask)
+       spin_lock(&bts_tracer_lock);
+
+       if (!trace_hw_branches_enabled)
+               goto out;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
+               smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+               break;
+       case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+               break;
+       }
+
+ out:
+       spin_unlock(&bts_tracer_lock);
+       return NOTIFY_DONE;
 }
 
+static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
+       .notifier_call = bts_hotcpu_handler
+};
+
 static int bts_trace_init(struct trace_array *tr)
 {
-       tracing_reset_online_cpus(tr);
+       hw_branch_trace = tr;
+
        bts_trace_start(tr);
 
        return 0;
 }
 
+static void bts_trace_reset(struct trace_array *tr)
+{
+       bts_trace_stop(tr);
+}
+
 static void bts_trace_print_header(struct seq_file *m)
 {
-       seq_puts(m,
-                "# CPU#        FROM                   TO         FUNCTION\n");
-       seq_puts(m,
-                "#  |           |                     |             |\n");
+       seq_puts(m, "# CPU#        TO  <-  FROM\n");
 }
 
 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
@@ -87,15 +150,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
        struct trace_entry *entry = iter->ent;
        struct trace_seq *seq = &iter->seq;
        struct hw_branch_entry *it;
+       unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 
        trace_assign_type(it, entry);
 
        if (entry->type == TRACE_HW_BRANCHES) {
-               if (trace_seq_printf(seq, "%4d  ", entry->cpu) &&
-                   trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
-                                    it->from, it->to) &&
-                   (!it->from ||
-                    seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
+               if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
+                   seq_print_ip_sym(seq, it->to, symflags) &&
+                   trace_seq_printf(seq, "\t  <-  ") &&
+                   seq_print_ip_sym(seq, it->from, symflags) &&
                    trace_seq_printf(seq, "\n"))
                        return TRACE_TYPE_HANDLED;
                return TRACE_TYPE_PARTIAL_LINE;
@@ -103,26 +166,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
        return TRACE_TYPE_UNHANDLED;
 }
 
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
+void trace_hw_branch(u64 from, u64 to)
 {
+       struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
        struct hw_branch_entry *entry;
-       unsigned long irq;
+       unsigned long irq1;
+       int cpu;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
-       if (!event)
+       if (unlikely(!tr))
                return;
+
+       if (unlikely(!trace_hw_branches_enabled))
+               return;
+
+       local_irq_save(irq1);
+       cpu = raw_smp_processor_id();
+       if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+               goto out;
+
+       event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+                                         sizeof(*entry), 0, 0);
+       if (!event)
+               goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, from);
        entry->ent.type = TRACE_HW_BRANCHES;
-       entry->ent.cpu = smp_processor_id();
        entry->from = from;
        entry->to   = to;
-       ring_buffer_unlock_commit(tr->buffer, event, irq);
+       trace_buffer_unlock_commit(tr, event, 0, 0);
+
+ out:
+       atomic_dec(&tr->data[cpu]->disabled);
+       local_irq_restore(irq1);
 }
 
-static void trace_bts_at(struct trace_array *tr,
-                        const struct bts_trace *trace, void *at)
+static void trace_bts_at(const struct bts_trace *trace, void *at)
 {
        struct bts_struct bts;
        int err = 0;
@@ -137,18 +216,29 @@ static void trace_bts_at(struct trace_array *tr,
 
        switch (bts.qualifier) {
        case BTS_BRANCH:
-               trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
+               trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
                break;
        }
 }
 
+/*
+ * Collect the trace on the current cpu and write it into the ftrace buffer.
+ *
+ * pre: bts_tracer_lock must be locked
+ */
 static void trace_bts_cpu(void *arg)
 {
        struct trace_array *tr = (struct trace_array *) arg;
        const struct bts_trace *trace;
        unsigned char *at;
 
-       if (!this_tracer)
+       if (unlikely(!tr))
+               return;
+
+       if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
+               return;
+
+       if (unlikely(!this_tracer))
                return;
 
        ds_suspend_bts(this_tracer);
@@ -158,11 +248,11 @@ static void trace_bts_cpu(void *arg)
 
        for (at = trace->ds.top; (void *)at < trace->ds.end;
             at += trace->ds.size)
-               trace_bts_at(tr, trace, at);
+               trace_bts_at(trace, at);
 
        for (at = trace->ds.begin; (void *)at < trace->ds.top;
             at += trace->ds.size)
-               trace_bts_at(tr, trace, at);
+               trace_bts_at(trace, at);
 
 out:
        ds_resume_bts(this_tracer);
@@ -170,26 +260,43 @@ out:
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-       int cpu;
+       spin_lock(&bts_tracer_lock);
+
+       on_each_cpu(trace_bts_cpu, iter->tr, 1);
+
+       spin_unlock(&bts_tracer_lock);
+}
+
+static void trace_bts_close(struct trace_iterator *iter)
+{
+       tracing_reset_online_cpus(iter->tr);
+}
+
+void trace_hw_branch_oops(void)
+{
+       spin_lock(&bts_tracer_lock);
+
+       trace_bts_cpu(hw_branch_trace);
 
-       for_each_cpu(cpu, cpu_possible_mask)
-               smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
+       spin_unlock(&bts_tracer_lock);
 }
 
 struct tracer bts_tracer __read_mostly =
 {
        .name           = "hw-branch-tracer",
        .init           = bts_trace_init,
-       .reset          = bts_trace_stop,
+       .reset          = bts_trace_reset,
        .print_header   = bts_trace_print_header,
        .print_line     = bts_trace_print_line,
        .start          = bts_trace_start,
        .stop           = bts_trace_stop,
-       .open           = trace_bts_prepare
+       .open           = trace_bts_prepare,
+       .close          = trace_bts_close
 };
 
 __init static int init_bts_trace(void)
 {
+       register_hotcpu_notifier(&bts_hotcpu_notifier);
        return register_tracer(&bts_tracer);
 }
 device_initcall(init_bts_trace);
index 62a78d9..b923d13 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * trace irqs off criticall timings
+ * trace irqs off critical timings
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
@@ -32,6 +32,8 @@ enum {
 
 static int trace_type __read_mostly;
 
+static int save_lat_flag;
+
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
 preempt_trace(void)
@@ -95,7 +97,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+               trace_function(tr, ip, parent_ip, flags, preempt_count());
 
        atomic_dec(&data->disabled);
 }
@@ -153,7 +155,7 @@ check_critical_timing(struct trace_array *tr,
        if (!report_latency(delta))
                goto out_unlock;
 
-       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+       trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
        latency = nsecs_to_usecs(delta);
 
@@ -177,7 +179,7 @@ out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(tr, cpu);
-       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+       trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -210,7 +212,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
        local_save_flags(flags);
 
-       trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+       trace_function(tr, ip, parent_ip, flags, preempt_count());
 
        per_cpu(tracing_cpu, cpu) = 1;
 
@@ -244,7 +246,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
        atomic_inc(&data->disabled);
 
        local_save_flags(flags);
-       trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+       trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
@@ -353,33 +355,26 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-/*
- * save_tracer_enabled is used to save the state of the tracer_enabled
- * variable when we disable it when we open a trace output file.
- */
-static int save_tracer_enabled;
-
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
        register_ftrace_function(&trace_ops);
-       if (tracing_is_enabled()) {
+       if (tracing_is_enabled())
                tracer_enabled = 1;
-               save_tracer_enabled = 1;
-       } else {
+       else
                tracer_enabled = 0;
-               save_tracer_enabled = 0;
-       }
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
        tracer_enabled = 0;
-       save_tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
+       save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+       trace_flags |= TRACE_ITER_LATENCY_FMT;
+
        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
@@ -390,30 +385,19 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
        stop_irqsoff_tracer(tr);
+
+       if (!save_lat_flag)
+               trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
 {
        tracer_enabled = 1;
-       save_tracer_enabled = 1;
 }
 
 static void irqsoff_tracer_stop(struct trace_array *tr)
 {
        tracer_enabled = 0;
-       save_tracer_enabled = 0;
-}
-
-static void irqsoff_tracer_open(struct trace_iterator *iter)
-{
-       /* stop the trace while dumping */
-       tracer_enabled = 0;
-}
-
-static void irqsoff_tracer_close(struct trace_iterator *iter)
-{
-       /* restart tracing */
-       tracer_enabled = save_tracer_enabled;
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
@@ -431,8 +415,6 @@ static struct tracer irqsoff_tracer __read_mostly =
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
-       .open           = irqsoff_tracer_open,
-       .close          = irqsoff_tracer_close,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
@@ -459,8 +441,6 @@ static struct tracer preemptoff_tracer __read_mostly =
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
-       .open           = irqsoff_tracer_open,
-       .close          = irqsoff_tracer_close,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
@@ -489,8 +469,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
-       .open           = irqsoff_tracer_open,
-       .close          = irqsoff_tracer_close,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
index 80e503e..23e346a 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/atomic.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 struct header_iter {
        struct pci_dev *dev;
@@ -183,21 +184,22 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
        switch (rw->opcode) {
        case MMIO_READ:
                ret = trace_seq_printf(s,
-                       "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
+                       "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_WRITE:
                ret = trace_seq_printf(s,
-                       "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
+                       "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_UNKNOWN_OP:
                ret = trace_seq_printf(s,
-                       "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n",
+                       "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
+                       "%02lx 0x%lx %d\n",
                        secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
@@ -229,14 +231,14 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
        switch (m->opcode) {
        case MMIO_PROBE:
                ret = trace_seq_printf(s,
-                       "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
+                       "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
                        secs, usec_rem, m->map_id,
                        (unsigned long long)m->phys, m->virt, m->len,
                        0UL, 0);
                break;
        case MMIO_UNPROBE:
                ret = trace_seq_printf(s,
-                       "UNMAP %lu.%06lu %d 0x%lx %d\n",
+                       "UNMAP %u.%06lu %d 0x%lx %d\n",
                        secs, usec_rem, m->map_id, 0UL, 0);
                break;
        default:
@@ -252,20 +254,20 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
 {
        struct trace_entry *entry = iter->ent;
        struct print_entry *print = (struct print_entry *)entry;
-       const char *msg         = print->buf;
        struct trace_seq *s     = &iter->seq;
        unsigned long long t    = ns2usecs(iter->ts);
-       unsigned long usec_rem  = do_div(t, 1000000ULL);
+       unsigned long usec_rem  = do_div(t, USEC_PER_SEC);
        unsigned secs           = (unsigned long)t;
        int ret;
 
        /* The trailing newline must be in the message. */
-       ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
+       ret = trace_seq_printf(s, "MARK %u.%06lu ", secs, usec_rem);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
-       if (entry->flags & TRACE_FLAG_CONT)
-               trace_seq_print_cont(s, iter);
+       ret = trace_seq_bprintf(s, print->fmt, print->buf);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
 
        return TRACE_TYPE_HANDLED;
 }
@@ -308,21 +310,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 {
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
-       unsigned long irq_flags;
+       int pc = preempt_count();
 
-       event   = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+                                         sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, preempt_count());
-       entry->ent.type                 = TRACE_MMIO_RW;
        entry->rw                       = *rw;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
-       trace_wake_up();
+       trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -338,21 +336,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 {
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
-       unsigned long irq_flags;
+       int pc = preempt_count();
 
-       event   = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+                                         sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, preempt_count());
-       entry->ent.type                 = TRACE_MMIO_MAP;
        entry->map                      = *map;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
-       trace_wake_up();
+       trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
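The mmiotrace hunks above show the conversion to the new trace_buffer_lock_reserve()/trace_buffer_unlock_commit() helpers, which fill in the common entry header and do the reader wake-up that used to be open-coded. A minimal sketch of the resulting recording pattern (the function name __trace_example is a placeholder, not part of the patch):

static void __trace_example(struct trace_array *tr, struct mmiotrace_rw *rw)
{
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
        int pc = preempt_count();

        /* reserve space; the entry type and header are set by the helper */
        event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
                                          sizeof(*entry), 0, pc);
        if (!event)
                return;         /* ring buffer full: drop the event */
        entry = ring_buffer_event_data(event);
        entry->rw = *rw;        /* event-specific payload */
        /* commit the event and wake up any waiting readers */
        trace_buffer_unlock_commit(tr, event, 0, pc);
}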
index b9767ac..9aa84bd 100644 (file)
@@ -47,12 +47,7 @@ static void stop_nop_trace(struct trace_array *tr)
 
 static int nop_trace_init(struct trace_array *tr)
 {
-       int cpu;
        ctx_trace = tr;
-
-       for_each_online_cpu(cpu)
-               tracing_reset(tr, cpu);
-
        start_nop_trace(tr);
        return 0;
 }
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
new file mode 100644 (file)
index 0000000..ef8fd66
--- /dev/null
@@ -0,0 +1,916 @@
+/*
+ * trace_output.c
+ *
+ * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+
+#include "trace_output.h"
+
+/* must be a power of 2 */
+#define EVENT_HASHSIZE 128
+
+static DEFINE_MUTEX(trace_event_mutex);
+static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
+
+static int next_event_type = __TRACE_LAST_TYPE + 1;
+
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formating of a trace
+ * trace_seq_printf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ */
+int
+trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+       int len = (PAGE_SIZE - 1) - s->len;
+       va_list ap;
+       int ret;
+
+       if (!len)
+               return 0;
+
+       va_start(ap, fmt);
+       ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+       va_end(ap);
+
+       /* If we can't write it all, don't bother writing anything */
+       if (ret >= len)
+               return 0;
+
+       s->len += ret;
+
+       return len;
+}
+
+int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+       int len = (PAGE_SIZE - 1) - s->len;
+       int ret;
+
+       if (!len)
+               return 0;
+
+       ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+
+       /* If we can't write it all, don't bother writing anything */
+       if (ret >= len)
+               return 0;
+
+       s->len += ret;
+
+       return len;
+}
+
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ */
+int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+       int len = strlen(str);
+
+       if (len > ((PAGE_SIZE - 1) - s->len))
+               return 0;
+
+       memcpy(s->buffer + s->len, str, len);
+       s->len += len;
+
+       return len;
+}
+
+int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+       if (s->len >= (PAGE_SIZE - 1))
+               return 0;
+
+       s->buffer[s->len++] = c;
+
+       return 1;
+}
+
+int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
+{
+       if (len > ((PAGE_SIZE - 1) - s->len))
+               return 0;
+
+       memcpy(s->buffer + s->len, mem, len);
+       s->len += len;
+
+       return len;
+}
+
+int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
+{
+       unsigned char hex[HEX_CHARS];
+       unsigned char *data = mem;
+       int i, j;
+
+#ifdef __BIG_ENDIAN
+       for (i = 0, j = 0; i < len; i++) {
+#else
+       for (i = len-1, j = 0; i >= 0; i--) {
+#endif
+               hex[j++] = hex_asc_hi(data[i]);
+               hex[j++] = hex_asc_lo(data[i]);
+       }
+       hex[j++] = ' ';
+
+       return trace_seq_putmem(s, hex, j);
+}
+
+int trace_seq_path(struct trace_seq *s, struct path *path)
+{
+       unsigned char *p;
+
+       if (s->len >= (PAGE_SIZE - 1))
+               return 0;
+       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+       if (!IS_ERR(p)) {
+               p = mangle_path(s->buffer + s->len, p, "\n");
+               if (p) {
+                       s->len = p - s->buffer;
+                       return 1;
+               }
+       } else {
+               s->buffer[s->len++] = '?';
+               return 1;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_KRETPROBES
+static inline const char *kretprobed(const char *name)
+{
+       static const char tramp_name[] = "kretprobe_trampoline";
+       int size = sizeof(tramp_name);
+
+       if (strncmp(tramp_name, name, size) == 0)
+               return "[unknown/kretprobe'd]";
+       return name;
+}
+#else
+static inline const char *kretprobed(const char *name)
+{
+       return name;
+}
+#endif /* CONFIG_KRETPROBES */
+
+static int
+seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+       char str[KSYM_SYMBOL_LEN];
+       const char *name;
+
+       kallsyms_lookup(address, NULL, NULL, NULL, str);
+
+       name = kretprobed(str);
+
+       return trace_seq_printf(s, fmt, name);
+#endif
+       return 1;
+}
+
+static int
+seq_print_sym_offset(struct trace_seq *s, const char *fmt,
+                    unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+       char str[KSYM_SYMBOL_LEN];
+       const char *name;
+
+       sprint_symbol(str, address);
+       name = kretprobed(str);
+
+       return trace_seq_printf(s, fmt, name);
+#endif
+       return 1;
+}
+
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
+int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+                     unsigned long ip, unsigned long sym_flags)
+{
+       struct file *file = NULL;
+       unsigned long vmstart = 0;
+       int ret = 1;
+
+       if (mm) {
+               const struct vm_area_struct *vma;
+
+               down_read(&mm->mmap_sem);
+               vma = find_vma(mm, ip);
+               if (vma) {
+                       file = vma->vm_file;
+                       vmstart = vma->vm_start;
+               }
+               if (file) {
+                       ret = trace_seq_path(s, &file->f_path);
+                       if (ret)
+                               ret = trace_seq_printf(s, "[+0x%lx]",
+                                                      ip - vmstart);
+               }
+               up_read(&mm->mmap_sem);
+       }
+       if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
+               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+       return ret;
+}
+
+int
+seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
+                     unsigned long sym_flags)
+{
+       struct mm_struct *mm = NULL;
+       int ret = 1;
+       unsigned int i;
+
+       if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+               struct task_struct *task;
+               /*
+                * we do the lookup on the thread group leader,
+                * since individual threads might have already quit!
+                */
+               rcu_read_lock();
+               task = find_task_by_vpid(entry->ent.tgid);
+               if (task)
+                       mm = get_task_mm(task);
+               rcu_read_unlock();
+       }
+
+       for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+               unsigned long ip = entry->caller[i];
+
+               if (ip == ULONG_MAX || !ret)
+                       break;
+               if (i && ret)
+                       ret = trace_seq_puts(s, " <- ");
+               if (!ip) {
+                       if (ret)
+                               ret = trace_seq_puts(s, "??");
+                       continue;
+               }
+               if (!ret)
+                       break;
+               if (ret)
+                       ret = seq_print_user_ip(s, mm, ip, sym_flags);
+       }
+
+       if (mm)
+               mmput(mm);
+       return ret;
+}
+
+int
+seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
+{
+       int ret;
+
+       if (!ip)
+               return trace_seq_printf(s, "0");
+
+       if (sym_flags & TRACE_ITER_SYM_OFFSET)
+               ret = seq_print_sym_offset(s, "%s", ip);
+       else
+               ret = seq_print_sym_short(s, "%s", ip);
+
+       if (!ret)
+               return 0;
+
+       if (sym_flags & TRACE_ITER_SYM_ADDR)
+               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+       return ret;
+}
+
+static int
+lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
+{
+       int hardirq, softirq;
+       char *comm;
+
+       comm = trace_find_cmdline(entry->pid);
+       hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
+       softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
+
+       if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
+                             comm, entry->pid, cpu,
+                             (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+                               (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
+                                 'X' : '.',
+                             (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
+                               'N' : '.',
+                             (hardirq && softirq) ? 'H' :
+                               hardirq ? 'h' : softirq ? 's' : '.'))
+               return 0;
+
+       if (entry->preempt_count)
+               return trace_seq_printf(s, "%x", entry->preempt_count);
+       return trace_seq_puts(s, ".");
+}
+
+static unsigned long preempt_mark_thresh = 100;
+
+static int
+lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
+                   unsigned long rel_usecs)
+{
+       return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
+                               rel_usecs > preempt_mark_thresh ? '!' :
+                                 rel_usecs > 1 ? '+' : ' ');
+}
+
+int trace_print_context(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+       char *comm = trace_find_cmdline(entry->pid);
+       unsigned long long t = ns2usecs(iter->ts);
+       unsigned long usec_rem = do_div(t, USEC_PER_SEC);
+       unsigned long secs = (unsigned long)t;
+
+       return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
+                               comm, entry->pid, iter->cpu, secs, usec_rem);
+}
+
+int trace_print_lat_context(struct trace_iterator *iter)
+{
+       u64 next_ts;
+       int ret;
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent,
+                          *next_entry = trace_find_next_entry(iter, NULL,
+                                                              &next_ts);
+       unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
+       unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
+       unsigned long rel_usecs;
+
+       if (!next_entry)
+               next_ts = iter->ts;
+       rel_usecs = ns2usecs(next_ts - iter->ts);
+
+       if (verbose) {
+               char *comm = trace_find_cmdline(entry->pid);
+               ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
+                                      " %ld.%03ldms (+%ld.%03ldms): ", comm,
+                                      entry->pid, iter->cpu, entry->flags,
+                                      entry->preempt_count, iter->idx,
+                                      ns2usecs(iter->ts),
+                                      abs_usecs / USEC_PER_MSEC,
+                                      abs_usecs % USEC_PER_MSEC,
+                                      rel_usecs / USEC_PER_MSEC,
+                                      rel_usecs % USEC_PER_MSEC);
+       } else {
+               ret = lat_print_generic(s, entry, iter->cpu);
+               if (ret)
+                       ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
+       }
+
+       return ret;
+}
+
+static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
+
+static int task_state_char(unsigned long state)
+{
+       int bit = state ? __ffs(state) + 1 : 0;
+
+       return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
+/**
+ * ftrace_find_event - find a registered event
+ * @type: the type of event to look for
+ *
+ * Returns an event of type @type, otherwise NULL
+ */
+struct trace_event *ftrace_find_event(int type)
+{
+       struct trace_event *event;
+       struct hlist_node *n;
+       unsigned key;
+
+       key = type & (EVENT_HASHSIZE - 1);
+
+       hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
+               if (event->type == type)
+                       return event;
+       }
+
+       return NULL;
+}
+
+/**
+ * register_ftrace_event - register output for an event type
+ * @event: the event type to register
+ *
+ * Event types are stored in a hash and this hash is used to
+ * find a way to print an event. If the @event->type is set
+ * then it will use that type, otherwise it will assign a
+ * type to use.
+ *
+ * If you assign your own type, please make sure it is added
+ * to the trace_type enum in trace.h, to avoid collisions
+ * with the dynamic types.
+ *
+ * Returns the event type number or zero on error.
+ */
+int register_ftrace_event(struct trace_event *event)
+{
+       unsigned key;
+       int ret = 0;
+
+       mutex_lock(&trace_event_mutex);
+
+       if (!event->type)
+               event->type = next_event_type++;
+       else if (event->type > __TRACE_LAST_TYPE) {
+               printk(KERN_WARNING "Need to add type to trace.h\n");
+               WARN_ON(1);
+       }
+
+       if (ftrace_find_event(event->type))
+               goto out;
+
+       if (event->trace == NULL)
+               event->trace = trace_nop_print;
+       if (event->raw == NULL)
+               event->raw = trace_nop_print;
+       if (event->hex == NULL)
+               event->hex = trace_nop_print;
+       if (event->binary == NULL)
+               event->binary = trace_nop_print;
+
+       key = event->type & (EVENT_HASHSIZE - 1);
+
+       hlist_add_head_rcu(&event->node, &event_hash[key]);
+
+       ret = event->type;
+ out:
+       mutex_unlock(&trace_event_mutex);
+
+       return ret;
+}
+
+/**
+ * unregister_ftrace_event - remove a no longer used event
+ * @event: the event to remove
+ */
+int unregister_ftrace_event(struct trace_event *event)
+{
+       mutex_lock(&trace_event_mutex);
+       hlist_del(&event->node);
+       mutex_unlock(&trace_event_mutex);
+
+       return 0;
+}
+
+/*
+ * Standard events
+ */
+
+enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
+{
+       return TRACE_TYPE_HANDLED;
+}
+
+/* TRACE_FN */
+static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
+{
+       struct ftrace_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!seq_print_ip_sym(s, field->ip, flags))
+               goto partial;
+
+       if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
+               if (!trace_seq_printf(s, " <-"))
+                       goto partial;
+               if (!seq_print_ip_sym(s,
+                                     field->parent_ip,
+                                     flags))
+                       goto partial;
+       }
+       if (!trace_seq_printf(s, "\n"))
+               goto partial;
+
+       return TRACE_TYPE_HANDLED;
+
+ partial:
+       return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
+{
+       struct ftrace_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
+                             field->ip,
+                             field->parent_ip))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
+{
+       struct ftrace_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_HEX_FIELD_RET(s, field->ip);
+       SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
+{
+       struct ftrace_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD_RET(s, field->ip);
+       SEQ_PUT_FIELD_RET(s, field->parent_ip);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event trace_fn_event = {
+       .type           = TRACE_FN,
+       .trace          = trace_fn_trace,
+       .raw            = trace_fn_raw,
+       .hex            = trace_fn_hex,
+       .binary         = trace_fn_bin,
+};
+
+/* TRACE_CTX and TRACE_WAKE */
+static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
+                                            char *delim)
+{
+       struct ctx_switch_entry *field;
+       char *comm;
+       int S, T;
+
+       trace_assign_type(field, iter->ent);
+
+       T = task_state_char(field->next_state);
+       S = task_state_char(field->prev_state);
+       comm = trace_find_cmdline(field->next_pid);
+       if (!trace_seq_printf(&iter->seq,
+                             " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+                             field->prev_pid,
+                             field->prev_prio,
+                             S, delim,
+                             field->next_cpu,
+                             field->next_pid,
+                             field->next_prio,
+                             T, comm))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
+{
+       return trace_ctxwake_print(iter, "==>");
+}
+
+static enum print_line_t trace_wake_print(struct trace_iterator *iter,
+                                         int flags)
+{
+       return trace_ctxwake_print(iter, "  +");
+}
+
+static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
+{
+       struct ctx_switch_entry *field;
+       int T;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!S)
+               S = task_state_char(field->prev_state);
+       T = task_state_char(field->next_state);
+       if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
+                             field->prev_pid,
+                             field->prev_prio,
+                             S,
+                             field->next_cpu,
+                             field->next_pid,
+                             field->next_prio,
+                             T))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
+{
+       return trace_ctxwake_raw(iter, 0);
+}
+
+static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
+{
+       return trace_ctxwake_raw(iter, '+');
+}
+
+
+static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
+{
+       struct ctx_switch_entry *field;
+       struct trace_seq *s = &iter->seq;
+       int T;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!S)
+               S = task_state_char(field->prev_state);
+       T = task_state_char(field->next_state);
+
+       SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
+       SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
+       SEQ_PUT_HEX_FIELD_RET(s, S);
+       SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
+       SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
+       SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
+       SEQ_PUT_HEX_FIELD_RET(s, T);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
+{
+       return trace_ctxwake_hex(iter, 0);
+}
+
+static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
+{
+       return trace_ctxwake_hex(iter, '+');
+}
+
+static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
+                                          int flags)
+{
+       struct ctx_switch_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD_RET(s, field->prev_pid);
+       SEQ_PUT_FIELD_RET(s, field->prev_prio);
+       SEQ_PUT_FIELD_RET(s, field->prev_state);
+       SEQ_PUT_FIELD_RET(s, field->next_pid);
+       SEQ_PUT_FIELD_RET(s, field->next_prio);
+       SEQ_PUT_FIELD_RET(s, field->next_state);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event trace_ctx_event = {
+       .type           = TRACE_CTX,
+       .trace          = trace_ctx_print,
+       .raw            = trace_ctx_raw,
+       .hex            = trace_ctx_hex,
+       .binary         = trace_ctxwake_bin,
+};
+
+static struct trace_event trace_wake_event = {
+       .type           = TRACE_WAKE,
+       .trace          = trace_wake_print,
+       .raw            = trace_wake_raw,
+       .hex            = trace_wake_hex,
+       .binary         = trace_ctxwake_bin,
+};
+
+/* TRACE_SPECIAL */
+static enum print_line_t trace_special_print(struct trace_iterator *iter,
+                                            int flags)
+{
+       struct special_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
+                             field->arg1,
+                             field->arg2,
+                             field->arg3))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_special_hex(struct trace_iterator *iter,
+                                          int flags)
+{
+       struct special_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
+       SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
+       SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_special_bin(struct trace_iterator *iter,
+                                          int flags)
+{
+       struct special_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD_RET(s, field->arg1);
+       SEQ_PUT_FIELD_RET(s, field->arg2);
+       SEQ_PUT_FIELD_RET(s, field->arg3);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event trace_special_event = {
+       .type           = TRACE_SPECIAL,
+       .trace          = trace_special_print,
+       .raw            = trace_special_print,
+       .hex            = trace_special_hex,
+       .binary         = trace_special_bin,
+};
+
+/* TRACE_STACK */
+
+static enum print_line_t trace_stack_print(struct trace_iterator *iter,
+                                          int flags)
+{
+       struct stack_entry *field;
+       struct trace_seq *s = &iter->seq;
+       int i;
+
+       trace_assign_type(field, iter->ent);
+
+       for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+               if (i) {
+                       if (!trace_seq_puts(s, " <= "))
+                               goto partial;
+
+                       if (!seq_print_ip_sym(s, field->caller[i], flags))
+                               goto partial;
+               }
+               if (!trace_seq_puts(s, "\n"))
+                       goto partial;
+       }
+
+       return TRACE_TYPE_HANDLED;
+
+ partial:
+       return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event trace_stack_event = {
+       .type           = TRACE_STACK,
+       .trace          = trace_stack_print,
+       .raw            = trace_special_print,
+       .hex            = trace_special_hex,
+       .binary         = trace_special_bin,
+};
+
+/* TRACE_USER_STACK */
+static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
+                                               int flags)
+{
+       struct userstack_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!seq_print_userip_objs(field, s, flags))
+               goto partial;
+
+       if (!trace_seq_putc(s, '\n'))
+               goto partial;
+
+       return TRACE_TYPE_HANDLED;
+
+ partial:
+       return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event trace_user_stack_event = {
+       .type           = TRACE_USER_STACK,
+       .trace          = trace_user_stack_print,
+       .raw            = trace_special_print,
+       .hex            = trace_special_hex,
+       .binary         = trace_special_bin,
+};
+
+/* TRACE_PRINT */
+static enum print_line_t
+trace_print_print(struct trace_iterator *iter, int flags)
+{
+       struct trace_entry *entry = iter->ent;
+       struct trace_seq *s = &iter->seq;
+       struct print_entry *field;
+
+       trace_assign_type(field, entry);
+
+       if (!seq_print_ip_sym(s, field->ip, flags))
+               goto partial;
+
+       if (!trace_seq_puts(s, ": "))
+               goto partial;
+
+       if (!trace_seq_bprintf(s, field->fmt, field->buf))
+               goto partial;
+
+       return TRACE_TYPE_HANDLED;
+
+ partial:
+       return TRACE_TYPE_PARTIAL_LINE;
+}
+
+
+static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
+{
+       struct print_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!trace_seq_printf(s, ": %lx : ", field->ip))
+               goto partial;
+
+       if (!trace_seq_bprintf(s, field->fmt, field->buf))
+               goto partial;
+
+       return TRACE_TYPE_HANDLED;
+
+ partial:
+       return TRACE_TYPE_PARTIAL_LINE;
+}
+
+
+static struct trace_event trace_print_event = {
+       .type           = TRACE_PRINT,
+       .trace          = trace_print_print,
+       .raw            = trace_print_raw,
+};
+
+static struct trace_event *events[] __initdata = {
+       &trace_fn_event,
+       &trace_ctx_event,
+       &trace_wake_event,
+       &trace_special_event,
+       &trace_stack_event,
+       &trace_user_stack_event,
+       &trace_print_event,
+       NULL
+};
+
+__init static int init_events(void)
+{
+       struct trace_event *event;
+       int i, ret;
+
+       for (i = 0; events[i]; i++) {
+               event = events[i];
+
+               ret = register_ftrace_event(event);
+               if (!ret) {
+                       printk(KERN_WARNING "event %d failed to register\n",
+                              event->type);
+                       WARN_ON_ONCE(1);
+               }
+       }
+
+       return 0;
+}
+device_initcall(init_events);
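All of the handlers in this new file follow the same convention: write into the iterator's trace_seq with the trace_seq_*() helpers and return TRACE_TYPE_PARTIAL_LINE as soon as one of them reports a full buffer (a zero return), otherwise TRACE_TYPE_HANDLED. A hedged sketch of such a handler for a made-up event payload (struct example_entry and its value field are illustrations, not defined by this patch):

/* hypothetical payload; the real entry structs live in trace.h */
struct example_entry {
        struct trace_entry      ent;
        unsigned long           value;
};

static enum print_line_t example_print(struct trace_iterator *iter, int flags)
{
        struct example_entry *field = (struct example_entry *)iter->ent;
        struct trace_seq *s = &iter->seq;

        /* trace_seq_printf() returns 0 once the line no longer fits */
        if (!trace_seq_printf(s, "example: %lu\n", field->value))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}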
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
new file mode 100644 (file)
index 0000000..3b90e6a
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef __TRACE_EVENTS_H
+#define __TRACE_EVENTS_H
+
+#include "trace.h"
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
+                                             int flags);
+
+struct trace_event {
+       struct hlist_node       node;
+       int                     type;
+       trace_print_func        trace;
+       trace_print_func        raw;
+       trace_print_func        hex;
+       trace_print_func        binary;
+};
+
+extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+       __attribute__ ((format (printf, 2, 3)));
+extern int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern int
+seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
+               unsigned long sym_flags);
+extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+                                size_t cnt);
+int trace_seq_puts(struct trace_seq *s, const char *str);
+int trace_seq_putc(struct trace_seq *s, unsigned char c);
+int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len);
+int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len);
+int trace_seq_path(struct trace_seq *s, struct path *path);
+int seq_print_userip_objs(const struct userstack_entry *entry,
+                         struct trace_seq *s, unsigned long sym_flags);
+int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+                     unsigned long ip, unsigned long sym_flags);
+
+int trace_print_context(struct trace_iterator *iter);
+int trace_print_lat_context(struct trace_iterator *iter);
+
+struct trace_event *ftrace_find_event(int type);
+int register_ftrace_event(struct trace_event *event);
+int unregister_ftrace_event(struct trace_event *event);
+
+enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags);
+
+#define MAX_MEMHEX_BYTES       8
+#define HEX_CHARS              (MAX_MEMHEX_BYTES*2 + 1)
+
+#define SEQ_PUT_FIELD_RET(s, x)                                \
+do {                                                   \
+       if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
+               return TRACE_TYPE_PARTIAL_LINE;         \
+} while (0)
+
+#define SEQ_PUT_HEX_FIELD_RET(s, x)                    \
+do {                                                   \
+       BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);     \
+       if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
+               return TRACE_TYPE_PARTIAL_LINE;         \
+} while (0)
+
+#endif
+
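Tying the two new files together: a tracer plugs its own formatting into the core by filling in a struct trace_event and registering it. A rough sketch, reusing the hypothetical example_print() handler sketched above (a .type of 0 asks register_ftrace_event() to assign a free dynamic type, and unset handlers default to trace_nop_print):

static struct trace_event example_event = {
        .type           = 0,            /* 0: let the core pick a type */
        .trace          = example_print,
        /* .raw, .hex and .binary fall back to trace_nop_print */
};

static __init int example_event_init(void)
{
        /* register_ftrace_event() returns the type number, or 0 on error */
        if (!register_ftrace_event(&example_event))
                printk(KERN_WARNING "example event failed to register\n");
        return 0;
}
device_initcall(example_event_init);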
index 7bda248..91ce672 100644 (file)
 
 #include <linux/init.h>
 #include <linux/debugfs.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 #include <linux/kallsyms.h>
 #include <linux/module.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 static struct trace_array *power_trace;
 static int __read_mostly trace_power_enabled;
 
+static void probe_power_start(struct power_trace *it, unsigned int type,
+                               unsigned int level)
+{
+       if (!trace_power_enabled)
+               return;
+
+       memset(it, 0, sizeof(struct power_trace));
+       it->state = level;
+       it->type = type;
+       it->stamp = ktime_get();
+}
+
+
+static void probe_power_end(struct power_trace *it)
+{
+       struct ring_buffer_event *event;
+       struct trace_power *entry;
+       struct trace_array_cpu *data;
+       struct trace_array *tr = power_trace;
+
+       if (!trace_power_enabled)
+               return;
+
+       preempt_disable();
+       it->end = ktime_get();
+       data = tr->data[smp_processor_id()];
+
+       event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+                                         sizeof(*entry), 0, 0);
+       if (!event)
+               goto out;
+       entry   = ring_buffer_event_data(event);
+       entry->state_data = *it;
+       trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+       preempt_enable();
+}
+
+static void probe_power_mark(struct power_trace *it, unsigned int type,
+                               unsigned int level)
+{
+       struct ring_buffer_event *event;
+       struct trace_power *entry;
+       struct trace_array_cpu *data;
+       struct trace_array *tr = power_trace;
+
+       if (!trace_power_enabled)
+               return;
+
+       memset(it, 0, sizeof(struct power_trace));
+       it->state = level;
+       it->type = type;
+       it->stamp = ktime_get();
+       preempt_disable();
+       it->end = it->stamp;
+       data = tr->data[smp_processor_id()];
+
+       event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+                                         sizeof(*entry), 0, 0);
+       if (!event)
+               goto out;
+       entry   = ring_buffer_event_data(event);
+       entry->state_data = *it;
+       trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+       preempt_enable();
+}
+
+static int tracing_power_register(void)
+{
+       int ret;
+
+       ret = register_trace_power_start(probe_power_start);
+       if (ret) {
+               pr_info("power trace: Couldn't activate tracepoint"
+                       " probe to trace_power_start\n");
+               return ret;
+       }
+       ret = register_trace_power_end(probe_power_end);
+       if (ret) {
+               pr_info("power trace: Couldn't activate tracepoint"
+                       " probe to trace_power_end\n");
+               goto fail_start;
+       }
+       ret = register_trace_power_mark(probe_power_mark);
+       if (ret) {
+               pr_info("power trace: Couldn't activate tracepoint"
+                       " probe to trace_power_mark\n");
+               goto fail_end;
+       }
+       return ret;
+fail_end:
+       unregister_trace_power_end(probe_power_end);
+fail_start:
+       unregister_trace_power_start(probe_power_start);
+       return ret;
+}
 
 static void start_power_trace(struct trace_array *tr)
 {
        trace_power_enabled = 1;
+       tracing_power_register();
 }
 
 static void stop_power_trace(struct trace_array *tr)
 {
        trace_power_enabled = 0;
+       unregister_trace_power_start(probe_power_start);
+       unregister_trace_power_end(probe_power_end);
+       unregister_trace_power_mark(probe_power_mark);
 }
 
 
@@ -38,6 +140,7 @@ static int power_trace_init(struct trace_array *tr)
        power_trace = tr;
 
        trace_power_enabled = 1;
+       tracing_power_register();
 
        for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);
@@ -94,86 +197,3 @@ static int init_power_trace(void)
        return register_tracer(&power_tracer);
 }
 device_initcall(init_power_trace);
-
-void trace_power_start(struct power_trace *it, unsigned int type,
-                        unsigned int level)
-{
-       if (!trace_power_enabled)
-               return;
-
-       memset(it, 0, sizeof(struct power_trace));
-       it->state = level;
-       it->type = type;
-       it->stamp = ktime_get();
-}
-EXPORT_SYMBOL_GPL(trace_power_start);
-
-
-void trace_power_end(struct power_trace *it)
-{
-       struct ring_buffer_event *event;
-       struct trace_power *entry;
-       struct trace_array_cpu *data;
-       unsigned long irq_flags;
-       struct trace_array *tr = power_trace;
-
-       if (!trace_power_enabled)
-               return;
-
-       preempt_disable();
-       it->end = ktime_get();
-       data = tr->data[smp_processor_id()];
-
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
-       if (!event)
-               goto out;
-       entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, 0);
-       entry->ent.type = TRACE_POWER;
-       entry->state_data = *it;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
-       trace_wake_up();
-
- out:
-       preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_end);
-
-void trace_power_mark(struct power_trace *it, unsigned int type,
-                        unsigned int level)
-{
-       struct ring_buffer_event *event;
-       struct trace_power *entry;
-       struct trace_array_cpu *data;
-       unsigned long irq_flags;
-       struct trace_array *tr = power_trace;
-
-       if (!trace_power_enabled)
-               return;
-
-       memset(it, 0, sizeof(struct power_trace));
-       it->state = level;
-       it->type = type;
-       it->stamp = ktime_get();
-       preempt_disable();
-       it->end = it->stamp;
-       data = tr->data[smp_processor_id()];
-
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
-       if (!event)
-               goto out;
-       entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, 0);
-       entry->ent.type = TRACE_POWER;
-       entry->state_data = *it;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
-       trace_wake_up();
-
- out:
-       preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_mark);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
new file mode 100644 (file)
index 0000000..a50aea2
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * trace binary printk
+ *
+ * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/marker.h>
+#include <linux/uaccess.h>
+
+#include "trace.h"
+
+#ifdef CONFIG_MODULES
+
+/*
+ * The trace_printk() format strings of modules are saved in
+ * struct trace_bprintk_fmt entries, which are queued on trace_bprintk_fmt_list.
+ */
+static LIST_HEAD(trace_bprintk_fmt_list);
+
+/* serialize accesses to trace_bprintk_fmt_list */
+static DEFINE_MUTEX(btrace_mutex);
+
+struct trace_bprintk_fmt {
+       struct list_head list;
+       char fmt[0];
+};
+
+static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
+{
+       struct trace_bprintk_fmt *pos;
+       list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
+               if (!strcmp(pos->fmt, fmt))
+                       return pos;
+       }
+       return NULL;
+}
+
+static
+void hold_module_trace_bprintk_format(const char **start, const char **end)
+{
+       const char **iter;
+
+       mutex_lock(&btrace_mutex);
+       for (iter = start; iter < end; iter++) {
+               struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
+               if (tb_fmt) {
+                       *iter = tb_fmt->fmt;
+                       continue;
+               }
+
+               tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt)
+                               + strlen(*iter) + 1, GFP_KERNEL);
+               if (tb_fmt) {
+                       list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
+                       strcpy(tb_fmt->fmt, *iter);
+                       *iter = tb_fmt->fmt;
+               } else
+                       *iter = NULL;
+       }
+       mutex_unlock(&btrace_mutex);
+}
+
+static int module_trace_bprintk_format_notify(struct notifier_block *self,
+               unsigned long val, void *data)
+{
+       struct module *mod = data;
+       if (mod->num_trace_bprintk_fmt) {
+               const char **start = mod->trace_bprintk_fmt_start;
+               const char **end = start + mod->num_trace_bprintk_fmt;
+
+               if (val == MODULE_STATE_COMING)
+                       hold_module_trace_bprintk_format(start, end);
+       }
+       return 0;
+}
+
+#else /* !CONFIG_MODULES */
+__init static int
+module_trace_bprintk_format_notify(struct notifier_block *self,
+               unsigned long val, void *data)
+{
+       return 0;
+}
+#endif /* CONFIG_MODULES */
+
+
+__initdata_or_module static
+struct notifier_block module_trace_bprintk_format_nb = {
+       .notifier_call = module_trace_bprintk_format_notify,
+};
+
+int __trace_printk(unsigned long ip, const char *fmt, ...)
+{
+       int ret;
+       va_list ap;
+
+       if (unlikely(!fmt))
+               return 0;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       va_start(ap, fmt);
+       ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+       va_end(ap);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__trace_printk);
+
+int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
+{
+       if (unlikely(!fmt))
+               return 0;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+}
+EXPORT_SYMBOL_GPL(__ftrace_vprintk);
+
+
+static __init int init_trace_printk(void)
+{
+       return register_module_notifier(&module_trace_bprintk_format_nb);
+}
+
+early_initcall(init_trace_printk);
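Both exported entry points above funnel into trace_vprintk(); a caller that builds its own va_list can forward it through __ftrace_vprintk() directly. A small sketch of such a wrapper (the function name is a placeholder):

static int example_report(unsigned long ip, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        /* records into the trace buffer only while the 'printk' option is set */
        ret = __ftrace_vprintk(ip, fmt, ap);
        va_end(ap);

        return ret;
}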
index df175cb..77132c2 100644 (file)
@@ -43,7 +43,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
        data = ctx_trace->data[cpu];
 
        if (likely(!atomic_read(&data->disabled)))
-               tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
+               tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
 
        local_irq_restore(flags);
 }
@@ -66,7 +66,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
        data = ctx_trace->data[cpu];
 
        if (likely(!atomic_read(&data->disabled)))
-               tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+               tracing_sched_wakeup_trace(ctx_trace, wakee, current,
                                           flags, pc);
 
        local_irq_restore(flags);
@@ -93,7 +93,7 @@ static int tracing_sched_register(void)
        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
-                       " probe to kernel_sched_schedule\n");
+                       " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }
 
@@ -185,12 +185,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
        ctx_trace = tr;
 }
 
-static void start_sched_trace(struct trace_array *tr)
-{
-       tracing_reset_online_cpus(tr);
-       tracing_start_sched_switch_record();
-}
-
 static void stop_sched_trace(struct trace_array *tr)
 {
        tracing_stop_sched_switch_record();
@@ -199,7 +193,7 @@ static void stop_sched_trace(struct trace_array *tr)
 static int sched_switch_trace_init(struct trace_array *tr)
 {
        ctx_trace = tr;
-       start_sched_trace(tr);
+       tracing_start_sched_switch_record();
        return 0;
 }
 
@@ -227,6 +221,7 @@ static struct tracer sched_switch_trace __read_mostly =
        .reset          = sched_switch_trace_reset,
        .start          = sched_switch_trace_start,
        .stop           = sched_switch_trace_stop,
+       .wait_pipe      = poll_wait_pipe,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sched_switch,
 #endif
index 42ae1e7..3c5ad6b 100644 (file)
@@ -25,12 +25,15 @@ static int __read_mostly    tracer_enabled;
 static struct task_struct      *wakeup_task;
 static int                     wakeup_cpu;
 static unsigned                        wakeup_prio = -1;
+static int                     wakeup_rt;
 
 static raw_spinlock_t wakeup_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
+static int save_lat_flag;
+
 #ifdef CONFIG_FUNCTION_TRACER
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
@@ -71,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
        if (task_cpu(wakeup_task) != cpu)
                goto unlock;
 
-       trace_function(tr, data, ip, parent_ip, flags, pc);
+       trace_function(tr, ip, parent_ip, flags, pc);
 
  unlock:
        __raw_spin_unlock(&wakeup_lock);
@@ -151,7 +154,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;
 
-       trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+       trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+       tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
        /*
         * usecs conversion is slow so we try to delay the conversion
@@ -182,13 +186,10 @@ out:
 
 static void __wakeup_reset(struct trace_array *tr)
 {
-       struct trace_array_cpu *data;
        int cpu;
 
-       for_each_possible_cpu(cpu) {
-               data = tr->data[cpu];
+       for_each_possible_cpu(cpu)
                tracing_reset(tr, cpu);
-       }
 
        wakeup_cpu = -1;
        wakeup_prio = -1;
@@ -213,6 +214,7 @@ static void wakeup_reset(struct trace_array *tr)
 static void
 probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 {
+       struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
@@ -224,7 +226,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
        tracing_record_cmdline(p);
        tracing_record_cmdline(current);
 
-       if (likely(!rt_task(p)) ||
+       if ((wakeup_rt && !rt_task(p)) ||
                        p->prio >= wakeup_prio ||
                        p->prio >= current->prio)
                return;
@@ -252,9 +254,10 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 
        local_save_flags(flags);
 
-       wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-       trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
-                      CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+       data = wakeup_trace->data[wakeup_cpu];
+       data->preempt_timestamp = ftrace_now(cpu);
+       tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+       trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
        __raw_spin_unlock(&wakeup_lock);
@@ -262,12 +265,6 @@ out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
-/*
- * save_tracer_enabled is used to save the state of the tracer_enabled
- * variable when we disable it when we open a trace output file.
- */
-static int save_tracer_enabled;
-
 static void start_wakeup_tracer(struct trace_array *tr)
 {
        int ret;
@@ -289,7 +286,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
        ret = register_trace_sched_switch(probe_wakeup_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
-                       " probe to kernel_sched_schedule\n");
+                       " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }
 
@@ -306,13 +303,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
 
        register_ftrace_function(&trace_ops);
 
-       if (tracing_is_enabled()) {
+       if (tracing_is_enabled())
                tracer_enabled = 1;
-               save_tracer_enabled = 1;
-       } else {
+       else
                tracer_enabled = 0;
-               save_tracer_enabled = 0;
-       }
 
        return;
 fail_deprobe_wake_new:
@@ -324,54 +318,54 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
        tracer_enabled = 0;
-       save_tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
        unregister_trace_sched_switch(probe_wakeup_sched_switch);
        unregister_trace_sched_wakeup_new(probe_wakeup);
        unregister_trace_sched_wakeup(probe_wakeup);
 }
 
-static int wakeup_tracer_init(struct trace_array *tr)
+static int __wakeup_tracer_init(struct trace_array *tr)
 {
+       save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+       trace_flags |= TRACE_ITER_LATENCY_FMT;
+
        tracing_max_latency = 0;
        wakeup_trace = tr;
        start_wakeup_tracer(tr);
        return 0;
 }
 
+static int wakeup_tracer_init(struct trace_array *tr)
+{
+       wakeup_rt = 0;
+       return __wakeup_tracer_init(tr);
+}
+
+static int wakeup_rt_tracer_init(struct trace_array *tr)
+{
+       wakeup_rt = 1;
+       return __wakeup_tracer_init(tr);
+}
+
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);
+
+       if (!save_lat_flag)
+               trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
 {
        wakeup_reset(tr);
        tracer_enabled = 1;
-       save_tracer_enabled = 1;
 }
 
 static void wakeup_tracer_stop(struct trace_array *tr)
 {
        tracer_enabled = 0;
-       save_tracer_enabled = 0;
-}
-
-static void wakeup_tracer_open(struct trace_iterator *iter)
-{
-       /* stop the trace while dumping */
-       tracer_enabled = 0;
-}
-
-static void wakeup_tracer_close(struct trace_iterator *iter)
-{
-       /* forget about any processes we were recording */
-       if (save_tracer_enabled) {
-               wakeup_reset(iter->tr);
-               tracer_enabled = 1;
-       }
 }
 
 static struct tracer wakeup_tracer __read_mostly =
@@ -381,8 +375,20 @@ static struct tracer wakeup_tracer __read_mostly =
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
-       .open           = wakeup_tracer_open,
-       .close          = wakeup_tracer_close,
+       .print_max      = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+       .selftest    = trace_selftest_startup_wakeup,
+#endif
+};
+
+static struct tracer wakeup_rt_tracer __read_mostly =
+{
+       .name           = "wakeup_rt",
+       .init           = wakeup_rt_tracer_init,
+       .reset          = wakeup_tracer_reset,
+       .start          = wakeup_tracer_start,
+       .stop           = wakeup_tracer_stop,
+       .wait_pipe      = poll_wait_pipe,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
@@ -397,6 +403,10 @@ __init static int init_wakeup_tracer(void)
        if (ret)
                return ret;
 
+       ret = register_tracer(&wakeup_rt_tracer);
+       if (ret)
+               return ret;
+
        return 0;
 }
 device_initcall(init_wakeup_tracer);
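The irqsoff and wakeup tracers now share the same latency-format handling: init remembers whether the user already had TRACE_ITER_LATENCY_FMT set and forces it on, and reset clears it again only if it was not set before. Condensed into a sketch (the example_* names are placeholders):

static int save_lat_flag;

static int example_lat_init(struct trace_array *tr)
{
        /* remember the user's setting, then force the latency format */
        save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
        trace_flags |= TRACE_ITER_LATENCY_FMT;
        return 0;
}

static void example_lat_reset(struct trace_array *tr)
{
        /* restore the flag only if we were the ones who set it */
        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}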
index bc8e80a..f907a2b 100644 (file)
@@ -1,5 +1,6 @@
 /* Include in trace.c */
 
+#include <linux/stringify.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
 
@@ -9,11 +10,12 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
-       case TRACE_CONT:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
+       case TRACE_GRAPH_ENT:
+       case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
@@ -99,9 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
 /* Test dynamic code modification and ftrace filters */
 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
@@ -125,17 +124,17 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        func();
 
        /*
-        * Some archs *cough*PowerPC*cough* add charachters to the
+        * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
-        * accomodate them.
+        * accommodate them.
         */
-       func_name = "*" STR(DYN_FTRACE_TEST_NAME);
+       func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 
        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);
 
        /* enable tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
@@ -209,7 +208,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
        ftrace_enabled = 1;
        tracer_enabled = 1;
 
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
@@ -247,6 +246,54 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_FUNCTION_TRACER */
 
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Pretty much the same as the function tracer, from which this selftest
+ * has been borrowed.
+ */
+int
+trace_selftest_startup_function_graph(struct tracer *trace,
+                                       struct trace_array *tr)
+{
+       int ret;
+       unsigned long count;
+
+       ret = tracer_init(trace, tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               goto out;
+       }
+
+       /* Sleep for a 1/10 of a second */
+       msleep(100);
+
+       tracing_stop();
+
+       /* check the trace buffer */
+       ret = trace_test_buffer(tr, &count);
+
+       trace->reset(tr);
+       tracing_start();
+
+       if (!ret && !count) {
+               printk(KERN_CONT ".. no entries found ..");
+               ret = -1;
+               goto out;
+       }
+
+       /* Don't test dynamic tracing, the function tracer already did */
+
+out:
+       /* Stop it if we failed */
+       if (ret)
+               ftrace_graph_stop();
+
+       return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
 #ifdef CONFIG_IRQSOFF_TRACER
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
@@ -256,7 +303,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        int ret;
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
@@ -310,7 +357,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        }
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
@@ -364,7 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        }
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
@@ -496,7 +543,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        wait_for_completion(&isrt);
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
@@ -557,7 +604,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
        int ret;
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
@@ -589,10 +636,10 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
        int ret;
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
-               return 0;
+               return ret;
        }
 
        /* Sleep for a 1/10 of a second */
@@ -604,6 +651,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
        trace->reset(tr);
        tracing_start();
 
+       if (!ret && !count) {
+               printk(KERN_CONT ".. no entries found ..");
+               ret = -1;
+       }
+
        return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
@@ -616,7 +668,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        int ret;
 
        /* start the tracing */
-       ret = trace->init(tr);
+       ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
@@ -631,6 +683,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        trace->reset(tr);
        tracing_start();
 
+       if (!ret && !count) {
+               printk(KERN_CONT ".. no entries found ..");
+               ret = -1;
+       }
+
        return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
new file mode 100644 (file)
index 0000000..39310e3
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Infrastructure for statistical tracing (histogram output).
+ *
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Based on the code from trace_branch.c which is
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+
+/* List of stat entries from a tracer */
+struct trace_stat_list {
+       struct list_head        list;
+       void                    *stat;
+};
+
+/* A stat session is the stats output in one file */
+struct tracer_stat_session {
+       struct list_head        session_list;
+       struct tracer_stat      *ts;
+       struct list_head        stat_list;
+       struct mutex            stat_mutex;
+       struct dentry           *file;
+};
+
+/* All of the sessions currently in use. Each stat file embeds one session */
+static LIST_HEAD(all_stat_sessions);
+static DEFINE_MUTEX(all_stat_sessions_mutex);
+
+/* The root directory for all stat files */
+static struct dentry           *stat_dir;
+
+
+static void reset_stat_session(struct tracer_stat_session *session)
+{
+       struct trace_stat_list *node, *next;
+
+       list_for_each_entry_safe(node, next, &session->stat_list, list)
+               kfree(node);
+
+       INIT_LIST_HEAD(&session->stat_list);
+}
+
+static void destroy_session(struct tracer_stat_session *session)
+{
+       debugfs_remove(session->file);
+       reset_stat_session(session);
+       mutex_destroy(&session->stat_mutex);
+       kfree(session);
+}
+
+/*
+ * For tracers that don't provide a stat_cmp callback.
+ * This one always claims the new entry is bigger, forcing an
+ * immediate insertion on the first comparison.
+ */
+static int dummy_cmp(void *p1, void *p2)
+{
+       return 1;
+}
+
+/*
+ * Initialize the stat list at each trace_stat file opening.
+ * All of this copying and sorting is required at every open,
+ * since the stats could have changed between two file sessions.
+ */
+static int stat_seq_init(struct tracer_stat_session *session)
+{
+       struct trace_stat_list *iter_entry, *new_entry;
+       struct tracer_stat *ts = session->ts;
+       void *prev_stat;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&session->stat_mutex);
+       reset_stat_session(session);
+
+       if (!ts->stat_cmp)
+               ts->stat_cmp = dummy_cmp;
+
+       /*
+        * The first real entry. Strictly speaking it is the second node in
+        * the list, but the first one (the stat_list head) carries no data.
+        */
+       new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
+       if (!new_entry) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       INIT_LIST_HEAD(&new_entry->list);
+
+       list_add(&new_entry->list, &session->stat_list);
+
+       new_entry->stat = ts->stat_start();
+       prev_stat = new_entry->stat;
+
+       /*
+        * Iterate over the tracer stat entries and store them in a sorted
+        * list.
+        */
+       for (i = 1; ; i++) {
+               new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
+               if (!new_entry) {
+                       ret = -ENOMEM;
+                       goto exit_free_list;
+               }
+
+               INIT_LIST_HEAD(&new_entry->list);
+               new_entry->stat = ts->stat_next(prev_stat, i);
+
+               /* End of insertion */
+               if (!new_entry->stat)
+                       break;
+
+               list_for_each_entry(iter_entry, &session->stat_list, list) {
+
+                       /* Insert in descending order */
+                       if (ts->stat_cmp(new_entry->stat,
+                                               iter_entry->stat) > 0) {
+
+                               list_add_tail(&new_entry->list,
+                                               &iter_entry->list);
+                               break;
+
+                       /* The new entry is the smallest so far: append it at the tail */
+                       } else if (list_is_last(&iter_entry->list,
+                                               &session->stat_list)) {
+                               list_add(&new_entry->list, &iter_entry->list);
+                               break;
+                       }
+               }
+
+               prev_stat = new_entry->stat;
+       }
+exit:
+       mutex_unlock(&session->stat_mutex);
+       return ret;
+
+exit_free_list:
+       reset_stat_session(session);
+       mutex_unlock(&session->stat_mutex);
+       return ret;
+}
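
The insertion loop above keeps the stat list sorted in descending order as defined by
the tracer's stat_cmp callback. A minimal sketch of such a callback, assuming the stat
entries are plain unsigned long counters and that larger counters should be listed
first (the my_stat_cmp name is purely illustrative, not part of this patch):

    /* Hypothetical comparator: larger counters sort first */
    static int my_stat_cmp(void *p1, void *p2)
    {
            unsigned long a = *(unsigned long *)p1;
            unsigned long b = *(unsigned long *)p2;

            if (a > b)
                    return 1;
            if (a < b)
                    return -1;
            return 0;
    }
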
+
+
+static void *stat_seq_start(struct seq_file *s, loff_t *pos)
+{
+       struct tracer_stat_session *session = s->private;
+
+       /* Prevent a tracer switch or stat_list modification while we iterate */
+       mutex_lock(&session->stat_mutex);
+
+       /* If we are at the beginning of the file, print the headers */
+       if (!*pos && session->ts->stat_headers)
+               session->ts->stat_headers(s);
+
+       return seq_list_start(&session->stat_list, *pos);
+}
+
+static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
+{
+       struct tracer_stat_session *session = s->private;
+
+       return seq_list_next(p, &session->stat_list, pos);
+}
+
+static void stat_seq_stop(struct seq_file *s, void *p)
+{
+       struct tracer_stat_session *session = s->private;
+       mutex_unlock(&session->stat_mutex);
+}
+
+static int stat_seq_show(struct seq_file *s, void *v)
+{
+       struct tracer_stat_session *session = s->private;
+       struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
+
+       return session->ts->stat_show(s, l->stat);
+}
+
+static const struct seq_operations trace_stat_seq_ops = {
+       .start          = stat_seq_start,
+       .next           = stat_seq_next,
+       .stop           = stat_seq_stop,
+       .show           = stat_seq_show
+};
+
+/* The session's stat list is refilled and re-sorted at each stat file open */
+static int tracing_stat_open(struct inode *inode, struct file *file)
+{
+       int ret;
+
+       struct tracer_stat_session *session = inode->i_private;
+
+       ret = seq_open(file, &trace_stat_seq_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+               m->private = session;
+               ret = stat_seq_init(session);
+       }
+
+       return ret;
+}
+
+/*
+ * Avoid consuming memory with our now useless list.
+ */
+static int tracing_stat_release(struct inode *i, struct file *f)
+{
+       struct tracer_stat_session *session = i->i_private;
+
+       mutex_lock(&session->stat_mutex);
+       reset_stat_session(session);
+       mutex_unlock(&session->stat_mutex);
+
+       return 0;
+}
+
+static const struct file_operations tracing_stat_fops = {
+       .open           = tracing_stat_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = tracing_stat_release
+};
+
+static int tracing_stat_init(void)
+{
+       struct dentry *d_tracing;
+
+       d_tracing = tracing_init_dentry();
+
+       stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+       if (!stat_dir)
+               pr_warning("Could not create debugfs "
+                          "'trace_stat' entry\n");
+       return 0;
+}
+
+static int init_stat_file(struct tracer_stat_session *session)
+{
+       if (!stat_dir && tracing_stat_init())
+               return -ENODEV;
+
+       session->file = debugfs_create_file(session->ts->name, 0644,
+                                           stat_dir,
+                                           session, &tracing_stat_fops);
+       if (!session->file)
+               return -ENOMEM;
+       return 0;
+}
+
+int register_stat_tracer(struct tracer_stat *trace)
+{
+       struct tracer_stat_session *session, *node, *tmp;
+       int ret;
+
+       if (!trace)
+               return -EINVAL;
+
+       if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
+               return -EINVAL;
+
+       /* Already registered? */
+       mutex_lock(&all_stat_sessions_mutex);
+       list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
+               if (node->ts == trace) {
+                       mutex_unlock(&all_stat_sessions_mutex);
+                       return -EINVAL;
+               }
+       }
+       mutex_unlock(&all_stat_sessions_mutex);
+
+       /* Init the session */
+       session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL);
+       if (!session)
+               return -ENOMEM;
+
+       session->ts = trace;
+       INIT_LIST_HEAD(&session->session_list);
+       INIT_LIST_HEAD(&session->stat_list);
+       mutex_init(&session->stat_mutex);
+       session->file = NULL;
+
+       ret = init_stat_file(session);
+       if (ret) {
+               destroy_session(session);
+               return ret;
+       }
+
+       /* Register */
+       mutex_lock(&all_stat_sessions_mutex);
+       list_add_tail(&session->session_list, &all_stat_sessions);
+       mutex_unlock(&all_stat_sessions_mutex);
+
+       return 0;
+}
+
+void unregister_stat_tracer(struct tracer_stat *trace)
+{
+       struct tracer_stat_session *node, *tmp;
+
+       mutex_lock(&all_stat_sessions_mutex);
+       list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
+               if (node->ts == trace) {
+                       list_del(&node->session_list);
+                       destroy_session(node);
+                       break;
+               }
+       }
+       mutex_unlock(&all_stat_sessions_mutex);
+}
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
new file mode 100644 (file)
index 0000000..202274c
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __TRACE_STAT_H
+#define __TRACE_STAT_H
+
+#include <linux/seq_file.h>
+
+/*
+ * If you want to provide a stat file (one-shot statistics), fill in
+ * an iterator with the stat_start/stat_next and stat_show callbacks.
+ * The other callbacks are optional.
+ */
+struct tracer_stat {
+       /* The name of your stat file */
+       const char              *name;
+       /* Iteration over statistic entries */
+       void                    *(*stat_start)(void);
+       void                    *(*stat_next)(void *prev, int idx);
+       /* Compare two entries for stats sorting */
+       int                     (*stat_cmp)(void *p1, void *p2);
+       /* Print a stat entry */
+       int                     (*stat_show)(struct seq_file *s, void *p);
+       /* Print the headers of your stat entries */
+       int                     (*stat_headers)(struct seq_file *s);
+};
+
+/*
+ * Register or unregister a stat tracer (this creates or destroys its stat file)
+ */
+extern int register_stat_tracer(struct tracer_stat *trace);
+extern void unregister_stat_tracer(struct tracer_stat *trace);
+
+#endif /* __TRACE_STAT_H */
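
To illustrate how this interface is meant to be consumed, here is a minimal sketch of a
stat provider, assuming it lives next to trace_stat.h in kernel/trace/; every my_* name
below is invented for the example and is not part of this patch. It exposes a small
array of counters as a 'my_counters' file under trace_stat:

    #include <linux/seq_file.h>
    #include "trace_stat.h"

    #define MY_NR_COUNTERS 4

    static unsigned long my_counters[MY_NR_COUNTERS];

    static void *my_stat_start(void)
    {
            return &my_counters[0];
    }

    static void *my_stat_next(void *prev, int idx)
    {
            /* idx starts at 1; entry 0 was returned by my_stat_start() */
            return idx < MY_NR_COUNTERS ? &my_counters[idx] : NULL;
    }

    static int my_stat_show(struct seq_file *s, void *p)
    {
            unsigned long *val = p;

            seq_printf(s, "%ld %lu\n", (long)(val - my_counters), *val);
            return 0;
    }

    static struct tracer_stat my_stats __read_mostly = {
            .name           = "my_counters",
            .stat_start     = my_stat_start,
            .stat_next      = my_stat_next,
            .stat_show      = my_stat_show,
    };

    static int __init my_stats_init(void)
    {
            return register_stat_tracer(&my_stats);
    }
    fs_initcall(my_stats_init);

Since no stat_cmp is provided here, stat_seq_init() falls back on dummy_cmp(), so no
meaningful sort order is applied to the four entries.
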
index eaca5ad..91fd19c 100644 (file)
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
        }
 }
 
-const static struct stacktrace_ops backtrace_ops = {
+static const struct stacktrace_ops backtrace_ops = {
        .warning                = backtrace_warning,
        .warning_symbol         = backtrace_warning_symbol,
        .stack                  = backtrace_stack,
@@ -226,15 +226,6 @@ static void stop_stack_timers(void)
                stop_stack_timer(cpu);
 }
 
-static void start_stack_trace(struct trace_array *tr)
-{
-       mutex_lock(&sample_timer_lock);
-       tracing_reset_online_cpus(tr);
-       start_stack_timers();
-       tracer_enabled = 1;
-       mutex_unlock(&sample_timer_lock);
-}
-
 static void stop_stack_trace(struct trace_array *tr)
 {
        mutex_lock(&sample_timer_lock);
@@ -247,12 +238,18 @@ static int stack_trace_init(struct trace_array *tr)
 {
        sysprof_trace = tr;
 
-       start_stack_trace(tr);
+       tracing_start_cmdline_record();
+
+       mutex_lock(&sample_timer_lock);
+       start_stack_timers();
+       tracer_enabled = 1;
+       mutex_unlock(&sample_timer_lock);
        return 0;
 }
 
 static void stack_trace_reset(struct trace_array *tr)
 {
+       tracing_stop_cmdline_record();
        stop_stack_trace(tr);
 }
 
@@ -317,7 +314,7 @@ sysprof_sample_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static struct file_operations sysprof_sample_fops = {
+static const struct file_operations sysprof_sample_fops = {
        .read           = sysprof_sample_read,
        .write          = sysprof_sample_write,
 };
@@ -330,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
                        d_tracer, NULL, &sysprof_sample_fops);
        if (entry)
                return;
-       pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");
+       pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
 }
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
new file mode 100644 (file)
index 0000000..46c8dc8
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Workqueue statistical tracer.
+ *
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ */
+
+
+#include <trace/workqueue.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+
+/* A cpu workqueue thread */
+struct cpu_workqueue_stats {
+       struct list_head            list;
+/* Useful to know if we print the cpu headers */
+       bool                        first_entry;
+       int                         cpu;
+       pid_t                       pid;
+/* Can be inserted from interrupt or user context; needs to be atomic */
+       atomic_t                    inserted;
+/*
+ *  Doesn't need to be atomic: works are serialized by a single workqueue
+ *  thread on a single CPU.
+ */
+       unsigned int                executed;
+};
+
+/* List of workqueue threads on one cpu */
+struct workqueue_global_stats {
+       struct list_head        list;
+       spinlock_t              lock;
+};
+
+/* No global lock is needed: this is allocated before the workqueues are
+ * created and is never freed.
+ */
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
+
+/* Insertion of a work */
+static void
+probe_workqueue_insertion(struct task_struct *wq_thread,
+                         struct work_struct *work)
+{
+       int cpu = cpumask_first(&wq_thread->cpus_allowed);
+       struct cpu_workqueue_stats *node, *next;
+       unsigned long flags;
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+       list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
+                                                       list) {
+               if (node->pid == wq_thread->pid) {
+                       atomic_inc(&node->inserted);
+                       goto found;
+               }
+       }
+       pr_debug("trace_workqueue: entry not found\n");
+found:
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+}
+
+/* Execution of a work */
+static void
+probe_workqueue_execution(struct task_struct *wq_thread,
+                         struct work_struct *work)
+{
+       int cpu = cpumask_first(&wq_thread->cpus_allowed);
+       struct cpu_workqueue_stats *node, *next;
+       unsigned long flags;
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+       list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
+                                                       list) {
+               if (node->pid == wq_thread->pid) {
+                       node->executed++;
+                       goto found;
+               }
+       }
+       pr_debug("trace_workqueue: entry not found\n");
+found:
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+}
+
+/* Creation of a cpu workqueue thread */
+static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
+{
+       struct cpu_workqueue_stats *cws;
+       unsigned long flags;
+
+       WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+
+       /* Workqueues are sometimes created in atomic context */
+       cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
+       if (!cws) {
+               pr_warning("trace_workqueue: not enough memory\n");
+               return;
+       }
+       INIT_LIST_HEAD(&cws->list);
+       cws->cpu = cpu;
+
+       cws->pid = wq_thread->pid;
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+       if (list_empty(&workqueue_cpu_stat(cpu)->list))
+               cws->first_entry = true;
+       list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+}
+
+/* Destruction of a cpu workqueue thread */
+static void probe_workqueue_destruction(struct task_struct *wq_thread)
+{
+       /* A workqueue thread only executes on one cpu */
+       int cpu = cpumask_first(&wq_thread->cpus_allowed);
+       struct cpu_workqueue_stats *node, *next;
+       unsigned long flags;
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+       list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
+                                                       list) {
+               if (node->pid == wq_thread->pid) {
+                       list_del(&node->list);
+                       kfree(node);
+                       goto found;
+               }
+       }
+
+       pr_debug("trace_workqueue: could not find workqueue to destroy\n");
+found:
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+}
+
+static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
+{
+       unsigned long flags;
+       struct cpu_workqueue_stats *ret = NULL;
+
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+
+       if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+               ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
+                                struct cpu_workqueue_stats, list);
+
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+       return ret;
+}
+
+static void *workqueue_stat_start(void)
+{
+       int cpu;
+       void *ret = NULL;
+
+       for_each_possible_cpu(cpu) {
+               ret = workqueue_stat_start_cpu(cpu);
+               if (ret)
+                       return ret;
+       }
+       return NULL;
+}
+
+static void *workqueue_stat_next(void *prev, int idx)
+{
+       struct cpu_workqueue_stats *prev_cws = prev;
+       int cpu = prev_cws->cpu;
+       unsigned long flags;
+       void *ret = NULL;
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+       if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+               spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+               for (++cpu; cpu < num_possible_cpus(); cpu++) {
+                       ret = workqueue_stat_start_cpu(cpu);
+                       if (ret)
+                               return ret;
+               }
+               return NULL;
+       }
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+       return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
+                         list);
+}
+
+static int workqueue_stat_show(struct seq_file *s, void *p)
+{
+       struct cpu_workqueue_stats *cws = p;
+       unsigned long flags;
+       int cpu = cws->cpu;
+       struct task_struct *tsk = find_task_by_vpid(cws->pid);
+
+       seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
+                  atomic_read(&cws->inserted),
+                  cws->executed,
+                  tsk ? tsk->comm : "<...>");
+
+       spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+       if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
+               seq_printf(s, "\n");
+       spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+       return 0;
+}
+
+static int workqueue_stat_headers(struct seq_file *s)
+{
+       seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
+       seq_printf(s, "# |      |         |          |\n\n");
+       return 0;
+}
+
+struct tracer_stat workqueue_stats __read_mostly = {
+       .name = "workqueues",
+       .stat_start = workqueue_stat_start,
+       .stat_next = workqueue_stat_next,
+       .stat_show = workqueue_stat_show,
+       .stat_headers = workqueue_stat_headers
+};
+
+
+int __init stat_workqueue_init(void)
+{
+       if (register_stat_tracer(&workqueue_stats)) {
+               pr_warning("Unable to register workqueue stat tracer\n");
+               return 1;
+       }
+
+       return 0;
+}
+fs_initcall(stat_workqueue_init);
+
+/*
+ * Workqueues are created very early, just after pre-smp initcalls.
+ * So we must register our tracepoints at this stage.
+ */
+int __init trace_workqueue_early_init(void)
+{
+       int ret, cpu;
+
+       ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
+       if (ret)
+               goto out;
+
+       ret = register_trace_workqueue_execution(probe_workqueue_execution);
+       if (ret)
+               goto no_insertion;
+
+       ret = register_trace_workqueue_creation(probe_workqueue_creation);
+       if (ret)
+               goto no_execution;
+
+       ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
+       if (ret)
+               goto no_creation;
+
+       for_each_possible_cpu(cpu) {
+               spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+               INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
+       }
+
+       return 0;
+
+no_creation:
+       unregister_trace_workqueue_creation(probe_workqueue_creation);
+no_execution:
+       unregister_trace_workqueue_execution(probe_workqueue_execution);
+no_insertion:
+       unregister_trace_workqueue_insertion(probe_workqueue_insertion);
+out:
+       pr_warning("trace_workqueue: unable to trace workqueues\n");
+
+       return 1;
+}
+early_initcall(trace_workqueue_early_init);
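
Given the row and header formats in workqueue_stat_show() and workqueue_stat_headers()
above, the resulting trace_stat file (presumably 'workqueues' under the trace_stat
debugfs directory, per the .name field) would look roughly like the following; the
counts and thread names are invented and the exact column alignment is approximate:

    # CPU  INSERTED  EXECUTED   NAME
    # |      |         |          |

      0     157        157       events/0
      0      12         12       khelper
      1      98         98       events/1

Each row pairs the number of works queued to a workqueue thread (INSERTED) with the
number it has actually run (EXECUTED), so the two columns normally track each other
closely.
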
index 1f0c509..e53ee18 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
+#include <trace/workqueue.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -125,9 +126,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+DEFINE_TRACE(workqueue_insertion);
+
 static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
 {
+       trace_workqueue_insertion(cwq->thread, work);
+
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
@@ -259,6 +264,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
+DEFINE_TRACE(workqueue_execution);
+
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
        spin_lock_irq(&cwq->lock);
@@ -284,7 +291,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
-
+               trace_workqueue_execution(cwq->thread, work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);
@@ -765,6 +772,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
        return cwq;
 }
 
+DEFINE_TRACE(workqueue_creation);
+
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -787,6 +796,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;
 
+       trace_workqueue_creation(cwq->thread, cpu);
+
        return 0;
 }
 
@@ -868,6 +879,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
+DEFINE_TRACE(workqueue_destruction);
+
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
        /*
@@ -891,6 +904,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
+       trace_workqueue_destruction(cwq->thread);
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
 }
index daa4818..206f36a 100644 (file)
@@ -2,6 +2,9 @@
 # Library configuration
 #
 
+config BINARY_PRINTF
+       def_bool n
+
 menu "Library routines"
 
 config BITREVERSE
index 1bcf9cd..a0879b2 100644 (file)
@@ -402,7 +402,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !X86 && !MIPS && !PPC
+       select FRAME_POINTER if !MIPS && !PPC
        select KALLSYMS
        select KALLSYMS_ALL
 
index 0fbd012..25f0157 100644 (file)
@@ -396,7 +396,38 @@ static noinline char* put_dec(char *buf, unsigned long long num)
 #define SMALL  32              /* Must be 32 == 0x20 */
 #define SPECIAL        64              /* 0x */
 
-static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type)
+enum format_type {
+       FORMAT_TYPE_NONE, /* Just a string part */
+       FORMAT_TYPE_WITDH,
+       FORMAT_TYPE_PRECISION,
+       FORMAT_TYPE_CHAR,
+       FORMAT_TYPE_STR,
+       FORMAT_TYPE_PTR,
+       FORMAT_TYPE_PERCENT_CHAR,
+       FORMAT_TYPE_INVALID,
+       FORMAT_TYPE_LONG_LONG,
+       FORMAT_TYPE_ULONG,
+       FORMAT_TYPE_LONG,
+       FORMAT_TYPE_USHORT,
+       FORMAT_TYPE_SHORT,
+       FORMAT_TYPE_UINT,
+       FORMAT_TYPE_INT,
+       FORMAT_TYPE_NRCHARS,
+       FORMAT_TYPE_SIZE_T,
+       FORMAT_TYPE_PTRDIFF
+};
+
+struct printf_spec {
+       enum format_type        type;
+       int                     flags;          /* flags to number() */
+       int                     field_width;    /* width of output field */
+       int                     base;
+       int                     precision;      /* # of digits/chars */
+       int                     qualifier;
+};
+
+static char *number(char *buf, char *end, unsigned long long num,
+                       struct printf_spec spec)
 {
        /* we are called with base 8, 10 or 16, only, thus don't need "G..."  */
        static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -404,32 +435,32 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
        char tmp[66];
        char sign;
        char locase;
-       int need_pfx = ((type & SPECIAL) && base != 10);
+       int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
        int i;
 
        /* locase = 0 or 0x20. ORing digits or letters with 'locase'
         * produces same digits or (maybe lowercased) letters */
-       locase = (type & SMALL);
-       if (type & LEFT)
-               type &= ~ZEROPAD;
+       locase = (spec.flags & SMALL);
+       if (spec.flags & LEFT)
+               spec.flags &= ~ZEROPAD;
        sign = 0;
-       if (type & SIGN) {
+       if (spec.flags & SIGN) {
                if ((signed long long) num < 0) {
                        sign = '-';
                        num = - (signed long long) num;
-                       size--;
-               } else if (type & PLUS) {
+                       spec.field_width--;
+               } else if (spec.flags & PLUS) {
                        sign = '+';
-                       size--;
-               } else if (type & SPACE) {
+                       spec.field_width--;
+               } else if (spec.flags & SPACE) {
                        sign = ' ';
-                       size--;
+                       spec.field_width--;
                }
        }
        if (need_pfx) {
-               size--;
-               if (base == 16)
-                       size--;
+               spec.field_width--;
+               if (spec.base == 16)
+                       spec.field_width--;
        }
 
        /* generate full string in tmp[], in reverse order */
@@ -441,10 +472,10 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
                tmp[i++] = (digits[do_div(num,base)] | locase);
        } while (num != 0);
        */
-       else if (base != 10) { /* 8 or 16 */
-               int mask = base - 1;
+       else if (spec.base != 10) { /* 8 or 16 */
+               int mask = spec.base - 1;
                int shift = 3;
-               if (base == 16) shift = 4;
+               if (spec.base == 16) shift = 4;
                do {
                        tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
                        num >>= shift;
@@ -454,12 +485,12 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
        }
 
        /* printing 100 using %2d gives "100", not "00" */
-       if (i > precision)
-               precision = i;
+       if (i > spec.precision)
+               spec.precision = i;
        /* leading space padding */
-       size -= precision;
-       if (!(type & (ZEROPAD+LEFT))) {
-               while(--size >= 0) {
+       spec.field_width -= spec.precision;
+       if (!(spec.flags & (ZEROPAD+LEFT))) {
+               while(--spec.field_width >= 0) {
                        if (buf < end)
                                *buf = ' ';
                        ++buf;
@@ -476,23 +507,23 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
                if (buf < end)
                        *buf = '0';
                ++buf;
-               if (base == 16) {
+               if (spec.base == 16) {
                        if (buf < end)
                                *buf = ('X' | locase);
                        ++buf;
                }
        }
        /* zero or space padding */
-       if (!(type & LEFT)) {
-               char c = (type & ZEROPAD) ? '0' : ' ';
-               while (--size >= 0) {
+       if (!(spec.flags & LEFT)) {
+               char c = (spec.flags & ZEROPAD) ? '0' : ' ';
+               while (--spec.field_width >= 0) {
                        if (buf < end)
                                *buf = c;
                        ++buf;
                }
        }
        /* hmm even more zero padding? */
-       while (i <= --precision) {
+       while (i <= --spec.precision) {
                if (buf < end)
                        *buf = '0';
                ++buf;
@@ -504,7 +535,7 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
                ++buf;
        }
        /* trailing space padding */
-       while (--size >= 0) {
+       while (--spec.field_width >= 0) {
                if (buf < end)
                        *buf = ' ';
                ++buf;
@@ -512,17 +543,17 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
        return buf;
 }
 
-static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
+static char *string(char *buf, char *end, char *s, struct printf_spec spec)
 {
        int len, i;
 
        if ((unsigned long)s < PAGE_SIZE)
                s = "<NULL>";
 
-       len = strnlen(s, precision);
+       len = strnlen(s, spec.precision);
 
-       if (!(flags & LEFT)) {
-               while (len < field_width--) {
+       if (!(spec.flags & LEFT)) {
+               while (len < spec.field_width--) {
                        if (buf < end)
                                *buf = ' ';
                        ++buf;
@@ -533,7 +564,7 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
                        *buf = *s;
                ++buf; ++s;
        }
-       while (len < field_width--) {
+       while (len < spec.field_width--) {
                if (buf < end)
                        *buf = ' ';
                ++buf;
@@ -541,21 +572,24 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
        return buf;
 }
 
-static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+static char *symbol_string(char *buf, char *end, void *ptr,
+                               struct printf_spec spec)
 {
        unsigned long value = (unsigned long) ptr;
 #ifdef CONFIG_KALLSYMS
        char sym[KSYM_SYMBOL_LEN];
        sprint_symbol(sym, value);
-       return string(buf, end, sym, field_width, precision, flags);
+       return string(buf, end, sym, spec);
 #else
-       field_width = 2*sizeof(void *);
-       flags |= SPECIAL | SMALL | ZEROPAD;
-       return number(buf, end, value, 16, field_width, precision, flags);
+       spec.field_width = 2*sizeof(void *);
+       spec.flags |= SPECIAL | SMALL | ZEROPAD;
+       spec.base = 16;
+       return number(buf, end, value, spec);
 #endif
 }
 
-static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags)
+static char *resource_string(char *buf, char *end, struct resource *res,
+                               struct printf_spec spec)
 {
 #ifndef IO_RSRC_PRINTK_SIZE
 #define IO_RSRC_PRINTK_SIZE    4
@@ -564,7 +598,11 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
 #ifndef MEM_RSRC_PRINTK_SIZE
 #define MEM_RSRC_PRINTK_SIZE   8
 #endif
-
+       struct printf_spec num_spec = {
+               .base = 16,
+               .precision = -1,
+               .flags = SPECIAL | SMALL | ZEROPAD,
+       };
        /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
        char sym[4*sizeof(resource_size_t) + 8];
        char *p = sym, *pend = sym + sizeof(sym);
@@ -576,17 +614,18 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
                size = MEM_RSRC_PRINTK_SIZE;
 
        *p++ = '[';
-       p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
+       num_spec.field_width = size;
+       p = number(p, pend, res->start, num_spec);
        *p++ = '-';
-       p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
+       p = number(p, pend, res->end, num_spec);
        *p++ = ']';
        *p = 0;
 
-       return string(buf, end, sym, field_width, precision, flags);
+       return string(buf, end, sym, spec);
 }
 
-static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width,
-                               int precision, int flags)
+static char *mac_address_string(char *buf, char *end, u8 *addr,
+                               struct printf_spec spec)
 {
        char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
        char *p = mac_addr;
@@ -594,16 +633,17 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width,
 
        for (i = 0; i < 6; i++) {
                p = pack_hex_byte(p, addr[i]);
-               if (!(flags & SPECIAL) && i != 5)
+               if (!(spec.flags & SPECIAL) && i != 5)
                        *p++ = ':';
        }
        *p = '\0';
+       spec.flags &= ~SPECIAL;
 
-       return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL);
+       return string(buf, end, mac_addr, spec);
 }
 
-static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width,
-                        int precision, int flags)
+static char *ip6_addr_string(char *buf, char *end, u8 *addr,
+                               struct printf_spec spec)
 {
        char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
        char *p = ip6_addr;
@@ -612,16 +652,17 @@ static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width,
        for (i = 0; i < 8; i++) {
                p = pack_hex_byte(p, addr[2 * i]);
                p = pack_hex_byte(p, addr[2 * i + 1]);
-               if (!(flags & SPECIAL) && i != 7)
+               if (!(spec.flags & SPECIAL) && i != 7)
                        *p++ = ':';
        }
        *p = '\0';
+       spec.flags &= ~SPECIAL;
 
-       return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL);
+       return string(buf, end, ip6_addr, spec);
 }
 
-static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
-                        int precision, int flags)
+static char *ip4_addr_string(char *buf, char *end, u8 *addr,
+                               struct printf_spec spec)
 {
        char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
        char temp[3];   /* hold each IP quad in reverse order */
@@ -637,8 +678,9 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
                        *p++ = '.';
        }
        *p = '\0';
+       spec.flags &= ~SPECIAL;
 
-       return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL);
+       return string(buf, end, ip4_addr, spec);
 }
 
 /*
@@ -663,41 +705,234 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
  */
-static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+                       struct printf_spec spec)
 {
        if (!ptr)
-               return string(buf, end, "(null)", field_width, precision, flags);
+               return string(buf, end, "(null)", spec);
 
        switch (*fmt) {
        case 'F':
                ptr = dereference_function_descriptor(ptr);
                /* Fallthrough */
        case 'S':
-               return symbol_string(buf, end, ptr, field_width, precision, flags);
+               return symbol_string(buf, end, ptr, spec);
        case 'R':
-               return resource_string(buf, end, ptr, field_width, precision, flags);
+               return resource_string(buf, end, ptr, spec);
        case 'm':
-               flags |= SPECIAL;
+               spec.flags |= SPECIAL;
                /* Fallthrough */
        case 'M':
-               return mac_address_string(buf, end, ptr, field_width, precision, flags);
+               return mac_address_string(buf, end, ptr, spec);
        case 'i':
-               flags |= SPECIAL;
+               spec.flags |= SPECIAL;
                /* Fallthrough */
        case 'I':
                if (fmt[1] == '6')
-                       return ip6_addr_string(buf, end, ptr, field_width, precision, flags);
+                       return ip6_addr_string(buf, end, ptr, spec);
                if (fmt[1] == '4')
-                       return ip4_addr_string(buf, end, ptr, field_width, precision, flags);
-               flags &= ~SPECIAL;
+                       return ip4_addr_string(buf, end, ptr, spec);
+               spec.flags &= ~SPECIAL;
+               break;
+       }
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2*sizeof(void *);
+               spec.flags |= ZEROPAD;
+       }
+       spec.base = 16;
+
+       return number(buf, end, (unsigned long) ptr, spec);
+}
+
+/*
+ * Helper function to decode a printf-style format.
+ * Each call decodes one token from the format and returns the
+ * number of characters read (i.e. the delta to apply to fmt
+ * before the next call).
+ * The decoded token is returned through @spec:
+ *
+ * @fmt: the format string
+ * @spec->type: kind of token (literal string, width, precision, conversion)
+ * @spec->flags: various flags such as +, -, # tokens..
+ * @spec->field_width: overwritten width
+ * @spec->base: base of the number (octal, hex, ...)
+ * @spec->precision: precision of a number
+ * @spec->qualifier: qualifier of a number (long, size_t, ...)
+ *
+ * Qualifier history:
+ * 'h', 'l', or 'L' for integer fields
+ * 'z' support added 23/7/1999 S.H.
+ * 'z' changed to 'Z' --davidm 1/25/99
+ * 't' added for ptrdiff_t
+ */
+static int format_decode(const char *fmt, struct printf_spec *spec)
+{
+       const char *start = fmt;
+       bool sign = false;
+
+       /* we finished early by reading the field width */
+       if (spec->type == FORMAT_TYPE_WITDH) {
+               if (spec->field_width < 0) {
+                       spec->field_width = -spec->field_width;
+                       spec->flags |= LEFT;
+               }
+               spec->type = FORMAT_TYPE_NONE;
+               goto precision;
+       }
+
+       /* we finished early by reading the precision */
+       if (spec->type == FORMAT_TYPE_PRECISION) {
+               if (spec->precision < 0)
+                       spec->precision = 0;
+
+               spec->type = FORMAT_TYPE_NONE;
+               goto qualifier;
+       }
+
+       /* By default */
+       spec->type = FORMAT_TYPE_NONE;
+
+       for (; *fmt ; ++fmt) {
+               if (*fmt == '%')
+                       break;
+       }
+
+       /* Return the current non-format string */
+       if (fmt != start || !*fmt)
+               return fmt - start;
+
+       /* Process flags */
+       spec->flags = 0;
+
+       while (1) { /* this also skips first '%' */
+               bool found = true;
+
+               ++fmt;
+
+               switch (*fmt) {
+               case '-': spec->flags |= LEFT;    break;
+               case '+': spec->flags |= PLUS;    break;
+               case ' ': spec->flags |= SPACE;   break;
+               case '#': spec->flags |= SPECIAL; break;
+               case '0': spec->flags |= ZEROPAD; break;
+               default:  found = false;
+               }
+
+               if (!found)
+                       break;
+       }
+
+       /* get field width */
+       spec->field_width = -1;
+
+       if (isdigit(*fmt))
+               spec->field_width = skip_atoi(&fmt);
+       else if (*fmt == '*') {
+               /* it's the next argument */
+               spec->type = FORMAT_TYPE_WITDH;
+               return ++fmt - start;
+       }
+
+precision:
+       /* get the precision */
+       spec->precision = -1;
+       if (*fmt == '.') {
+               ++fmt;
+               if (isdigit(*fmt)) {
+                       spec->precision = skip_atoi(&fmt);
+                       if (spec->precision < 0)
+                               spec->precision = 0;
+               } else if (*fmt == '*') {
+                       /* it's the next argument */
+                       spec->type = FORMAT_TYPE_PRECISION;
+                       return ++fmt - start;
+               }
+       }
+
+qualifier:
+       /* get the conversion qualifier */
+       spec->qualifier = -1;
+       if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+           *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
+               spec->qualifier = *fmt;
+               ++fmt;
+               if (spec->qualifier == 'l' && *fmt == 'l') {
+                       spec->qualifier = 'L';
+                       ++fmt;
+               }
+       }
+
+       /* default base */
+       spec->base = 10;
+       switch (*fmt) {
+       case 'c':
+               spec->type = FORMAT_TYPE_CHAR;
+               return ++fmt - start;
+
+       case 's':
+               spec->type = FORMAT_TYPE_STR;
+               return ++fmt - start;
+
+       case 'p':
+               spec->type = FORMAT_TYPE_PTR;
+               /* the caller skips the alphanumeric pointer-format suffix */
+               return fmt - start;
+
+       case 'n':
+               spec->type = FORMAT_TYPE_NRCHARS;
+               return ++fmt - start;
+
+       case '%':
+               spec->type = FORMAT_TYPE_PERCENT_CHAR;
+               return ++fmt - start;
+
+       /* integer number formats - set up the flags and "break" */
+       case 'o':
+               spec->base = 8;
+               break;
+
+       case 'x':
+               spec->flags |= SMALL;
+
+       case 'X':
+               spec->base = 16;
+               break;
+
+       case 'd':
+       case 'i':
+               sign = true;
+       case 'u':
                break;
+
+       default:
+               spec->type = FORMAT_TYPE_INVALID;
+               return fmt - start;
        }
-       flags |= SMALL;
-       if (field_width == -1) {
-               field_width = 2*sizeof(void *);
-               flags |= ZEROPAD;
+
+       if (spec->qualifier == 'L')
+               spec->type = FORMAT_TYPE_LONG_LONG;
+       else if (spec->qualifier == 'l') {
+               if (sign)
+                       spec->type = FORMAT_TYPE_LONG;
+               else
+                       spec->type = FORMAT_TYPE_ULONG;
+       } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
+               spec->type = FORMAT_TYPE_SIZE_T;
+       } else if (spec->qualifier == 't') {
+               spec->type = FORMAT_TYPE_PTRDIFF;
+       } else if (spec->qualifier == 'h') {
+               if (sign)
+                       spec->type = FORMAT_TYPE_SHORT;
+               else
+                       spec->type = FORMAT_TYPE_USHORT;
+       } else {
+               if (sign)
+                       spec->type = FORMAT_TYPE_INT;
+               else
+                       spec->type = FORMAT_TYPE_UINT;
        }
-       return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+
+       return ++fmt - start;
 }
 
 /**
@@ -726,18 +961,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
        unsigned long long num;
-       int base;
        char *str, *end, c;
-
-       int flags;              /* flags to number() */
-
-       int field_width;        /* width of output field */
-       int precision;          /* min. # of digits for integers; max
-                                  number of chars for from string */
-       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
-                               /* 'z' support added 23/7/1999 S.H.    */
-                               /* 'z' changed to 'Z' --davidm 1/25/99 */
-                               /* 't' added for ptrdiff_t */
+       int read;
+       struct printf_spec spec = {0};
 
        /* Reject out-of-range values early.  Large positive sizes are
           used for unknown buffer sizes. */
@@ -758,184 +984,144 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
                size = end - buf;
        }
 
-       for (; *fmt ; ++fmt) {
-               if (*fmt != '%') {
-                       if (str < end)
-                               *str = *fmt;
-                       ++str;
-                       continue;
-               }
+       while (*fmt) {
+               const char *old_fmt = fmt;
 
-               /* process flags */
-               flags = 0;
-               repeat:
-                       ++fmt;          /* this also skips first '%' */
-                       switch (*fmt) {
-                               case '-': flags |= LEFT; goto repeat;
-                               case '+': flags |= PLUS; goto repeat;
-                               case ' ': flags |= SPACE; goto repeat;
-                               case '#': flags |= SPECIAL; goto repeat;
-                               case '0': flags |= ZEROPAD; goto repeat;
-                       }
+               read = format_decode(fmt, &spec);
 
-               /* get field width */
-               field_width = -1;
-               if (isdigit(*fmt))
-                       field_width = skip_atoi(&fmt);
-               else if (*fmt == '*') {
-                       ++fmt;
-                       /* it's the next argument */
-                       field_width = va_arg(args, int);
-                       if (field_width < 0) {
-                               field_width = -field_width;
-                               flags |= LEFT;
-                       }
-               }
+               fmt += read;
 
-               /* get the precision */
-               precision = -1;
-               if (*fmt == '.') {
-                       ++fmt;  
-                       if (isdigit(*fmt))
-                               precision = skip_atoi(&fmt);
-                       else if (*fmt == '*') {
-                               ++fmt;
-                               /* it's the next argument */
-                               precision = va_arg(args, int);
+               switch (spec.type) {
+               case FORMAT_TYPE_NONE: {
+                       int copy = read;
+                       if (str < end) {
+                               if (copy > end - str)
+                                       copy = end - str;
+                               memcpy(str, old_fmt, copy);
                        }
-                       if (precision < 0)
-                               precision = 0;
+                       str += read;
+                       break;
                }
 
-               /* get the conversion qualifier */
-               qualifier = -1;
-               if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
-                   *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
-                       qualifier = *fmt;
-                       ++fmt;
-                       if (qualifier == 'l' && *fmt == 'l') {
-                               qualifier = 'L';
-                               ++fmt;
-                       }
-               }
+               case FORMAT_TYPE_WITDH:
+                       spec.field_width = va_arg(args, int);
+                       break;
 
-               /* default base */
-               base = 10;
+               case FORMAT_TYPE_PRECISION:
+                       spec.precision = va_arg(args, int);
+                       break;
 
-               switch (*fmt) {
-                       case 'c':
-                               if (!(flags & LEFT)) {
-                                       while (--field_width > 0) {
-                                               if (str < end)
-                                                       *str = ' ';
-                                               ++str;
-                                       }
-                               }
-                               c = (unsigned char) va_arg(args, int);
-                               if (str < end)
-                                       *str = c;
-                               ++str;
-                               while (--field_width > 0) {
+               case FORMAT_TYPE_CHAR:
+                       if (!(spec.flags & LEFT)) {
+                               while (--spec.field_width > 0) {
                                        if (str < end)
                                                *str = ' ';
                                        ++str;
-                               }
-                               continue;
-
-                       case 's':
-                               str = string(str, end, va_arg(args, char *), field_width, precision, flags);
-                               continue;
-
-                       case 'p':
-                               str = pointer(fmt+1, str, end,
-                                               va_arg(args, void *),
-                                               field_width, precision, flags);
-                               /* Skip all alphanumeric pointer suffixes */
-                               while (isalnum(fmt[1]))
-                                       fmt++;
-                               continue;
-
-                       case 'n':
-                               /* FIXME:
-                               * What does C99 say about the overflow case here? */
-                               if (qualifier == 'l') {
-                                       long * ip = va_arg(args, long *);
-                                       *ip = (str - buf);
-                               } else if (qualifier == 'Z' || qualifier == 'z') {
-                                       size_t * ip = va_arg(args, size_t *);
-                                       *ip = (str - buf);
-                               } else {
-                                       int * ip = va_arg(args, int *);
-                                       *ip = (str - buf);
-                               }
-                               continue;
 
-                       case '%':
+                               }
+                       }
+                       c = (unsigned char) va_arg(args, int);
+                       if (str < end)
+                               *str = c;
+                       ++str;
+                       while (--spec.field_width > 0) {
                                if (str < end)
-                                       *str = '%';
+                                       *str = ' ';
                                ++str;
-                               continue;
+                       }
+                       break;
 
-                               /* integer number formats - set up the flags and "break" */
-                       case 'o':
-                               base = 8;
-                               break;
+               case FORMAT_TYPE_STR:
+                       str = string(str, end, va_arg(args, char *), spec);
+                       break;
 
-                       case 'x':
-                               flags |= SMALL;
-                       case 'X':
-                               base = 16;
-                               break;
+               case FORMAT_TYPE_PTR:
+                       str = pointer(fmt+1, str, end, va_arg(args, void *),
+                                     spec);
+                       while (isalnum(*fmt))
+                               fmt++;
+                       break;
 
-                       case 'd':
-                       case 'i':
-                               flags |= SIGN;
-                       case 'u':
-                               break;
+               case FORMAT_TYPE_PERCENT_CHAR:
+                       if (str < end)
+                               *str = '%';
+                       ++str;
+                       break;
 
-                       default:
+               case FORMAT_TYPE_INVALID:
+                       if (str < end)
+                               *str = '%';
+                       ++str;
+                       if (*fmt) {
                                if (str < end)
-                                       *str = '%';
+                                       *str = *fmt;
                                ++str;
-                               if (*fmt) {
-                                       if (str < end)
-                                               *str = *fmt;
-                                       ++str;
-                               } else {
-                                       --fmt;
-                               }
-                               continue;
+                       } else {
+                               --fmt;
+                       }
+                       break;
+
+               case FORMAT_TYPE_NRCHARS: {
+                       int qualifier = spec.qualifier;
+
+                       if (qualifier == 'l') {
+                               long *ip = va_arg(args, long *);
+                               *ip = (str - buf);
+                       } else if (qualifier == 'Z' ||
+                                       qualifier == 'z') {
+                               size_t *ip = va_arg(args, size_t *);
+                               *ip = (str - buf);
+                       } else {
+                               int *ip = va_arg(args, int *);
+                               *ip = (str - buf);
+                       }
+                       break;
                }
-               if (qualifier == 'L')
-                       num = va_arg(args, long long);
-               else if (qualifier == 'l') {
-                       num = va_arg(args, unsigned long);
-                       if (flags & SIGN)
-                               num = (signed long) num;
-               } else if (qualifier == 'Z' || qualifier == 'z') {
-                       num = va_arg(args, size_t);
-               } else if (qualifier == 't') {
-                       num = va_arg(args, ptrdiff_t);
-               } else if (qualifier == 'h') {
-                       num = (unsigned short) va_arg(args, int);
-                       if (flags & SIGN)
-                               num = (signed short) num;
-               } else {
-                       num = va_arg(args, unsigned int);
-                       if (flags & SIGN)
-                               num = (signed int) num;
+
+               default:
+                       switch (spec.type) {
+                       case FORMAT_TYPE_LONG_LONG:
+                               num = va_arg(args, long long);
+                               break;
+                       case FORMAT_TYPE_ULONG:
+                               num = va_arg(args, unsigned long);
+                               break;
+                       case FORMAT_TYPE_LONG:
+                               num = va_arg(args, long);
+                               break;
+                       case FORMAT_TYPE_SIZE_T:
+                               num = va_arg(args, size_t);
+                               break;
+                       case FORMAT_TYPE_PTRDIFF:
+                               num = va_arg(args, ptrdiff_t);
+                               break;
+                       case FORMAT_TYPE_USHORT:
+                               num = (unsigned short) va_arg(args, int);
+                               break;
+                       case FORMAT_TYPE_SHORT:
+                               num = (short) va_arg(args, int);
+                               break;
+                       case FORMAT_TYPE_UINT:
+                               num = va_arg(args, unsigned int);
+                               break;
+                       default:
+                               num = va_arg(args, unsigned int);
+                       }
+
+                       str = number(str, end, num, spec);
                }
-               str = number(str, end, num, base,
-                               field_width, precision, flags);
        }
+
        if (size > 0) {
                if (str < end)
                        *str = '\0';
                else
                        end[-1] = '\0';
        }
+
        /* the trailing null byte doesn't count towards the total */
        return str-buf;
+
 }
 EXPORT_SYMBOL(vsnprintf);
 
@@ -1058,6 +1244,372 @@ int sprintf(char * buf, const char *fmt, ...)
 }
 EXPORT_SYMBOL(sprintf);
 
+#ifdef CONFIG_BINARY_PRINTF
+/*
+ * bprintf service:
+ * vbin_printf() - VA arguments to binary data
+ * bstr_printf() - Binary data to text string
+ */
+
+/**
+ * vbin_printf - Parse a format string and place args' binary value in a buffer
+ * @bin_buf: The buffer to place args' binary value
+ * @size: The size of the buffer (in 32-bit words, not characters)
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The format follows C99 vsnprintf, except %n is ignored, and its argument
+ * is skipped.
+ *
+ * The return value is the number of 32-bit words which would be generated for
+ * the given input.
+ *
+ * NOTE:
+ * If the return value is greater than @size, the resulting bin_buf is NOT
+ * valid for bstr_printf().
+ */
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
+{
+       struct printf_spec spec = {0};
+       char *str, *end;
+       int read;
+
+       str = (char *)bin_buf;
+       end = (char *)(bin_buf + size);
+
+#define save_arg(type)                                                 \
+do {                                                                   \
+       if (sizeof(type) == 8) {                                        \
+               unsigned long long value;                               \
+               str = PTR_ALIGN(str, sizeof(u32));                      \
+               value = va_arg(args, unsigned long long);               \
+               if (str + sizeof(type) <= end) {                        \
+                       *(u32 *)str = *(u32 *)&value;                   \
+                       *(u32 *)(str + 4) = *((u32 *)&value + 1);       \
+               }                                                       \
+       } else {                                                        \
+               unsigned long value;                                    \
+               str = PTR_ALIGN(str, sizeof(type));                     \
+               value = va_arg(args, int);                              \
+               if (str + sizeof(type) <= end)                          \
+                       *(typeof(type) *)str = (type)value;             \
+       }                                                               \
+       str += sizeof(type);                                            \
+} while (0)
+
+
+       while (*fmt) {
+               read = format_decode(fmt, &spec);
+
+               fmt += read;
+
+               switch (spec.type) {
+               case FORMAT_TYPE_NONE:
+                       break;
+
+               case FORMAT_TYPE_WITDH:
+               case FORMAT_TYPE_PRECISION:
+                       save_arg(int);
+                       break;
+
+               case FORMAT_TYPE_CHAR:
+                       save_arg(char);
+                       break;
+
+               case FORMAT_TYPE_STR: {
+                       const char *save_str = va_arg(args, char *);
+                       size_t len;
+                       if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
+                                       || (unsigned long)save_str < PAGE_SIZE)
+                               save_str = "<NULL>";
+                       len = strlen(save_str);
+                       if (str + len + 1 < end)
+                               memcpy(str, save_str, len + 1);
+                       str += len + 1;
+                       break;
+               }
+
+               case FORMAT_TYPE_PTR:
+                       save_arg(void *);
+                       /* skip all alphanumeric pointer suffixes */
+                       while (isalnum(*fmt))
+                               fmt++;
+                       break;
+
+               case FORMAT_TYPE_PERCENT_CHAR:
+                       break;
+
+               case FORMAT_TYPE_INVALID:
+                       if (!*fmt)
+                               --fmt;
+                       break;
+
+               case FORMAT_TYPE_NRCHARS: {
+                       /* skip %n's argument */
+                       int qualifier = spec.qualifier;
+                       void *skip_arg;
+                       if (qualifier == 'l')
+                               skip_arg = va_arg(args, long *);
+                       else if (qualifier == 'Z' || qualifier == 'z')
+                               skip_arg = va_arg(args, size_t *);
+                       else
+                               skip_arg = va_arg(args, int *);
+                       break;
+               }
+
+               default:
+                       switch (spec.type) {
+
+                       case FORMAT_TYPE_LONG_LONG:
+                               save_arg(long long);
+                               break;
+                       case FORMAT_TYPE_ULONG:
+                       case FORMAT_TYPE_LONG:
+                               save_arg(unsigned long);
+                               break;
+                       case FORMAT_TYPE_SIZE_T:
+                               save_arg(size_t);
+                               break;
+                       case FORMAT_TYPE_PTRDIFF:
+                               save_arg(ptrdiff_t);
+                               break;
+                       case FORMAT_TYPE_USHORT:
+                       case FORMAT_TYPE_SHORT:
+                               save_arg(short);
+                               break;
+                       default:
+                               save_arg(int);
+                       }
+               }
+       }
+       return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
+
+#undef save_arg
+}
+EXPORT_SYMBOL_GPL(vbin_printf);
+
+/**
+ * bstr_printf - Format a string from binary arguments and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @bin_buf: Binary arguments for the format string
+ *
+ * This function is like C99 vsnprintf, but the difference is that vsnprintf
+ * gets its arguments from the stack, while bstr_printf gets them from @bin_buf,
+ * a binary buffer generated by vbin_printf.
+ *
+ * The format follows C99 vsnprintf, but has some extensions:
+ * %pS output the name of a text symbol
+ * %pF output the name of a function pointer
+ * %pR output the address range in a struct resource
+ * %n is ignored
+ *
+ * The return value is the number of characters which would
+ * be generated for the given input, excluding the trailing
+ * '\0', as per ISO C99. If you want to have the exact
+ * number of characters written into @buf as return value
+ * (not including the trailing '\0'), use vscnprintf(). If the
+ * return is greater than or equal to @size, the resulting
+ * string is truncated.
+ */
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+{
+       unsigned long long num;
+       char *str, *end, c;
+       const char *args = (const char *)bin_buf;
+
+       struct printf_spec spec = {0};
+
+       if (unlikely((int) size < 0)) {
+               /* There can be only one.. */
+               static char warn = 1;
+               WARN_ON(warn);
+               warn = 0;
+               return 0;
+       }
+
+       str = buf;
+       end = buf + size;
+
+#define get_arg(type)                                                  \
+({                                                                     \
+       typeof(type) value;                                             \
+       if (sizeof(type) == 8) {                                        \
+               args = PTR_ALIGN(args, sizeof(u32));                    \
+               *(u32 *)&value = *(u32 *)args;                          \
+               *((u32 *)&value + 1) = *(u32 *)(args + 4);              \
+       } else {                                                        \
+               args = PTR_ALIGN(args, sizeof(type));                   \
+               value = *(typeof(type) *)args;                          \
+       }                                                               \
+       args += sizeof(type);                                           \
+       value;                                                          \
+})
+
+       /* Make sure end is always >= buf */
+       if (end < buf) {
+               end = ((void *)-1);
+               size = end - buf;
+       }
+
+       while (*fmt) {
+               int read;
+               const char *old_fmt = fmt;
+
+               read = format_decode(fmt, &spec);
+
+               fmt += read;
+
+               switch (spec.type) {
+               case FORMAT_TYPE_NONE: {
+                       int copy = read;
+                       if (str < end) {
+                               if (copy > end - str)
+                                       copy = end - str;
+                               memcpy(str, old_fmt, copy);
+                       }
+                       str += read;
+                       break;
+               }
+
+               case FORMAT_TYPE_WITDH:
+                       spec.field_width = get_arg(int);
+                       break;
+
+               case FORMAT_TYPE_PRECISION:
+                       spec.precision = get_arg(int);
+                       break;
+
+               case FORMAT_TYPE_CHAR:
+                       if (!(spec.flags & LEFT)) {
+                               while (--spec.field_width > 0) {
+                                       if (str < end)
+                                               *str = ' ';
+                                       ++str;
+                               }
+                       }
+                       c = (unsigned char) get_arg(char);
+                       if (str < end)
+                               *str = c;
+                       ++str;
+                       while (--spec.field_width > 0) {
+                               if (str < end)
+                                       *str = ' ';
+                               ++str;
+                       }
+                       break;
+
+               case FORMAT_TYPE_STR: {
+                       const char *str_arg = args;
+                       size_t len = strlen(str_arg);
+                       args += len + 1;
+                       str = string(str, end, (char *)str_arg, spec);
+                       break;
+               }
+
+               case FORMAT_TYPE_PTR:
+                       str = pointer(fmt+1, str, end, get_arg(void *), spec);
+                       while (isalnum(*fmt))
+                               fmt++;
+                       break;
+
+               case FORMAT_TYPE_PERCENT_CHAR:
+                       if (str < end)
+                               *str = '%';
+                       ++str;
+                       break;
+
+               case FORMAT_TYPE_INVALID:
+                       if (str < end)
+                               *str = '%';
+                       ++str;
+                       if (*fmt) {
+                               if (str < end)
+                                       *str = *fmt;
+                               ++str;
+                       } else {
+                               --fmt;
+                       }
+                       break;
+
+               case FORMAT_TYPE_NRCHARS:
+                       /* skip */
+                       break;
+
+               default:
+                       switch (spec.type) {
+
+                       case FORMAT_TYPE_LONG_LONG:
+                               num = get_arg(long long);
+                               break;
+                       case FORMAT_TYPE_ULONG:
+                               num = get_arg(unsigned long);
+                               break;
+                       case FORMAT_TYPE_LONG:
+                               num = get_arg(unsigned long);
+                               break;
+                       case FORMAT_TYPE_SIZE_T:
+                               num = get_arg(size_t);
+                               break;
+                       case FORMAT_TYPE_PTRDIFF:
+                               num = get_arg(ptrdiff_t);
+                               break;
+                       case FORMAT_TYPE_USHORT:
+                               num = get_arg(unsigned short);
+                               break;
+                       case FORMAT_TYPE_SHORT:
+                               num = get_arg(short);
+                               break;
+                       case FORMAT_TYPE_UINT:
+                               num = get_arg(unsigned int);
+                               break;
+                       default:
+                               num = get_arg(int);
+                       }
+
+                       str = number(str, end, num, spec);
+               }
+       }
+
+       if (size > 0) {
+               if (str < end)
+                       *str = '\0';
+               else
+                       end[-1] = '\0';
+       }
+
+#undef get_arg
+
+       /* the trailing null byte doesn't count towards the total */
+       return str - buf;
+}
+EXPORT_SYMBOL_GPL(bstr_printf);
+
+/**
+ * bprintf - Parse a format string and place args' binary value in a buffer
+ * @bin_buf: The buffer to place args' binary value
+ * @size: The size of the buffer (in 32-bit words, not characters)
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The function returns the number of 32-bit words (u32) written
+ * into @bin_buf.
+ */
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
+{
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       ret = vbin_printf(bin_buf, size, fmt, args);
+       va_end(args);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(bprintf);
+
+#endif /* CONFIG_BINARY_PRINTF */
+
 /**
  * vsscanf - Unformat a buffer into a list of arguments
  * @buf:       input buffer
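A minimal usage sketch for the bprintf()/bstr_printf() pair added above (not part of the patch): bin_printf_example(), the buffer sizes and the format string are invented for illustration; only bprintf(), bstr_printf() and their declarations (added elsewhere in this series, in include/linux/string.h) are assumed from the patch itself.

#include <linux/kernel.h>	/* u32 */
#include <linux/string.h>	/* bprintf(), bstr_printf() (declared by this series) */

static void bin_printf_example(void)
{
	u32 bin_buf[64];	/* packed arguments, counted in 32-bit words */
	char out[128];		/* expanded text */
	int words;

	/* Pack the varargs; note that the format string itself is not stored. */
	words = bprintf(bin_buf, 64, "cpu=%d comm=%s", 3, "swapper");
	if (words > 64)
		return;		/* bin_buf too small: contents not valid for bstr_printf() */

	/*
	 * Later (e.g. when a trace buffer is read), expand the packed
	 * arguments using the very same format string.
	 */
	bstr_printf(out, sizeof(out), "cpu=%d comm=%s", bin_buf);
}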
index baa999e..05fab3b 100644 (file)
@@ -48,6 +48,8 @@
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/delayacct.h>
+#include <linux/kprobes.h>
+#include <linux/mutex.h>
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
@@ -99,6 +101,14 @@ int randomize_va_space __read_mostly =
                                        2;
 #endif
 
+/*
+ * Mutex protecting text section modification (dynamic code patching).
+ * Some users need to sleep (allocating memory...) while they hold this lock.
+ *
+ * NOT exported to modules - patching kernel text is a really delicate matter.
+ */
+DEFINE_MUTEX(text_mutex);
+
 static int __init disable_randmaps(char *s)
 {
        randomize_va_space = 0;
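A minimal sketch of how text_mutex is meant to be used (not part of the patch): patch_text_example() is a hypothetical stand-in for a real text-patching routine (e.g. the alternatives/kprobes code on x86); only text_mutex itself comes from the hunk above, with its extern declaration added to include/linux/memory.h elsewhere in this series.

#include <linux/memory.h>	/* extern struct mutex text_mutex (added by this series) */
#include <linux/mutex.h>

static void patch_text_example(void)
{
	/* May sleep, so this must not be called from atomic context. */
	mutex_lock(&text_mutex);
	/* ... rewrite kernel instruction bytes here, e.g. via text_poke() on x86 ... */
	mutex_unlock(&text_mutex);
}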
index 5c44ed4..a3803ea 100644 (file)
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
        unsigned long did_some_progress;
        unsigned long pages_reclaimed = 0;
 
+       lockdep_trace_alloc(gfp_mask);
+
        might_sleep_if(wait);
 
        if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
         */
        cpuset_update_task_memory_state();
        p->flags |= PF_MEMALLOC;
+
+       lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
        did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
        p->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
        p->flags &= ~PF_MEMALLOC;
 
        cond_resched();
index 4d00855..9ec66c3 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/cpu.h>
 #include       <linux/sysctl.h>
 #include       <linux/module.h>
+#include       <trace/kmemtrace.h>
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+       return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3318,6 +3327,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        unsigned long save_flags;
        void *ptr;
 
+       lockdep_trace_alloc(flags);
+
        if (slab_should_failslab(cachep, flags))
                return NULL;
 
@@ -3394,6 +3405,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        unsigned long save_flags;
        void *objp;
 
+       lockdep_trace_alloc(flags);
+
        if (slab_should_failslab(cachep, flags))
                return NULL;
 
@@ -3550,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-       return __cache_alloc(cachep, flags, __builtin_return_address(0));
+       void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                            obj_size(cachep), cachep->buffer_size, flags);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+       return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3598,23 +3624,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-       return __cache_alloc_node(cachep, flags, nodeid,
-                       __builtin_return_address(0));
+       void *ret = __cache_alloc_node(cachep, flags, nodeid,
+                                      __builtin_return_address(0));
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                 obj_size(cachep), cachep->buffer_size,
+                                 flags, nodeid);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                                   gfp_t flags,
+                                   int nodeid)
+{
+       return __cache_alloc_node(cachep, flags, nodeid,
+                                 __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
        struct kmem_cache *cachep;
+       void *ret;
 
        cachep = kmem_find_general_cachep(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                return cachep;
-       return kmem_cache_alloc_node(cachep, flags, node);
+       ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                 (unsigned long) caller, ret,
+                                 size, cachep->buffer_size, flags, node);
+
+       return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return __do_kmalloc_node(size, flags, node,
@@ -3647,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                                          void *caller)
 {
        struct kmem_cache *cachep;
+       void *ret;
 
        /* If you want to save a few bytes .text space: replace
         * __ with kmem_.
@@ -3656,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
        cachep = __find_general_cachep(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                return cachep;
-       return __cache_alloc(cachep, flags, caller);
+       ret = __cache_alloc(cachep, flags, caller);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                            (unsigned long) caller, ret,
+                            size, cachep->buffer_size, flags);
+
+       return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
        return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3699,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
                debug_check_no_obj_freed(objp, obj_size(cachep));
        __cache_free(cachep, objp);
        local_irq_restore(flags);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3725,6 +3784,8 @@ void kfree(const void *objp)
        debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
index 52bc8a2..5961529 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,40 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
        unsigned int *m;
        int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+       void *ret;
+
+       lockdep_trace_alloc(gfp);
 
        if (size < PAGE_SIZE - align) {
                if (!size)
                        return ZERO_SIZE_PTR;
 
                m = slob_alloc(size + align, gfp, align, node);
+
                if (!m)
                        return NULL;
                *m = size;
-               return (void *)m + align;
+               ret = (void *)m + align;
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _RET_IP_, ret,
+                                         size, size + align, gfp, node);
        } else {
-               void *ret;
+               unsigned int order = get_order(size);
 
-               ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+               ret = slob_new_page(gfp | __GFP_COMP, order, node);
                if (ret) {
                        struct page *page;
                        page = virt_to_page(ret);
                        page->private = size;
                }
-               return ret;
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _RET_IP_, ret,
+                                         size, PAGE_SIZE << order, gfp, node);
        }
+
+       return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +515,8 @@ void kfree(const void *block)
                slob_free(m, *m + align);
        } else
                put_page(&sp->page);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -570,10 +586,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
        void *b;
 
-       if (c->size < PAGE_SIZE)
+       if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node);
-       else
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                         _RET_IP_, b, c->size,
+                                         SLOB_UNITS(c->size) * SLOB_UNIT,
+                                         flags, node);
+       } else {
                b = slob_new_page(flags, get_order(c->size), node);
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                         _RET_IP_, b, c->size,
+                                         PAGE_SIZE << get_order(c->size),
+                                         flags, node);
+       }
 
        if (c->ctor)
                c->ctor(b);
@@ -609,6 +634,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
        } else {
                __kmem_cache_free(b, c->size);
        }
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
index 0280eee..816734e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1596,6 +1597,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        unsigned long flags;
        unsigned int objsize;
 
+       lockdep_trace_alloc(gfpflags);
        might_sleep_if(gfpflags & __GFP_WAIT);
 
        if (should_failslab(s->objsize, gfpflags))
@@ -1623,18 +1625,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-       return slab_alloc(s, gfpflags, -1, _RET_IP_);
+       void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                            s->objsize, s->size, gfpflags);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+       return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-       return slab_alloc(s, gfpflags, node, _RET_IP_);
+       void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                 s->objsize, s->size, gfpflags, node);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                   gfp_t gfpflags,
+                                   int node)
+{
+       return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1742,6 +1772,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
        page = virt_to_head_page(x);
 
        slab_free(s, page, x, _RET_IP_);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2475,7 +2507,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *             Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2569,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2657,8 +2689,9 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
        struct kmem_cache *s;
+       void *ret;
 
-       if (unlikely(size > PAGE_SIZE))
+       if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
@@ -2666,7 +2699,12 @@ void *__kmalloc(size_t size, gfp_t flags)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, flags, -1, _RET_IP_);
+       ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                            size, s->size, flags);
+
+       return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2723,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        struct kmem_cache *s;
+       void *ret;
+
+       if (unlikely(size > SLUB_MAX_SIZE)) {
+               ret = kmalloc_large_node(size, flags, node);
 
-       if (unlikely(size > PAGE_SIZE))
-               return kmalloc_large_node(size, flags, node);
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _RET_IP_, ret,
+                                         size, PAGE_SIZE << get_order(size),
+                                         flags, node);
+
+               return ret;
+       }
 
        s = get_slab(size, flags);
 
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, flags, node, _RET_IP_);
+       ret = slab_alloc(s, flags, node, _RET_IP_);
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                                 size, s->size, flags, node);
+
+       return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2753,6 +2805,8 @@ void kfree(const void *x)
                return;
        }
        slab_free(page->slab, page, object, _RET_IP_);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2986,7 +3040,7 @@ void __init kmem_cache_init(void)
                caches++;
        }
 
-       for (i = KMALLOC_SHIFT_LOW; i <PAGE_SHIFT; i++) {
+       for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
                create_kmalloc_cache(&kmalloc_caches[i],
                        "kmalloc", 1 << i, GFP_KERNEL);
                caches++;
@@ -3023,7 +3077,7 @@ void __init kmem_cache_init(void)
        slab_state = UP;
 
        /* Provide the correct kmalloc names now that the caches are up */
-       for (i = KMALLOC_SHIFT_LOW; i <PAGE_SHIFT; i++)
+       for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
                kmalloc_caches[i]. name =
                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3222,8 +3276,9 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
        struct kmem_cache *s;
+       void *ret;
 
-       if (unlikely(size > PAGE_SIZE))
+       if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large(size, gfpflags);
 
        s = get_slab(size, gfpflags);
@@ -3231,15 +3286,22 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, gfpflags, -1, caller);
+       ret = slab_alloc(s, gfpflags, -1, caller);
+
+       /* Honor the call site pointer we received. */
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+                            s->size, gfpflags);
+
+       return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                        int node, unsigned long caller)
 {
        struct kmem_cache *s;
+       void *ret;
 
-       if (unlikely(size > PAGE_SIZE))
+       if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large_node(size, gfpflags, node);
 
        s = get_slab(size, gfpflags);
@@ -3247,7 +3309,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, gfpflags, node, caller);
+       ret = slab_alloc(s, gfpflags, node, caller);
+
+       /* Honor the call site pointer we received. */
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+                                 size, s->size, gfpflags, node);
+
+       return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
index 6177e3b..ae6f4c1 100644 (file)
@@ -1965,6 +1965,8 @@ static int kswapd(void *p)
        };
        node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
+       lockdep_set_current_reclaim_state(GFP_KERNEL);
+
        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;
index 01724e0..dffdc49 100644 (file)
@@ -5,9 +5,9 @@
 #include <linux/tracepoint.h>
 
 DECLARE_TRACE(subsys_event,
-       TPPROTO(struct inode *inode, struct file *file),
-       TPARGS(inode, file));
+       TP_PROTO(struct inode *inode, struct file *file),
+       TP_ARGS(inode, file));
 DECLARE_TRACE(subsys_eventb,
-       TPPROTO(void),
-       TPARGS());
+       TP_PROTO(void),
+       TP_ARGS());
 #endif
index c7de8b3..39a9642 100644 (file)
@@ -112,13 +112,13 @@ endif
 # ---------------------------------------------------------------------------
 
 # Default is built-in, unless we know otherwise
-modkern_cflags := $(CFLAGS_KERNEL)
+modkern_cflags = $(if $(part-of-module), $(CFLAGS_MODULE), $(CFLAGS_KERNEL))
 quiet_modtag := $(empty)   $(empty)
 
-$(real-objs-m)        : modkern_cflags := $(CFLAGS_MODULE)
-$(real-objs-m:.o=.i)  : modkern_cflags := $(CFLAGS_MODULE)
-$(real-objs-m:.o=.s)  : modkern_cflags := $(CFLAGS_MODULE)
-$(real-objs-m:.o=.lst): modkern_cflags := $(CFLAGS_MODULE)
+$(real-objs-m)        : part-of-module := y
+$(real-objs-m:.o=.i)  : part-of-module := y
+$(real-objs-m:.o=.s)  : part-of-module := y
+$(real-objs-m:.o=.lst): part-of-module := y
 
 $(real-objs-m)        : quiet_modtag := [M]
 $(real-objs-m:.o=.i)  : quiet_modtag := [M]
@@ -205,7 +205,8 @@ endif
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
 cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(CONFIG_64BIT),64,32)" \
-       "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" "$(@)";
+       "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
+       "$(if $(part-of-module),1,0)" "$(@)";
 endif
 
 define rule_cc_o_c
index fe83141..409596e 100755 (executable)
@@ -100,14 +100,19 @@ $P =~ s@.*/@@g;
 
 my $V = '0.1';
 
-if ($#ARGV < 6) {
-       print "usage: $P arch objdump objcopy cc ld nm rm mv inputfile\n";
+if ($#ARGV < 7) {
+       print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
        print "version: $V\n";
        exit(1);
 }
 
 my ($arch, $bits, $objdump, $objcopy, $cc,
-    $ld, $nm, $rm, $mv, $inputfile) = @ARGV;
+    $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
+
+# This file refers to mcount and shouldn't be ftraced, so let's ignore it
+if ($inputfile eq "kernel/trace/ftrace.o") {
+    exit(0);
+}
 
 # Acceptable sections to record.
 my %text_sections = (
@@ -201,6 +206,13 @@ if ($arch eq "x86_64") {
     $alignment = 2;
     $section_type = '%progbits';
 
+} elsif ($arch eq "ia64") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+    $type = "data8";
+
+    if ($is_module eq "0") {
+        $cc .= " -mconstant-gp";
+    }
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
@@ -263,7 +275,6 @@ if (!$found_version) {
        "\tDisabling local function references.\n";
 }
 
-
 #
 # Step 1: find all the local (static functions) and weak symbols.
 #        't' is local, 'w/W' is weak (we never use a weak function)
@@ -331,13 +342,16 @@ sub update_funcs
 #
 # Step 2: find the sections and mcount call sites
 #
-open(IN, "$objdump -dr $inputfile|") || die "error running $objdump";
+open(IN, "$objdump -hdr $inputfile|") || die "error running $objdump";
 
 my $text;
 
+my $read_headers = 1;
+
 while (<IN>) {
     # is it a section?
     if (/$section_regex/) {
+       $read_headers = 0;
 
        # Only record text sections that we know are safe
        if (defined($text_sections{$1})) {
@@ -371,6 +385,19 @@ while (<IN>) {
                $ref_func = $text;
            }
        }
+    } elsif ($read_headers && /$mcount_section/) {
+       #
+       # Somehow the make process can execute this script on an
+       # object twice. If it does, we would duplicate the mcount
+       # section and it will cause the function tracer self test
+       # to fail. Check if the mcount section exists, and if it does,
+       # warn and exit.
+       #
+       print STDERR "ERROR: $mcount_section already in $inputfile\n" .
+           "\tThis may be an indication that your build is corrupted.\n" .
+           "\tDelete $inputfile and try again. If the same object file\n" .
+           "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n";
+       exit(-1);
     }
 
     # is this a call site to mcount? If so, record it to print later