 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers

#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace_output.h"

 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
bool ring_buffer_expanded;

 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false-positive or false-negative results.
static bool __read_mostly tracing_selftest_running;

 * If a tracer is running, we do not want to run SELFTEST.
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {

dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)

 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
static DEFINE_PER_CPU(bool, trace_cmdline_save);

 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs,
 * or to 2 to dump only the buffer of the CPU that triggered the oops.
enum ftrace_dump_mode ftrace_dump_on_oops;
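
/*
 * Illustrative sketch (not part of the original file): ftrace_dump() is
 * the entry point that honors these dump modes. A debugging module
 * could, for example, force a dump from its own panic notifier; the
 * callback and notifier names below are hypothetical.
 *
 *	static int my_panic_cb(struct notifier_block *nb,
 *			       unsigned long event, void *unused)
 *	{
 *		ftrace_dump(DUMP_ALL);	// or DUMP_ORIG for one CPU
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_panic_nb = {
 *		.notifier_call = my_panic_cb,
 *	};
 *	// register with:
 *	// atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */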
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	unsigned long length;

union trace_enum_map_item;

struct trace_enum_map_tail {
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "enum_string"
	union trace_enum_map_item *next;
	const char *end;	/* points to NULL */

static DEFINE_MUTEX(trace_enum_mutex);

 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
union trace_enum_map_item {
	struct trace_enum_map map;
	struct trace_enum_map_head head;
	struct trace_enum_map_tail tail;

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
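
/*
 * Sketch of the resulting layout (illustrative only): an array saving N
 * maps is allocated as N + 2 items and walked from index 1 to N:
 *
 *	[0]	head: .length = N, .mod = owning module (or NULL if built in)
 *	[1]	map:  first saved trace_enum_map
 *	...
 *	[N]	map:  last saved trace_enum_map
 *	[N+1]	tail: .next = next saved array (.end stays NULL)
 */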
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS \
	(FUNCTION_DEFAULT_FLAGS | \
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
	TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	TRACE_ITER_EVENT_FORK

 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of those pages are used to hold the
 * linked list, by linking the lru item in each page descriptor
 * to the other pages of the per-CPU buffer.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
	mutex_unlock(&trace_types_lock);

static void __trace_array_put(struct trace_array *this_tr)
	WARN_ON(!this_tr->ref);

void trace_array_put(struct trace_array *this_tr)
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
	/* Early boot up does not have a buffer yet */
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

cycle_t ftrace_now(int cpu)
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);

 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
int tracing_is_enabled(void)
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	return !global_trace.buffer_disabled;

 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * up to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
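
/*
 * Quick check on the arithmetic above: 16384 entries at 88 bytes each
 * gives 16384 * 88 = 1,441,792 bytes, matching the 1441792UL default.
 */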
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

 * trace_types_lock is used to protect the trace_types list.
DEFINE_MUTEX(trace_types_lock);

 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (as returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different per-CPU
 * ring buffers.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
		/* gain it for accessing a cpu ring buffer. */

		/* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Second, block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));

static inline void trace_access_unlock(int cpu)
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);

static inline void trace_access_lock_init(void)
	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
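
/*
 * Usage sketch (illustrative): a reader consuming a single CPU's buffer
 * brackets the consume with the per-cpu variant, while whole-buffer
 * operations pass RING_BUFFER_ALL_CPUS to take the write side of the
 * rwsem above:
 *
 *	trace_access_lock(cpu);
 *	ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 */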
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
	mutex_lock(&access_lock);

static inline void trace_access_unlock(int cpu)
	mutex_unlock(&access_lock);

static inline void trace_access_lock_init(void)

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      int skip, int pc, struct pt_regs *regs);

static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					int skip, int pc, struct pt_regs *regs)
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      int skip, int pc, struct pt_regs *regs)

static void tracer_tracing_on(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */

 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
void tracing_on(void)
	tracer_tracing_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_on);

 * __trace_puts - write a constant string into the trace buffer.
 * @ip:   The address of the caller
 * @str:  The constant string to write
 * @size: The size of the string.
int __trace_puts(unsigned long ip, const char *str, int size)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))

	alloc = sizeof(*entry) + size + 2; /* possible '\n' added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
	entry = ring_buffer_event_data(event);

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
EXPORT_SYMBOL_GPL(__trace_puts);

 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:  The address of the caller
 * @str: The constant string to write to the buffer
int __trace_bputs(unsigned long ip, const char *str)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
	entry = ring_buffer_event_data(event);

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
EXPORT_SYMBOL_GPL(__trace_bputs);
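
/*
 * Usage sketch (illustrative): callers normally reach these through the
 * trace_puts() macro, which picks __trace_bputs() for string literals
 * (only the pointer is recorded) and __trace_puts() otherwise:
 *
 *	trace_puts("reached the slow path\n");
 *
 * The skip count of 4 passed to ftrace_trace_stack() above keeps these
 * helper frames out of any stack trace recorded with the event.
 */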
#ifdef CONFIG_TRACER_SNAPSHOT
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
void tracing_snapshot(void)
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;

		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				&tr->trace_buffer, RING_BUFFER_ALL_CPUS);

		tr->allocated_snapshot = true;

static void free_snapshot(struct trace_array *tr)
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;

 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
int tracing_alloc_snapshot(void)
	struct trace_array *tr = &global_trace;

	ret = alloc_snapshot(tr);

EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
void tracing_snapshot_alloc(void)
	ret = tracing_alloc_snapshot();
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
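
/*
 * Usage sketch (illustrative): pre-allocate the snapshot buffer once
 * from sleepable context, then freeze the interesting trace the moment
 * a suspect condition fires. my_error_hook() is hypothetical.
 *
 *	// early, from process context:
 *	tracing_snapshot_alloc();
 *
 *	static void my_error_hook(int err)
 *	{
 *		if (err == -EIO)
 *			tracing_snapshot();	// swap live <-> snapshot buffer
 *	}
 */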
void tracing_snapshot(void)
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */

 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
void tracing_off(void)
	tracer_tracing_off(&global_trace);
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
	if (__disable_trace_on_warning)

 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to check
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
static int tracer_tracing_is_on(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;

 * tracing_is_on - show state of ring buffers enabled
int tracing_is_on(void)
	return tracer_tracing_is_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
	unsigned long buf_size;

	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	trace_buf_size = buf_size;
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
	unsigned long threshold;

	ret = kstrtoul(str, 0, &threshold);
	tracing_thresh = threshold * 1000;
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)

 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {

	int in_ns;	/* is this clock in nanoseconds? */
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },

 * trace_parser_get_init - gets the buffer for trace parser
int trace_parser_get_init(struct trace_parser *parser, int size)
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);

 * trace_parser_put - frees the buffer for trace parser
void trace_parser_put(struct trace_parser *parser)
	kfree(parser->buffer);

 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);

	/* only spaces were written */

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;

		ret = get_user(ch, ubuf++);

	/* We either got finished input or we have to wait for another call. */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->buffer[parser->idx++] = ch;

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
	if (trace_seq_used(s) <= s->seq.readpos)

	len = trace_seq_used(s) - s->seq.readpos;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
		max_data->uid = current_uid();
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);

 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tsk: the task with the latency
 * @cpu: the cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	struct ring_buffer *buf;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);

 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk: the task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;

	if (!type->selftest || tracing_selftest_disabled)

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */

	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);

	printk(KERN_CONT "PASSED\n");

static inline int run_tracer_selftest(struct tracer *type)
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
int __init register_tracer(struct tracer *type)
		pr_info("Tracer must have a name\n");

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			pr_info("Tracer %s already registered\n",

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;

		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);

	type->next = trace_types;

	add_tracer_options(&global_trace, type);

	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
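
/*
 * Usage sketch (illustrative): a minimal tracer registering itself from
 * an initcall. The tracer and callbacks below are hypothetical; real
 * tracers also supply start/stop and the optional selftest hook.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static int __init my_tracer_setup(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_setup);
 */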
void tracing_reset(struct trace_buffer *buf, int cpu)
	struct ring_buffer *buffer = buf->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);

void tracing_reset_online_cpus(struct trace_buffer *buf)
	struct ring_buffer *buffer = buf->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	char *saved_cmdlines;
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];

static inline void set_cmdline(int idx, const char *cmdline)
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
	if (!s->map_cmdline_to_pid)

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);

	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

static int trace_create_savedcmd(void)
	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);

int is_tracing_stopped(void)
	return global_trace.stop_count;

 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
void tracing_start(void)
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			global_trace.stop_count = 0;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&global_trace.max_lock);

	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);

static void tracing_start_tr(struct trace_array *tr)
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */

	buffer = tr->trace_buffer.buffer;
		ring_buffer_record_enable(buffer);

	raw_spin_unlock_irqrestore(&tr->start_lock, flags);

 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
void tracing_stop(void)
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&global_trace.max_lock);

	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);

static void tracing_stop_tr(struct trace_array *tr)
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)

	buffer = tr->trace_buffer.buffer;
		ring_buffer_record_disable(buffer);

	raw_spin_unlock_irqrestore(&tr->start_lock, flags);

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

static void __trace_find_cmdline(int pid, char comm[])
		strcpy(comm, "<idle>");

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
		strcpy(comm, "<...>");

void trace_find_cmdline(int pid, char comm[])
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
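
/*
 * Usage sketch (illustrative): output code resolves a recorded pid back
 * to a comm through the two-way map kept above. The caller provides a
 * TASK_COMM_LEN buffer; unknown pids come back as "<...>":
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 */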
void tracing_record_cmdline(struct task_struct *tsk)
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())

	if (!__this_cpu_read(trace_cmdline_save))

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);

tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		TRACE_FLAG_IRQS_NOSUPPORT |
		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  unsigned long flags, int pc)
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);

__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);

EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
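
/*
 * Usage sketch (illustrative): the reserve/fill/commit pattern that the
 * helpers above wrap. TRACE_MYEVENT and struct myevent_entry are
 * hypothetical; real callers use their generated event types.
 *
 *	struct ring_buffer_event *event;
 *	struct ring_buffer *buffer;
 *	struct myevent_entry *entry;
 *
 *	event = trace_current_buffer_lock_reserve(&buffer, TRACE_MYEVENT,
 *						  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;			// buffer full or recording off
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;		// fill in the event payload
 *	__buffer_unlock_commit(buffer, event);
 */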
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);

trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */

	if (use_stack == 1) {
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

			save_stack_trace_regs(regs, &trace);
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;

		/* From now on, use_stack is a boolean */

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
			save_stack_trace_regs(regs, &trace);
			save_stack_trace(&trace);

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	/* Again, don't let gcc optimize things here */
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);

 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
void trace_dump_stack(int skip)
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
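
/*
 * Usage sketch (illustrative): sprinkle into a code path being debugged
 * to capture who got us here, without stopping the trace:
 *
 *	if (unlikely(retries > MAX_RETRIES))	// hypothetical condition
 *		trace_dump_stack(0);
 */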
static DEFINE_PER_CPU(int, user_stack_count);

ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))

	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	if (__this_cpu_read(user_stack_count))

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	__this_cpu_dec(user_stack_count);

static void __trace_userstack(struct trace_array *tr, unsigned long flags)
	ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

 * The buffer used depends on the context. There is a per-cpu buffer
 * for normal context, softirq context, hardirq context and NMI
 * context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL.
static char *get_trace_buf(void)
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
		percpu_buffer = trace_percpu_nmi_buffer;
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
		percpu_buffer = trace_percpu_buffer;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);

static int alloc_percpu_trace_buffer(void)
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);

	irq_buffers = alloc_percpu(struct trace_buffer_struct);

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	free_percpu(irq_buffers);
	free_percpu(sirq_buffers);
	free_percpu(buffers);
	WARN(1, "Could not allocate percpu trace_printk buffer");

static int buffers_allocated;

void trace_printk_init_buffers(void)
	if (buffers_allocated)

	if (alloc_percpu_trace_buffer())

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
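
/*
 * Usage sketch (illustrative): trace_printk() writes into the ring
 * buffer instead of the printk log, so it is cheap enough for hot paths
 * while debugging. The format and variables below are hypothetical:
 *
 *	trace_printk("ctx %d: state=%lu\n", ctx_id, state);
 *
 * With a constant format string this compiles down to the binary
 * trace_vbprintk() path below, recording only the format pointer and
 * the arguments.
 */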
2077 void trace_printk_start_comm(void)
2079 /* Start tracing comms if trace printk is set */
2080 if (!buffers_allocated)
2082 tracing_start_cmdline_record();
2085 static void trace_printk_start_stop_comm(int enabled)
2087 if (!buffers_allocated)
2091 tracing_start_cmdline_record();
2093 tracing_stop_cmdline_record();
2097 * trace_vbprintk - write binary msg to tracing buffer
2100 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2102 struct trace_event_call *call = &event_bprint;
2103 struct ring_buffer_event *event;
2104 struct ring_buffer *buffer;
2105 struct trace_array *tr = &global_trace;
2106 struct bprint_entry *entry;
2107 unsigned long flags;
2109 int len = 0, size, pc;
2111 if (unlikely(tracing_selftest_running || tracing_disabled))
2114 /* Don't pollute graph traces with trace_vprintk internals */
2115 pause_graph_tracing();
2117 pc = preempt_count();
2118 preempt_disable_notrace();
2120 tbuffer = get_trace_buf();
2126 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2128 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2131 local_save_flags(flags);
2132 size = sizeof(*entry) + sizeof(u32) * len;
2133 buffer = tr->trace_buffer.buffer;
2134 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2138 entry = ring_buffer_event_data(event);
2142 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2143 if (!call_filter_check_discard(call, entry, buffer, event)) {
2144 __buffer_unlock_commit(buffer, event);
2145 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2149 preempt_enable_notrace();
2150 unpause_graph_tracing();
2154 EXPORT_SYMBOL_GPL(trace_vbprintk);
2157 __trace_array_vprintk(struct ring_buffer *buffer,
2158 unsigned long ip, const char *fmt, va_list args)
2160 struct trace_event_call *call = &event_print;
2161 struct ring_buffer_event *event;
2162 int len = 0, size, pc;
2163 struct print_entry *entry;
2164 unsigned long flags;
2167 if (tracing_disabled || tracing_selftest_running)
2170 /* Don't pollute graph traces with trace_vprintk internals */
2171 pause_graph_tracing();
2173 pc = preempt_count();
2174 preempt_disable_notrace();
2177 tbuffer = get_trace_buf();
2183 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2185 local_save_flags(flags);
2186 size = sizeof(*entry) + len + 1;
2187 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2191 entry = ring_buffer_event_data(event);
2194 memcpy(&entry->buf, tbuffer, len + 1);
2195 if (!call_filter_check_discard(call, entry, buffer, event)) {
2196 __buffer_unlock_commit(buffer, event);
2197 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2200 preempt_enable_notrace();
2201 unpause_graph_tracing();
2206 int trace_array_vprintk(struct trace_array *tr,
2207 unsigned long ip, const char *fmt, va_list args)
2209 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2212 int trace_array_printk(struct trace_array *tr,
2213 unsigned long ip, const char *fmt, ...)
2218 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2222 ret = trace_array_vprintk(tr, ip, fmt, ap);
2227 int trace_array_printk_buf(struct ring_buffer *buffer,
2228 unsigned long ip, const char *fmt, ...)
2233 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2237 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2242 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2244 return trace_array_vprintk(&global_trace, ip, fmt, args);
2246 EXPORT_SYMBOL_GPL(trace_vprintk);
2248 static void trace_iterator_increment(struct trace_iterator *iter)
2250 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2254 ring_buffer_read(buf_iter, NULL);
2257 static struct trace_entry *
2258 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2259 unsigned long *lost_events)
2261 struct ring_buffer_event *event;
2262 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2265 event = ring_buffer_iter_peek(buf_iter, ts);
2267 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2271 iter->ent_size = ring_buffer_event_length(event);
2272 return ring_buffer_event_data(event);
2278 static struct trace_entry *
2279 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2280 unsigned long *missing_events, u64 *ent_ts)
2282 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2283 struct trace_entry *ent, *next = NULL;
2284 unsigned long lost_events = 0, next_lost = 0;
2285 int cpu_file = iter->cpu_file;
2286 u64 next_ts = 0, ts;
2292 * If we are in a per_cpu trace file, don't bother by iterating over
2293 * all cpu and peek directly.
2295 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2296 if (ring_buffer_empty_cpu(buffer, cpu_file))
2298 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2300 *ent_cpu = cpu_file;
2305 for_each_tracing_cpu(cpu) {
2307 if (ring_buffer_empty_cpu(buffer, cpu))
2310 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2313 * Pick the entry with the smallest timestamp:
2315 if (ent && (!next || ts < next_ts)) {
2319 next_lost = lost_events;
2320 next_size = iter->ent_size;
2324 iter->ent_size = next_size;
2327 *ent_cpu = next_cpu;
2333 *missing_events = next_lost;
2338 /* Find the next real entry, without updating the iterator itself */
2339 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2340 int *ent_cpu, u64 *ent_ts)
2342 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2345 /* Find the next real entry, and increment the iterator to the next entry */
2346 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2348 iter->ent = __find_next_entry(iter, &iter->cpu,
2349 &iter->lost_events, &iter->ts);
2352 trace_iterator_increment(iter);
2354 return iter->ent ? iter : NULL;
2357 static void trace_consume(struct trace_iterator *iter)
2359 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2360 &iter->lost_events);
2363 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2365 struct trace_iterator *iter = m->private;
2369 WARN_ON_ONCE(iter->leftover);
2373 /* can't go backwards */
2378 ent = trace_find_next_entry_inc(iter);
2382 while (ent && iter->idx < i)
2383 ent = trace_find_next_entry_inc(iter);
2390 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2392 struct ring_buffer_event *event;
2393 struct ring_buffer_iter *buf_iter;
2394 unsigned long entries = 0;
2397 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2399 buf_iter = trace_buffer_iter(iter, cpu);
2403 ring_buffer_iter_reset(buf_iter);
2406 * We could have the case with the max latency tracers
2407 * that a reset never took place on a cpu. This is evident
2408 * by the timestamp being before the start of the buffer.
2410 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2411 if (ts >= iter->trace_buffer->time_start)
2414 ring_buffer_read(buf_iter, NULL);
2417 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2421 * The current tracer is copied to avoid a global locking
2424 static void *s_start(struct seq_file *m, loff_t *pos)
2426 struct trace_iterator *iter = m->private;
2427 struct trace_array *tr = iter->tr;
2428 int cpu_file = iter->cpu_file;
2434 * copy the tracer to avoid using a global lock all around.
2435 * iter->trace is a copy of current_trace, the pointer to the
2436 * name may be used instead of a strcmp(), as iter->trace->name
2437 * will point to the same string as current_trace->name.
2439 mutex_lock(&trace_types_lock);
2440 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2441 *iter->trace = *tr->current_trace;
2442 mutex_unlock(&trace_types_lock);
2444 #ifdef CONFIG_TRACER_MAX_TRACE
2445 if (iter->snapshot && iter->trace->use_max_tr)
2446 return ERR_PTR(-EBUSY);
2449 if (!iter->snapshot)
2450 atomic_inc(&trace_record_cmdline_disabled);
2452 if (*pos != iter->pos) {
2457 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2458 for_each_tracing_cpu(cpu)
2459 tracing_iter_reset(iter, cpu);
2461 tracing_iter_reset(iter, cpu_file);
2464 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2469 * If we overflowed the seq_file before, then we want
2470 * to just reuse the trace_seq buffer again.
2476 p = s_next(m, p, &l);
2480 trace_event_read_lock();
2481 trace_access_lock(cpu_file);
2485 static void s_stop(struct seq_file *m, void *p)
2487 struct trace_iterator *iter = m->private;
2489 #ifdef CONFIG_TRACER_MAX_TRACE
2490 if (iter->snapshot && iter->trace->use_max_tr)
2494 if (!iter->snapshot)
2495 atomic_dec(&trace_record_cmdline_disabled);
2497 trace_access_unlock(iter->cpu_file);
2498 trace_event_read_unlock();
2502 get_total_entries(struct trace_buffer *buf,
2503 unsigned long *total, unsigned long *entries)
2505 unsigned long count;
2511 for_each_tracing_cpu(cpu) {
2512 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2514 * If this buffer has skipped entries, then we hold all
2515 * entries for the trace and we need to ignore the
2516 * ones before the time stamp.
2518 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2519 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2520 /* total is the same as the entries */
2524 ring_buffer_overrun_cpu(buf->buffer, cpu);
2529 static void print_lat_help_header(struct seq_file *m)
2531 seq_puts(m, "#                  _------=> CPU#            \n"
2532 "#                 / _-----=> irqs-off        \n"
2533 "#                | / _----=> need-resched    \n"
2534 "#                || / _---=> hardirq/softirq \n"
2535 "#                ||| / _--=> preempt-depth   \n"
2536 "#                |||| /     delay            \n"
2537 "#  cmd     pid   ||||| time  |   caller      \n"
2538 "#     \\   /      |||||  \\    |   /         \n");
2541 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2543 unsigned long total;
2544 unsigned long entries;
2546 get_total_entries(buf, &total, &entries);
2547 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2548 entries, total, num_online_cpus());
2552 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2554 print_event_info(buf, m);
2555 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2559 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2561 print_event_info(buf, m);
2562 seq_puts(m, "#                              _-----=> irqs-off\n"
2563 "#                             / _----=> need-resched\n"
2564 "#                            | / _---=> hardirq/softirq\n"
2565 "#                            || / _--=> preempt-depth\n"
2566 "#                            ||| /     delay\n"
2567 "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2568 "#              | |       |   ||||       |         |\n");
2572 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2574 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2575 struct trace_buffer *buf = iter->trace_buffer;
2576 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2577 struct tracer *type = iter->trace;
2578 unsigned long entries;
2579 unsigned long total;
2580 const char *name = "preemption";
2584 get_total_entries(buf, &total, &entries);
2586 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2588 seq_puts(m, "# -----------------------------------"
2589 "---------------------------------\n");
2590 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2591 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2592 nsecs_to_usecs(data->saved_latency),
2596 #if defined(CONFIG_PREEMPT_NONE)
2598 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2600 #elif defined(CONFIG_PREEMPT)
2605 /* These are reserved for later use */
2608 seq_printf(m, " #P:%d)\n", num_online_cpus());
2612 seq_puts(m, "# -----------------\n");
2613 seq_printf(m, "# | task: %.16s-%d "
2614 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2615 data->comm, data->pid,
2616 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2617 data->policy, data->rt_priority);
2618 seq_puts(m, "# -----------------\n");
2620 if (data->critical_start) {
2621 seq_puts(m, "# => started at: ");
2622 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2623 trace_print_seq(m, &iter->seq);
2624 seq_puts(m, "\n# => ended at: ");
2625 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2626 trace_print_seq(m, &iter->seq);
2627 seq_puts(m, "\n#\n");
2633 static void test_cpu_buff_start(struct trace_iterator *iter)
2635 struct trace_seq *s = &iter->seq;
2636 struct trace_array *tr = iter->tr;
2638 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2641 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2644 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2647 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2651 cpumask_set_cpu(iter->cpu, iter->started);
2653 /* Don't print started cpu buffer for the first entry of the trace */
2655 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2659 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2661 struct trace_array *tr = iter->tr;
2662 struct trace_seq *s = &iter->seq;
2663 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2664 struct trace_entry *entry;
2665 struct trace_event *event;
2669 test_cpu_buff_start(iter);
2671 event = ftrace_find_event(entry->type);
2673 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2674 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2675 trace_print_lat_context(iter);
2677 trace_print_context(iter);
2680 if (trace_seq_has_overflowed(s))
2681 return TRACE_TYPE_PARTIAL_LINE;
2684 return event->funcs->trace(iter, sym_flags, event);
2686 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2688 return trace_handle_return(s);
2691 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2693 struct trace_array *tr = iter->tr;
2694 struct trace_seq *s = &iter->seq;
2695 struct trace_entry *entry;
2696 struct trace_event *event;
2700 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2701 trace_seq_printf(s, "%d %d %llu ",
2702 entry->pid, iter->cpu, iter->ts);
2704 if (trace_seq_has_overflowed(s))
2705 return TRACE_TYPE_PARTIAL_LINE;
2707 event = ftrace_find_event(entry->type);
2709 return event->funcs->raw(iter, 0, event);
2711 trace_seq_printf(s, "%d ?\n", entry->type);
2713 return trace_handle_return(s);
2716 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2718 struct trace_array *tr = iter->tr;
2719 struct trace_seq *s = &iter->seq;
2720 unsigned char newline = '\n';
2721 struct trace_entry *entry;
2722 struct trace_event *event;
2726 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2727 SEQ_PUT_HEX_FIELD(s, entry->pid);
2728 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2729 SEQ_PUT_HEX_FIELD(s, iter->ts);
2730 if (trace_seq_has_overflowed(s))
2731 return TRACE_TYPE_PARTIAL_LINE;
2734 event = ftrace_find_event(entry->type);
2736 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2737 if (ret != TRACE_TYPE_HANDLED)
2741 SEQ_PUT_FIELD(s, newline);
2743 return trace_handle_return(s);
2746 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2748 struct trace_array *tr = iter->tr;
2749 struct trace_seq *s = &iter->seq;
2750 struct trace_entry *entry;
2751 struct trace_event *event;
2755 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2756 SEQ_PUT_FIELD(s, entry->pid);
2757 SEQ_PUT_FIELD(s, iter->cpu);
2758 SEQ_PUT_FIELD(s, iter->ts);
2759 if (trace_seq_has_overflowed(s))
2760 return TRACE_TYPE_PARTIAL_LINE;
2763 event = ftrace_find_event(entry->type);
2764 return event ? event->funcs->binary(iter, 0, event) :
2768 int trace_empty(struct trace_iterator *iter)
2770 struct ring_buffer_iter *buf_iter;
2773 /* If we are looking at one CPU buffer, only check that one */
2774 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2775 cpu = iter->cpu_file;
2776 buf_iter = trace_buffer_iter(iter, cpu);
2778 if (!ring_buffer_iter_empty(buf_iter))
2781 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2787 for_each_tracing_cpu(cpu) {
2788 buf_iter = trace_buffer_iter(iter, cpu);
2790 if (!ring_buffer_iter_empty(buf_iter))
2793 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2801 /* Called with trace_event_read_lock() held. */
2802 enum print_line_t print_trace_line(struct trace_iterator *iter)
2804 struct trace_array *tr = iter->tr;
2805 unsigned long trace_flags = tr->trace_flags;
2806 enum print_line_t ret;
2808 if (iter->lost_events) {
2809 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2810 iter->cpu, iter->lost_events);
2811 if (trace_seq_has_overflowed(&iter->seq))
2812 return TRACE_TYPE_PARTIAL_LINE;
2815 if (iter->trace && iter->trace->print_line) {
2816 ret = iter->trace->print_line(iter);
2817 if (ret != TRACE_TYPE_UNHANDLED)
2821 if (iter->ent->type == TRACE_BPUTS &&
2822 trace_flags & TRACE_ITER_PRINTK &&
2823 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2824 return trace_print_bputs_msg_only(iter);
2826 if (iter->ent->type == TRACE_BPRINT &&
2827 trace_flags & TRACE_ITER_PRINTK &&
2828 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2829 return trace_print_bprintk_msg_only(iter);
2831 if (iter->ent->type == TRACE_PRINT &&
2832 trace_flags & TRACE_ITER_PRINTK &&
2833 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2834 return trace_print_printk_msg_only(iter);
2836 if (trace_flags & TRACE_ITER_BIN)
2837 return print_bin_fmt(iter);
2839 if (trace_flags & TRACE_ITER_HEX)
2840 return print_hex_fmt(iter);
2842 if (trace_flags & TRACE_ITER_RAW)
2843 return print_raw_fmt(iter);
2845 return print_trace_fmt(iter);
2848 void trace_latency_header(struct seq_file *m)
2850 struct trace_iterator *iter = m->private;
2851 struct trace_array *tr = iter->tr;
2853 /* print nothing if the buffers are empty */
2854 if (trace_empty(iter))
2857 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2858 print_trace_header(m, iter);
2860 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2861 print_lat_help_header(m);
2864 void trace_default_header(struct seq_file *m)
2866 struct trace_iterator *iter = m->private;
2867 struct trace_array *tr = iter->tr;
2868 unsigned long trace_flags = tr->trace_flags;
2870 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2873 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2874 /* print nothing if the buffers are empty */
2875 if (trace_empty(iter))
2877 print_trace_header(m, iter);
2878 if (!(trace_flags & TRACE_ITER_VERBOSE))
2879 print_lat_help_header(m);
2881 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2882 if (trace_flags & TRACE_ITER_IRQ_INFO)
2883 print_func_help_header_irq(iter->trace_buffer, m);
2885 print_func_help_header(iter->trace_buffer, m);
2890 static void test_ftrace_alive(struct seq_file *m)
2892 if (!ftrace_is_dead())
2894 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2895 "# MAY BE MISSING FUNCTION EVENTS\n");
2898 #ifdef CONFIG_TRACER_MAX_TRACE
2899 static void show_snapshot_main_help(struct seq_file *m)
2901 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2902 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2903 "# Takes a snapshot of the main buffer.\n"
2904 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2905 "# (Doesn't have to be '2' works with any number that\n"
2906 "# is not a '0' or '1')\n");
2909 static void show_snapshot_percpu_help(struct seq_file *m)
2911 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2912 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2913 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2914 "# Takes a snapshot of the main buffer for this cpu.\n");
2916 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2917 "# Must use main snapshot file to allocate.\n");
2919 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2920 "# (Doesn't have to be '2' works with any number that\n"
2921 "# is not a '0' or '1')\n");
2924 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2926 if (iter->tr->allocated_snapshot)
2927 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2929 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2931 seq_puts(m, "# Snapshot commands:\n");
2932 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2933 show_snapshot_main_help(m);
2935 show_snapshot_percpu_help(m);
2938 /* Should never be called */
2939 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
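/*
 * Illustrative user-space sketch (not part of this file): driving the
 * snapshot file documented above. The helper name is made up, and it
 * assumes tracefs is mounted at /sys/kernel/tracing with snapshot
 * support configured in.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int take_snapshot(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		write(fd, "1", 1);	// echo 1 > snapshot: allocate (if needed) and snapshot
 *		close(fd);
 *		return 0;
 *	}
 */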
2942 static int s_show(struct seq_file *m, void *v)
2944 struct trace_iterator *iter = v;
2947 if (iter->ent == NULL) {
2949 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2951 test_ftrace_alive(m);
2953 if (iter->snapshot && trace_empty(iter))
2954 print_snapshot_help(m, iter);
2955 else if (iter->trace && iter->trace->print_header)
2956 iter->trace->print_header(m);
2958 trace_default_header(m);
2960 } else if (iter->leftover) {
2962 * If we filled the seq_file buffer earlier, we
2963 * want to just show it now.
2965 ret = trace_print_seq(m, &iter->seq);
2967 /* ret should this time be zero, but you never know */
2968 iter->leftover = ret;
2971 print_trace_line(iter);
2972 ret = trace_print_seq(m, &iter->seq);
2974 * If we overflow the seq_file buffer, then it will
2975 * ask us for this data again at start up.
2977 * ret is 0 if seq_file write succeeded.
2980 iter->leftover = ret;
2987 * Should be used after trace_array_get(), trace_types_lock
2988 * ensures that i_cdev was already initialized.
2990 static inline int tracing_get_cpu(struct inode *inode)
2992 if (inode->i_cdev) /* See trace_create_cpu_file() */
2993 return (long)inode->i_cdev - 1;
2994 return RING_BUFFER_ALL_CPUS;
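/*
 * Counterpart sketch (an assumption restated from the reference above;
 * see trace_create_cpu_file() for the authoritative version): the
 * per-cpu file creation stores cpu + 1 in i_cdev, so a NULL i_cdev can
 * mean "not bound to a CPU", i.e. RING_BUFFER_ALL_CPUS:
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 */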
2997 static const struct seq_operations tracer_seq_ops = {
3004 static struct trace_iterator *
3005 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3007 struct trace_array *tr = inode->i_private;
3008 struct trace_iterator *iter;
3011 if (tracing_disabled)
3012 return ERR_PTR(-ENODEV);
3014 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3016 return ERR_PTR(-ENOMEM);
3018 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3020 if (!iter->buffer_iter)
3024 * We make a copy of the current tracer to avoid concurrent
3025 * changes on it while we are reading.
3027 mutex_lock(&trace_types_lock);
3028 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3032 *iter->trace = *tr->current_trace;
3034 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3039 #ifdef CONFIG_TRACER_MAX_TRACE
3040 /* Currently only the top directory has a snapshot */
3041 if (tr->current_trace->print_max || snapshot)
3042 iter->trace_buffer = &tr->max_buffer;
3045 iter->trace_buffer = &tr->trace_buffer;
3046 iter->snapshot = snapshot;
3048 iter->cpu_file = tracing_get_cpu(inode);
3049 mutex_init(&iter->mutex);
3051 /* Notify the tracer early; before we stop tracing. */
3052 if (iter->trace && iter->trace->open)
3053 iter->trace->open(iter);
3055 /* Annotate start of buffers if we had overruns */
3056 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3057 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3059 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3060 if (trace_clocks[tr->clock_id].in_ns)
3061 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3063 /* stop the trace while dumping if we are not opening "snapshot" */
3064 if (!iter->snapshot)
3065 tracing_stop_tr(tr);
3067 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3068 for_each_tracing_cpu(cpu) {
3069 iter->buffer_iter[cpu] =
3070 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3072 ring_buffer_read_prepare_sync();
3073 for_each_tracing_cpu(cpu) {
3074 ring_buffer_read_start(iter->buffer_iter[cpu]);
3075 tracing_iter_reset(iter, cpu);
3078 cpu = iter->cpu_file;
3079 iter->buffer_iter[cpu] =
3080 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3081 ring_buffer_read_prepare_sync();
3082 ring_buffer_read_start(iter->buffer_iter[cpu]);
3083 tracing_iter_reset(iter, cpu);
3086 mutex_unlock(&trace_types_lock);
3091 mutex_unlock(&trace_types_lock);
3093 kfree(iter->buffer_iter);
3095 seq_release_private(inode, file);
3096 return ERR_PTR(-ENOMEM);
3099 int tracing_open_generic(struct inode *inode, struct file *filp)
3101 if (tracing_disabled)
3104 filp->private_data = inode->i_private;
3108 bool tracing_is_disabled(void)
3110 return tracing_disabled ? true : false;
3114 * Open and update trace_array ref count.
3115 * Must have the current trace_array passed to it.
3117 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3119 struct trace_array *tr = inode->i_private;
3121 if (tracing_disabled)
3124 if (trace_array_get(tr) < 0)
3127 filp->private_data = inode->i_private;
3132 static int tracing_release(struct inode *inode, struct file *file)
3134 struct trace_array *tr = inode->i_private;
3135 struct seq_file *m = file->private_data;
3136 struct trace_iterator *iter;
3139 if (!(file->f_mode & FMODE_READ)) {
3140 trace_array_put(tr);
3144 /* Writes do not use seq_file */
3146 mutex_lock(&trace_types_lock);
3148 for_each_tracing_cpu(cpu) {
3149 if (iter->buffer_iter[cpu])
3150 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3153 if (iter->trace && iter->trace->close)
3154 iter->trace->close(iter);
3156 if (!iter->snapshot)
3157 /* reenable tracing if it was previously enabled */
3158 tracing_start_tr(tr);
3160 __trace_array_put(tr);
3162 mutex_unlock(&trace_types_lock);
3164 mutex_destroy(&iter->mutex);
3165 free_cpumask_var(iter->started);
3167 kfree(iter->buffer_iter);
3168 seq_release_private(inode, file);
3173 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3175 struct trace_array *tr = inode->i_private;
3177 trace_array_put(tr);
3181 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3183 struct trace_array *tr = inode->i_private;
3185 trace_array_put(tr);
3187 return single_release(inode, file);
3190 static int tracing_open(struct inode *inode, struct file *file)
3192 struct trace_array *tr = inode->i_private;
3193 struct trace_iterator *iter;
3196 if (trace_array_get(tr) < 0)
3199 /* If this file was open for write, then erase contents */
3200 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3201 int cpu = tracing_get_cpu(inode);
3203 if (cpu == RING_BUFFER_ALL_CPUS)
3204 tracing_reset_online_cpus(&tr->trace_buffer);
3206 tracing_reset(&tr->trace_buffer, cpu);
3209 if (file->f_mode & FMODE_READ) {
3210 iter = __tracing_open(inode, file, false);
3212 ret = PTR_ERR(iter);
3213 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3214 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3218 trace_array_put(tr);
3224 * Some tracers are not suitable for instance buffers.
3225 * A tracer is always available for the global array (toplevel)
3226 * or if it explicitly states that it is.
3229 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3231 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3234 /* Find the next tracer that this trace array may use */
3235 static struct tracer *
3236 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3238 while (t && !trace_ok_for_array(t, tr))
3245 t_next(struct seq_file *m, void *v, loff_t *pos)
3247 struct trace_array *tr = m->private;
3248 struct tracer *t = v;
3253 t = get_tracer_for_array(tr, t->next);
3258 static void *t_start(struct seq_file *m, loff_t *pos)
3260 struct trace_array *tr = m->private;
3264 mutex_lock(&trace_types_lock);
3266 t = get_tracer_for_array(tr, trace_types);
3267 for (; t && l < *pos; t = t_next(m, t, &l))
3273 static void t_stop(struct seq_file *m, void *p)
3275 mutex_unlock(&trace_types_lock);
3278 static int t_show(struct seq_file *m, void *v)
3280 struct tracer *t = v;
3285 seq_puts(m, t->name);
3294 static const struct seq_operations show_traces_seq_ops = {
3301 static int show_traces_open(struct inode *inode, struct file *file)
3303 struct trace_array *tr = inode->i_private;
3307 if (tracing_disabled)
3310 ret = seq_open(file, &show_traces_seq_ops);
3314 m = file->private_data;
3321 tracing_write_stub(struct file *filp, const char __user *ubuf,
3322 size_t count, loff_t *ppos)
3327 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3331 if (file->f_mode & FMODE_READ)
3332 ret = seq_lseek(file, offset, whence);
3334 file->f_pos = ret = 0;
3339 static const struct file_operations tracing_fops = {
3340 .open = tracing_open,
3342 .write = tracing_write_stub,
3343 .llseek = tracing_lseek,
3344 .release = tracing_release,
3347 static const struct file_operations show_traces_fops = {
3348 .open = show_traces_open,
3350 .release = seq_release,
3351 .llseek = seq_lseek,
3355 * The tracer itself will not take this lock, but still we want
3356 * to provide a consistent cpumask to user-space:
3358 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3361 * Temporary storage for the character representation of the
3362 * CPU bitmask (and one more byte for the newline):
3364 static char mask_str[NR_CPUS + 1];
3367 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3368 size_t count, loff_t *ppos)
3370 struct trace_array *tr = file_inode(filp)->i_private;
3373 mutex_lock(&tracing_cpumask_update_lock);
3375 len = snprintf(mask_str, count, "%*pb\n",
3376 cpumask_pr_args(tr->tracing_cpumask));
3381 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3384 mutex_unlock(&tracing_cpumask_update_lock);
3390 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3391 size_t count, loff_t *ppos)
3393 struct trace_array *tr = file_inode(filp)->i_private;
3394 cpumask_var_t tracing_cpumask_new;
3397 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3400 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3404 mutex_lock(&tracing_cpumask_update_lock);
3406 local_irq_disable();
3407 arch_spin_lock(&tr->max_lock);
3408 for_each_tracing_cpu(cpu) {
3410 * Increase/decrease the disabled counter if we are
3411 * about to flip a bit in the cpumask:
3413 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3414 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3415 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3416 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3418 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3419 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3420 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3421 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3424 arch_spin_unlock(&tr->max_lock);
3427 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3429 mutex_unlock(&tracing_cpumask_update_lock);
3430 free_cpumask_var(tracing_cpumask_new);
3435 free_cpumask_var(tracing_cpumask_new);
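/*
 * Illustrative user-space sketch (not part of this file): limiting
 * tracing to CPUs 0 and 1 by writing a hex mask, which is parsed by
 * cpumask_parse_user() above. Helper name and mount point are
 * illustrative.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int trace_cpus_0_and_1(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		write(fd, "3", 1);	// hex mask: bit 0 | bit 1
 *		close(fd);
 *		return 0;
 *	}
 */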
3440 static const struct file_operations tracing_cpumask_fops = {
3441 .open = tracing_open_generic_tr,
3442 .read = tracing_cpumask_read,
3443 .write = tracing_cpumask_write,
3444 .release = tracing_release_generic_tr,
3445 .llseek = generic_file_llseek,
3448 static int tracing_trace_options_show(struct seq_file *m, void *v)
3450 struct tracer_opt *trace_opts;
3451 struct trace_array *tr = m->private;
3455 mutex_lock(&trace_types_lock);
3456 tracer_flags = tr->current_trace->flags->val;
3457 trace_opts = tr->current_trace->flags->opts;
3459 for (i = 0; trace_options[i]; i++) {
3460 if (tr->trace_flags & (1 << i))
3461 seq_printf(m, "%s\n", trace_options[i]);
3463 seq_printf(m, "no%s\n", trace_options[i]);
3466 for (i = 0; trace_opts[i].name; i++) {
3467 if (tracer_flags & trace_opts[i].bit)
3468 seq_printf(m, "%s\n", trace_opts[i].name);
3470 seq_printf(m, "no%s\n", trace_opts[i].name);
3472 mutex_unlock(&trace_types_lock);
3477 static int __set_tracer_option(struct trace_array *tr,
3478 struct tracer_flags *tracer_flags,
3479 struct tracer_opt *opts, int neg)
3481 struct tracer *trace = tracer_flags->trace;
3484 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3489 tracer_flags->val &= ~opts->bit;
3491 tracer_flags->val |= opts->bit;
3495 /* Try to assign a tracer specific option */
3496 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3498 struct tracer *trace = tr->current_trace;
3499 struct tracer_flags *tracer_flags = trace->flags;
3500 struct tracer_opt *opts = NULL;
3503 for (i = 0; tracer_flags->opts[i].name; i++) {
3504 opts = &tracer_flags->opts[i];
3506 if (strcmp(cmp, opts->name) == 0)
3507 return __set_tracer_option(tr, trace->flags, opts, neg);
3513 /* Some tracers require overwrite to stay enabled */
3514 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3516 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3522 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3524 /* do nothing if flag is already set */
3525 if (!!(tr->trace_flags & mask) == !!enabled)
3528 /* Give the tracer a chance to approve the change */
3529 if (tr->current_trace->flag_changed)
3530 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3534 tr->trace_flags |= mask;
3536 tr->trace_flags &= ~mask;
3538 if (mask == TRACE_ITER_RECORD_CMD)
3539 trace_event_enable_cmd_record(enabled);
3541 if (mask == TRACE_ITER_EVENT_FORK)
3542 trace_event_follow_fork(tr, enabled);
3544 if (mask == TRACE_ITER_OVERWRITE) {
3545 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3546 #ifdef CONFIG_TRACER_MAX_TRACE
3547 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3551 if (mask == TRACE_ITER_PRINTK) {
3552 trace_printk_start_stop_comm(enabled);
3553 trace_printk_control(enabled);
3559 static int trace_set_options(struct trace_array *tr, char *option)
3565 size_t orig_len = strlen(option);
3567 cmp = strstrip(option);
3569 if (strncmp(cmp, "no", 2) == 0) {
3574 mutex_lock(&trace_types_lock);
3576 for (i = 0; trace_options[i]; i++) {
3577 if (strcmp(cmp, trace_options[i]) == 0) {
3578 ret = set_tracer_flag(tr, 1 << i, !neg);
3583 /* If no option could be set, test the specific tracer options */
3584 if (!trace_options[i])
3585 ret = set_tracer_option(tr, cmp, neg);
3587 mutex_unlock(&trace_types_lock);
3590 * If the first trailing whitespace is replaced with '\0' by strstrip,
3591 * turn it back into a space.
3593 if (orig_len > strlen(option))
3594 option[strlen(option)] = ' ';
3599 static void __init apply_trace_boot_options(void)
3601 char *buf = trace_boot_options_buf;
3605 option = strsep(&buf, ",");
3611 trace_set_options(&global_trace, option);
3613 /* Put back the comma to allow this to be called again */
3620 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3621 size_t cnt, loff_t *ppos)
3623 struct seq_file *m = filp->private_data;
3624 struct trace_array *tr = m->private;
3628 if (cnt >= sizeof(buf))
3631 if (copy_from_user(buf, ubuf, cnt))
3636 ret = trace_set_options(tr, buf);
3645 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3647 struct trace_array *tr = inode->i_private;
3650 if (tracing_disabled)
3653 if (trace_array_get(tr) < 0)
3656 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3658 trace_array_put(tr);
3663 static const struct file_operations tracing_iter_fops = {
3664 .open = tracing_trace_options_open,
3666 .llseek = seq_lseek,
3667 .release = tracing_single_release_tr,
3668 .write = tracing_trace_options_write,
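/*
 * Illustrative user-space sketch (not part of this file): toggling an
 * option through the trace_options file handled above. A plain option
 * name sets the flag; a "no" prefix clears it (see trace_set_options()).
 * The helper name is made up.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int set_trace_option(const char *opt)	// e.g. "overwrite" or "nooverwrite"
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, opt, strlen(opt));
 *		close(fd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */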
3671 static const char readme_msg[] =
3672 "tracing mini-HOWTO:\n\n"
3673 "# echo 0 > tracing_on : quick way to disable tracing\n"
3674 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3675 " Important files:\n"
3676 " trace\t\t\t- The static contents of the buffer\n"
3677 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3678 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3679 " current_tracer\t- function and latency tracers\n"
3680 " available_tracers\t- list of configured tracers for current_tracer\n"
3681 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3682 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3683 " trace_clock\t\t-change the clock used to order events\n"
3684 " local: Per cpu clock but may not be synced across CPUs\n"
3685 " global: Synced across CPUs but slows tracing down.\n"
3686 " counter: Not a clock, but just an increment\n"
3687 " uptime: Jiffy counter from time of boot\n"
3688 " perf: Same clock that perf events use\n"
3689 #ifdef CONFIG_X86_64
3690 " x86-tsc: TSC cycle counter\n"
3692 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3693 " tracing_cpumask\t- Limit which CPUs to trace\n"
3694 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3695 "\t\t\t Remove sub-buffer with rmdir\n"
3696 " trace_options\t\t- Set format or modify how tracing happens\n"
3697 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3698 "\t\t\t option name\n"
3699 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3700 #ifdef CONFIG_DYNAMIC_FTRACE
3701 "\n available_filter_functions - list of functions that can be filtered on\n"
3702 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3703 "\t\t\t functions\n"
3704 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3705 "\t modules: Can select a group via module\n"
3706 "\t Format: :mod:<module-name>\n"
3707 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3708 "\t triggers: a command to perform when function is hit\n"
3709 "\t Format: <function>:<trigger>[:count]\n"
3710 "\t trigger: traceon, traceoff\n"
3711 "\t\t enable_event:<system>:<event>\n"
3712 "\t\t disable_event:<system>:<event>\n"
3713 #ifdef CONFIG_STACKTRACE
3716 #ifdef CONFIG_TRACER_SNAPSHOT
3721 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3722 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3723 "\t The first one will disable tracing every time do_fault is hit\n"
3724 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3725 "\t The first time do trap is hit and it disables tracing, the\n"
3726 "\t counter will decrement to 2. If tracing is already disabled,\n"
3727 "\t the counter will not decrement. It only decrements when the\n"
3728 "\t trigger did work\n"
3729 "\t To remove trigger without count:\n"
3730 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3731 "\t To remove trigger with a count:\n"
3732 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3733 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3734 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3735 "\t modules: Can select a group via module command :mod:\n"
3736 "\t Does not accept triggers\n"
3737 #endif /* CONFIG_DYNAMIC_FTRACE */
3738 #ifdef CONFIG_FUNCTION_TRACER
3739 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3742 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3743 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3744 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3745 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3747 #ifdef CONFIG_TRACER_SNAPSHOT
3748 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3749 "\t\t\t snapshot buffer. Read the contents for more\n"
3750 "\t\t\t information\n"
3752 #ifdef CONFIG_STACK_TRACER
3753 " stack_trace\t\t- Shows the max stack trace when active\n"
3754 " stack_max_size\t- Shows current max stack size that was traced\n"
3755 "\t\t\t Write into this file to reset the max size (trigger a\n"
3756 "\t\t\t new trace)\n"
3757 #ifdef CONFIG_DYNAMIC_FTRACE
3758 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3761 #endif /* CONFIG_STACK_TRACER */
3762 " events/\t\t- Directory containing all trace event subsystems:\n"
3763 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3764 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3765 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3767 " filter\t\t- If set, only events passing filter are traced\n"
3768 " events/<system>/<event>/\t- Directory containing control files for\n"
3770 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3771 " filter\t\t- If set, only events passing filter are traced\n"
3772 " trigger\t\t- If set, a command to perform when event is hit\n"
3773 "\t Format: <trigger>[:count][if <filter>]\n"
3774 "\t trigger: traceon, traceoff\n"
3775 "\t enable_event:<system>:<event>\n"
3776 "\t disable_event:<system>:<event>\n"
3777 #ifdef CONFIG_HIST_TRIGGERS
3778 "\t enable_hist:<system>:<event>\n"
3779 "\t disable_hist:<system>:<event>\n"
3781 #ifdef CONFIG_STACKTRACE
3784 #ifdef CONFIG_TRACER_SNAPSHOT
3787 #ifdef CONFIG_HIST_TRIGGERS
3788 "\t\t hist (see below)\n"
3790 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3791 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3792 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3793 "\t events/block/block_unplug/trigger\n"
3794 "\t The first disables tracing every time block_unplug is hit.\n"
3795 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3796 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3797 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3798 "\t Like function triggers, the counter is only decremented if it\n"
3799 "\t enabled or disabled tracing.\n"
3800 "\t To remove a trigger without a count:\n"
3801 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3802 "\t To remove a trigger with a count:\n"
3803 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3804 "\t Filters can be ignored when removing a trigger.\n"
3805 #ifdef CONFIG_HIST_TRIGGERS
3806 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
3807 "\t Format: hist:keys=<field1[,field2,...]>\n"
3808 "\t [:values=<field1[,field2,...]>]\n"
3809 "\t [:sort=<field1[,field2,...]>]\n"
3810 "\t [:size=#entries]\n"
3811 "\t [:pause][:continue][:clear]\n"
3812 "\t [:name=histname1]\n"
3813 "\t [if <filter>]\n\n"
3814 "\t When a matching event is hit, an entry is added to a hash\n"
3815 "\t table using the key(s) and value(s) named, and the value of a\n"
3816 "\t sum called 'hitcount' is incremented. Keys and values\n"
3817 "\t correspond to fields in the event's format description. Keys\n"
3818 "\t can be any field, or the special string 'stacktrace'.\n"
3819 "\t Compound keys consisting of up to two fields can be specified\n"
3820 "\t by the 'keys' keyword. Values must correspond to numeric\n"
3821 "\t fields. Sort keys consisting of up to two fields can be\n"
3822 "\t specified using the 'sort' keyword. The sort direction can\n"
3823 "\t be modified by appending '.descending' or '.ascending' to a\n"
3824 "\t sort field. The 'size' parameter can be used to specify more\n"
3825 "\t or fewer than the default 2048 entries for the hashtable size.\n"
3826 "\t If a hist trigger is given a name using the 'name' parameter,\n"
3827 "\t its histogram data will be shared with other triggers of the\n"
3828 "\t same name, and trigger hits will update this common data.\n\n"
3829 "\t Reading the 'hist' file for the event will dump the hash\n"
3830 "\t table in its entirety to stdout. If there are multiple hist\n"
3831 "\t triggers attached to an event, there will be a table for each\n"
3832 "\t trigger in the output. The table displayed for a named\n"
3833 "\t trigger will be the same as any other instance having the\n"
3834 "\t same name. The default format used to display a given field\n"
3835 "\t can be modified by appending any of the following modifiers\n"
3836 "\t to the field name, as applicable:\n\n"
3837 "\t .hex display a number as a hex value\n"
3838 "\t .sym display an address as a symbol\n"
3839 "\t .sym-offset display an address as a symbol and offset\n"
3840 "\t .execname display a common_pid as a program name\n"
3841 "\t .syscall display a syscall id as a syscall name\n\n"
3842 "\t .log2 display log2 value rather than raw number\n\n"
3843 "\t The 'pause' parameter can be used to pause an existing hist\n"
3844 "\t trigger or to start a hist trigger but not log any events\n"
3845 "\t until told to do so. 'continue' can be used to start or\n"
3846 "\t restart a paused hist trigger.\n\n"
3847 "\t The 'clear' parameter will clear the contents of a running\n"
3848 "\t hist trigger and leave its current paused/active state\n"
3850 "\t The enable_hist and disable_hist triggers can be used to\n"
3851 "\t have one event conditionally start and stop another event's\n"
3852 "\t already-attached hist trigger. The syntax is analagous to\n"
3853 "\t the enable_event and disable_event triggers.\n"
3858 tracing_readme_read(struct file *filp, char __user *ubuf,
3859 size_t cnt, loff_t *ppos)
3861 return simple_read_from_buffer(ubuf, cnt, ppos,
3862 readme_msg, strlen(readme_msg));
3865 static const struct file_operations tracing_readme_fops = {
3866 .open = tracing_open_generic,
3867 .read = tracing_readme_read,
3868 .llseek = generic_file_llseek,
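/*
 * Illustrative user-space sketch (not part of this file): installing an
 * event trigger of the form documented in the mini-HOWTO above, here a
 * hist trigger keyed on common_pid. The helper name and the chosen
 * event path are examples only.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int add_hist_trigger(void)
 *	{
 *		const char *cmd = "hist:keys=common_pid";
 *		int fd = open("/sys/kernel/tracing/events/sched/sched_switch/trigger",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		write(fd, cmd, strlen(cmd));
 *		close(fd);
 *		return 0;
 *	}
 */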
3871 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3873 unsigned int *ptr = v;
3875 if (*pos || m->count)
3880 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3882 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3891 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3897 arch_spin_lock(&trace_cmdline_lock);
3899 v = &savedcmd->map_cmdline_to_pid[0];
3901 v = saved_cmdlines_next(m, v, &l);
3909 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3911 arch_spin_unlock(&trace_cmdline_lock);
3915 static int saved_cmdlines_show(struct seq_file *m, void *v)
3917 char buf[TASK_COMM_LEN];
3918 unsigned int *pid = v;
3920 __trace_find_cmdline(*pid, buf);
3921 seq_printf(m, "%d %s\n", *pid, buf);
3925 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3926 .start = saved_cmdlines_start,
3927 .next = saved_cmdlines_next,
3928 .stop = saved_cmdlines_stop,
3929 .show = saved_cmdlines_show,
3932 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3934 if (tracing_disabled)
3937 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3940 static const struct file_operations tracing_saved_cmdlines_fops = {
3941 .open = tracing_saved_cmdlines_open,
3943 .llseek = seq_lseek,
3944 .release = seq_release,
3948 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3949 size_t cnt, loff_t *ppos)
3954 arch_spin_lock(&trace_cmdline_lock);
3955 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3956 arch_spin_unlock(&trace_cmdline_lock);
3958 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3961 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3963 kfree(s->saved_cmdlines);
3964 kfree(s->map_cmdline_to_pid);
3968 static int tracing_resize_saved_cmdlines(unsigned int val)
3970 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3972 s = kmalloc(sizeof(*s), GFP_KERNEL);
3976 if (allocate_cmdlines_buffer(val, s) < 0) {
3981 arch_spin_lock(&trace_cmdline_lock);
3982 savedcmd_temp = savedcmd;
3984 arch_spin_unlock(&trace_cmdline_lock);
3985 free_saved_cmdlines_buffer(savedcmd_temp);
3991 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3992 size_t cnt, loff_t *ppos)
3997 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4001 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
4002 if (!val || val > PID_MAX_DEFAULT)
4005 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4014 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4015 .open = tracing_open_generic,
4016 .read = tracing_saved_cmdlines_size_read,
4017 .write = tracing_saved_cmdlines_size_write,
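/*
 * Illustrative user-space sketch (not part of this file): growing the
 * saved-cmdlines map resized by the handler above, so more pid<->comm
 * pairs survive. Helper name is made up; the value must stay within
 * [1, PID_MAX_DEFAULT].
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int grow_saved_cmdlines(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		write(fd, "1024", 4);
 *		close(fd);
 *		return 0;
 *	}
 */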
4020 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
4021 static union trace_enum_map_item *
4022 update_enum_map(union trace_enum_map_item *ptr)
4024 if (!ptr->map.enum_string) {
4025 if (ptr->tail.next) {
4026 ptr = ptr->tail.next;
4027 /* Set ptr to the next real item (skip head) */
4035 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4037 union trace_enum_map_item *ptr = v;
4040 * Paranoid! If ptr points to end, we don't want to increment past it.
4041 * This really should never happen.
4043 ptr = update_enum_map(ptr);
4044 if (WARN_ON_ONCE(!ptr))
4051 ptr = update_enum_map(ptr);
4056 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4058 union trace_enum_map_item *v;
4061 mutex_lock(&trace_enum_mutex);
4063 v = trace_enum_maps;
4067 while (v && l < *pos) {
4068 v = enum_map_next(m, v, &l);
4074 static void enum_map_stop(struct seq_file *m, void *v)
4076 mutex_unlock(&trace_enum_mutex);
4079 static int enum_map_show(struct seq_file *m, void *v)
4081 union trace_enum_map_item *ptr = v;
4083 seq_printf(m, "%s %ld (%s)\n",
4084 ptr->map.enum_string, ptr->map.enum_value,
4090 static const struct seq_operations tracing_enum_map_seq_ops = {
4091 .start = enum_map_start,
4092 .next = enum_map_next,
4093 .stop = enum_map_stop,
4094 .show = enum_map_show,
4097 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4099 if (tracing_disabled)
4102 return seq_open(filp, &tracing_enum_map_seq_ops);
4105 static const struct file_operations tracing_enum_map_fops = {
4106 .open = tracing_enum_map_open,
4108 .llseek = seq_lseek,
4109 .release = seq_release,
4112 static inline union trace_enum_map_item *
4113 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4115 /* Return tail of array given the head */
4116 return ptr + ptr->head.length + 1;
4120 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4123 struct trace_enum_map **stop;
4124 struct trace_enum_map **map;
4125 union trace_enum_map_item *map_array;
4126 union trace_enum_map_item *ptr;
4131 * The trace_enum_maps contains the map plus a head and tail item,
4132 * where the head holds the module and length of array, and the
4133 * tail holds a pointer to the next list.
4135 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4137 pr_warn("Unable to allocate trace enum mapping\n");
4141 mutex_lock(&trace_enum_mutex);
4143 if (!trace_enum_maps)
4144 trace_enum_maps = map_array;
4146 ptr = trace_enum_maps;
4148 ptr = trace_enum_jmp_to_tail(ptr);
4149 if (!ptr->tail.next)
4151 ptr = ptr->tail.next;
4154 ptr->tail.next = map_array;
4156 map_array->head.mod = mod;
4157 map_array->head.length = len;
4160 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4161 map_array->map = **map;
4164 memset(map_array, 0, sizeof(*map_array));
4166 mutex_unlock(&trace_enum_mutex);
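/*
 * Layout sketch of one saved block (restating the comment above; head
 * and tail share the union with the map entries):
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ] --> next block
 *
 * The element after the last map is zeroed above, so tail.next stays
 * NULL until another block is chained onto it.
 */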
4169 static void trace_create_enum_file(struct dentry *d_tracer)
4171 trace_create_file("enum_map", 0444, d_tracer,
4172 NULL, &tracing_enum_map_fops);
4175 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4176 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4177 static inline void trace_insert_enum_map_file(struct module *mod,
4178 struct trace_enum_map **start, int len) { }
4179 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4181 static void trace_insert_enum_map(struct module *mod,
4182 struct trace_enum_map **start, int len)
4184 struct trace_enum_map **map;
4191 trace_event_enum_update(map, len);
4193 trace_insert_enum_map_file(mod, start, len);
4197 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4198 size_t cnt, loff_t *ppos)
4200 struct trace_array *tr = filp->private_data;
4201 char buf[MAX_TRACER_SIZE+2];
4204 mutex_lock(&trace_types_lock);
4205 r = sprintf(buf, "%s\n", tr->current_trace->name);
4206 mutex_unlock(&trace_types_lock);
4208 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4211 int tracer_init(struct tracer *t, struct trace_array *tr)
4213 tracing_reset_online_cpus(&tr->trace_buffer);
4217 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4221 for_each_tracing_cpu(cpu)
4222 per_cpu_ptr(buf->data, cpu)->entries = val;
4225 #ifdef CONFIG_TRACER_MAX_TRACE
4226 /* resize @trace_buf's buffer to the size of @size_buf's entries */
4227 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4228 struct trace_buffer *size_buf, int cpu_id)
4232 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4233 for_each_tracing_cpu(cpu) {
4234 ret = ring_buffer_resize(trace_buf->buffer,
4235 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4238 per_cpu_ptr(trace_buf->data, cpu)->entries =
4239 per_cpu_ptr(size_buf->data, cpu)->entries;
4242 ret = ring_buffer_resize(trace_buf->buffer,
4243 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4245 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4246 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4251 #endif /* CONFIG_TRACER_MAX_TRACE */
4253 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4254 unsigned long size, int cpu)
4259 * If kernel or user changes the size of the ring buffer
4260 * we use the size that was given, and we can forget about
4261 * expanding it later.
4263 ring_buffer_expanded = true;
4265 /* May be called before buffers are initialized */
4266 if (!tr->trace_buffer.buffer)
4269 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4273 #ifdef CONFIG_TRACER_MAX_TRACE
4274 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4275 !tr->current_trace->use_max_tr)
4278 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4280 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4281 &tr->trace_buffer, cpu);
4284 * AARGH! We are left with a different sized max buffer!!!!
4285 * The max buffer is our "snapshot" buffer.
4286 * When a tracer needs a snapshot (one of the
4287 * latency tracers), it swaps the max buffer
4288 * with the saved snapshot. We succeeded in
4289 * updating the size of the main buffer, but failed to
4290 * update the size of the max buffer. And when we tried
4291 * to reset the main buffer to the original size, we
4292 * failed there too. This is very unlikely to
4293 * happen, but if it does, warn and kill all tracing.
4298 tracing_disabled = 1;
4303 if (cpu == RING_BUFFER_ALL_CPUS)
4304 set_buffer_entries(&tr->max_buffer, size);
4306 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4309 #endif /* CONFIG_TRACER_MAX_TRACE */
4311 if (cpu == RING_BUFFER_ALL_CPUS)
4312 set_buffer_entries(&tr->trace_buffer, size);
4314 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4319 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4320 unsigned long size, int cpu_id)
4324 mutex_lock(&trace_types_lock);
4326 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4327 /* make sure, this cpu is enabled in the mask */
4328 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4334 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4339 mutex_unlock(&trace_types_lock);
4346 * tracing_update_buffers - used by tracing facility to expand ring buffers
4348 * To save memory when tracing is configured in but never used, the
4349 * ring buffers are initially set to a minimum size. Once a user
4350 * starts to use the tracing facility, they need to grow to their
4351 * default size.
4353 * This function is to be called when a tracer is about to be used.
4355 int tracing_update_buffers(void)
4359 mutex_lock(&trace_types_lock);
4360 if (!ring_buffer_expanded)
4361 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4362 RING_BUFFER_ALL_CPUS);
4363 mutex_unlock(&trace_types_lock);
4368 struct trace_option_dentry;
4371 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4374 * Used to clear out the tracer before deletion of an instance.
4375 * Must have trace_types_lock held.
4377 static void tracing_set_nop(struct trace_array *tr)
4379 if (tr->current_trace == &nop_trace)
4382 tr->current_trace->enabled--;
4384 if (tr->current_trace->reset)
4385 tr->current_trace->reset(tr);
4387 tr->current_trace = &nop_trace;
4390 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4392 /* Only enable if the directory has been created already. */
4396 create_trace_option_files(tr, t);
4399 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4402 #ifdef CONFIG_TRACER_MAX_TRACE
4407 mutex_lock(&trace_types_lock);
4409 if (!ring_buffer_expanded) {
4410 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4411 RING_BUFFER_ALL_CPUS);
4417 for (t = trace_types; t; t = t->next) {
4418 if (strcmp(t->name, buf) == 0)
4425 if (t == tr->current_trace)
4428 /* Some tracers are only allowed for the top level buffer */
4429 if (!trace_ok_for_array(t, tr)) {
4434 /* If trace pipe files are being read, we can't change the tracer */
4435 if (tr->current_trace->ref) {
4440 trace_branch_disable();
4442 tr->current_trace->enabled--;
4444 if (tr->current_trace->reset)
4445 tr->current_trace->reset(tr);
4447 /* Current trace needs to be nop_trace before synchronize_sched */
4448 tr->current_trace = &nop_trace;
4450 #ifdef CONFIG_TRACER_MAX_TRACE
4451 had_max_tr = tr->allocated_snapshot;
4453 if (had_max_tr && !t->use_max_tr) {
4455 * We need to make sure that update_max_tr() sees that
4456 * current_trace changed to nop_trace, to keep it from
4457 * swapping the buffers after we resize them.
4458 * update_max_tr() is called with interrupts disabled,
4459 * so a synchronize_sched() is sufficient.
4461 synchronize_sched();
4466 #ifdef CONFIG_TRACER_MAX_TRACE
4467 if (t->use_max_tr && !had_max_tr) {
4468 ret = alloc_snapshot(tr);
4475 ret = tracer_init(t, tr);
4480 tr->current_trace = t;
4481 tr->current_trace->enabled++;
4482 trace_branch_enable(tr);
4484 mutex_unlock(&trace_types_lock);
4490 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4491 size_t cnt, loff_t *ppos)
4493 struct trace_array *tr = filp->private_data;
4494 char buf[MAX_TRACER_SIZE+1];
4501 if (cnt > MAX_TRACER_SIZE)
4502 cnt = MAX_TRACER_SIZE;
4504 if (copy_from_user(buf, ubuf, cnt))
4509 /* strip ending whitespace. */
4510 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4513 err = tracing_set_tracer(tr, buf);
4523 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4524 size_t cnt, loff_t *ppos)
4529 r = snprintf(buf, sizeof(buf), "%ld\n",
4530 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4531 if (r > sizeof(buf))
4533 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4537 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4538 size_t cnt, loff_t *ppos)
4543 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4553 tracing_thresh_read(struct file *filp, char __user *ubuf,
4554 size_t cnt, loff_t *ppos)
4556 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4560 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4561 size_t cnt, loff_t *ppos)
4563 struct trace_array *tr = filp->private_data;
4566 mutex_lock(&trace_types_lock);
4567 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4571 if (tr->current_trace->update_thresh) {
4572 ret = tr->current_trace->update_thresh(tr);
4579 mutex_unlock(&trace_types_lock);
4584 #ifdef CONFIG_TRACER_MAX_TRACE
4587 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4588 size_t cnt, loff_t *ppos)
4590 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4594 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4595 size_t cnt, loff_t *ppos)
4597 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4602 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4604 struct trace_array *tr = inode->i_private;
4605 struct trace_iterator *iter;
4608 if (tracing_disabled)
4611 if (trace_array_get(tr) < 0)
4614 mutex_lock(&trace_types_lock);
4616 /* create a buffer to store the information to pass to userspace */
4617 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4620 __trace_array_put(tr);
4624 trace_seq_init(&iter->seq);
4625 iter->trace = tr->current_trace;
4627 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4632 /* trace pipe does not show start of buffer */
4633 cpumask_setall(iter->started);
4635 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4636 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4638 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4639 if (trace_clocks[tr->clock_id].in_ns)
4640 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4643 iter->trace_buffer = &tr->trace_buffer;
4644 iter->cpu_file = tracing_get_cpu(inode);
4645 mutex_init(&iter->mutex);
4646 filp->private_data = iter;
4648 if (iter->trace->pipe_open)
4649 iter->trace->pipe_open(iter);
4651 nonseekable_open(inode, filp);
4653 tr->current_trace->ref++;
4655 mutex_unlock(&trace_types_lock);
4661 __trace_array_put(tr);
4662 mutex_unlock(&trace_types_lock);
4666 static int tracing_release_pipe(struct inode *inode, struct file *file)
4668 struct trace_iterator *iter = file->private_data;
4669 struct trace_array *tr = inode->i_private;
4671 mutex_lock(&trace_types_lock);
4673 tr->current_trace->ref--;
4675 if (iter->trace->pipe_close)
4676 iter->trace->pipe_close(iter);
4678 mutex_unlock(&trace_types_lock);
4680 free_cpumask_var(iter->started);
4681 mutex_destroy(&iter->mutex);
4684 trace_array_put(tr);
4690 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4692 struct trace_array *tr = iter->tr;
4694 /* Iterators are static, they should be filled or empty */
4695 if (trace_buffer_iter(iter, iter->cpu_file))
4696 return POLLIN | POLLRDNORM;
4698 if (tr->trace_flags & TRACE_ITER_BLOCK)
4700 * Always select as readable when in blocking mode
4702 return POLLIN | POLLRDNORM;
4704 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4709 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4711 struct trace_iterator *iter = filp->private_data;
4713 return trace_poll(iter, filp, poll_table);
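/*
 * Illustrative user-space sketch (not part of this file): waiting for
 * data on an open trace_pipe fd with poll(2); the handlers above report
 * POLLIN | POLLRDNORM once entries are available. Helper name is made
 * up.
 *
 *	#include <poll.h>
 *
 *	int wait_for_trace_data(int fd)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		return poll(&pfd, 1, -1);	// block until readable
 *	}
 */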
4716 /* Must be called with iter->mutex held. */
4717 static int tracing_wait_pipe(struct file *filp)
4719 struct trace_iterator *iter = filp->private_data;
4722 while (trace_empty(iter)) {
4724 if ((filp->f_flags & O_NONBLOCK)) {
4729 * We block until we read something and tracing is disabled.
4730 * We still block if tracing is disabled, but only if we have
4731 * never read anything. This allows a user to cat this file, and
4732 * then enable tracing. But after we have read something,
4733 * we give an EOF when tracing is disabled again.
4735 * iter->pos will be 0 if we haven't read anything.
4737 if (!tracing_is_on() && iter->pos)
4740 mutex_unlock(&iter->mutex);
4742 ret = wait_on_pipe(iter, false);
4744 mutex_lock(&iter->mutex);
4757 tracing_read_pipe(struct file *filp, char __user *ubuf,
4758 size_t cnt, loff_t *ppos)
4760 struct trace_iterator *iter = filp->private_data;
4763 /* return any leftover data */
4764 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4768 trace_seq_init(&iter->seq);
4771 * Avoid more than one consumer on a single file descriptor.
4772 * This is just a matter of trace coherency: the ring buffer itself
4775 mutex_lock(&iter->mutex);
4776 if (iter->trace->read) {
4777 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4783 sret = tracing_wait_pipe(filp);
4787 /* stop when tracing is finished */
4788 if (trace_empty(iter)) {
4793 if (cnt >= PAGE_SIZE)
4794 cnt = PAGE_SIZE - 1;
4796 /* reset all but tr, trace, and overruns */
4797 memset(&iter->seq, 0,
4798 sizeof(struct trace_iterator) -
4799 offsetof(struct trace_iterator, seq));
4800 cpumask_clear(iter->started);
4803 trace_event_read_lock();
4804 trace_access_lock(iter->cpu_file);
4805 while (trace_find_next_entry_inc(iter) != NULL) {
4806 enum print_line_t ret;
4807 int save_len = iter->seq.seq.len;
4809 ret = print_trace_line(iter);
4810 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4811 /* don't print partial lines */
4812 iter->seq.seq.len = save_len;
4815 if (ret != TRACE_TYPE_NO_CONSUME)
4816 trace_consume(iter);
4818 if (trace_seq_used(&iter->seq) >= cnt)
4822 * Setting the full flag means we reached the trace_seq buffer
4823 * size and we should leave by partial output condition above.
4824 * One of the trace_seq_* functions is not used properly.
4826 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4829 trace_access_unlock(iter->cpu_file);
4830 trace_event_read_unlock();
4832 /* Now copy what we have to the user */
4833 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4834 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4835 trace_seq_init(&iter->seq);
4838 * If there was nothing to send to user, in spite of consuming trace
4839 * entries, go back to wait for more entries.
4845 mutex_unlock(&iter->mutex);
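/*
 * Illustrative user-space sketch (not part of this file): the consuming
 * read loop that tracing_read_pipe() serves. Reads block while the
 * buffer is empty (see tracing_wait_pipe() above). Helper name is made
 * up.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	void dump_trace_pipe(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *
 *		if (fd < 0)
 *			return;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *	}
 */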
4850 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4853 __free_page(spd->pages[idx]);
4856 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4858 .confirm = generic_pipe_buf_confirm,
4859 .release = generic_pipe_buf_release,
4860 .steal = generic_pipe_buf_steal,
4861 .get = generic_pipe_buf_get,
4865 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4871 /* Seq buffer is page-sized, exactly what we need. */
4873 save_len = iter->seq.seq.len;
4874 ret = print_trace_line(iter);
4876 if (trace_seq_has_overflowed(&iter->seq)) {
4877 iter->seq.seq.len = save_len;
4882 * This should not be hit: TRACE_TYPE_PARTIAL_LINE should only
4883 * be returned if iter->seq overflowed, which was handled just
4884 * above. But check it anyway to be safe.
4886 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4887 iter->seq.seq.len = save_len;
4891 count = trace_seq_used(&iter->seq) - save_len;
4894 iter->seq.seq.len = save_len;
4898 if (ret != TRACE_TYPE_NO_CONSUME)
4899 trace_consume(iter);
4901 if (!trace_find_next_entry_inc(iter)) {
4911 static ssize_t tracing_splice_read_pipe(struct file *filp,
4913 struct pipe_inode_info *pipe,
4917 struct page *pages_def[PIPE_DEF_BUFFERS];
4918 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4919 struct trace_iterator *iter = filp->private_data;
4920 struct splice_pipe_desc spd = {
4922 .partial = partial_def,
4923 .nr_pages = 0, /* This gets updated below. */
4924 .nr_pages_max = PIPE_DEF_BUFFERS,
4926 .ops = &tracing_pipe_buf_ops,
4927 .spd_release = tracing_spd_release_pipe,
4933 if (splice_grow_spd(pipe, &spd))
4936 mutex_lock(&iter->mutex);
4938 if (iter->trace->splice_read) {
4939 ret = iter->trace->splice_read(iter, filp,
4940 ppos, pipe, len, flags);
4945 ret = tracing_wait_pipe(filp);
4949 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4954 trace_event_read_lock();
4955 trace_access_lock(iter->cpu_file);
4957 /* Fill as many pages as possible. */
4958 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4959 spd.pages[i] = alloc_page(GFP_KERNEL);
4963 rem = tracing_fill_pipe_page(rem, iter);
4965 /* Copy the data into the page, so we can start over. */
4966 ret = trace_seq_to_buffer(&iter->seq,
4967 page_address(spd.pages[i]),
4968 trace_seq_used(&iter->seq));
4970 __free_page(spd.pages[i]);
4973 spd.partial[i].offset = 0;
4974 spd.partial[i].len = trace_seq_used(&iter->seq);
4976 trace_seq_init(&iter->seq);
4979 trace_access_unlock(iter->cpu_file);
4980 trace_event_read_unlock();
4981 mutex_unlock(&iter->mutex);
4986 ret = splice_to_pipe(pipe, &spd);
4990 splice_shrink_spd(&spd);
4994 mutex_unlock(&iter->mutex);
4999 tracing_entries_read(struct file *filp, char __user *ubuf,
5000 size_t cnt, loff_t *ppos)
5002 struct inode *inode = file_inode(filp);
5003 struct trace_array *tr = inode->i_private;
5004 int cpu = tracing_get_cpu(inode);
5009 mutex_lock(&trace_types_lock);
5011 if (cpu == RING_BUFFER_ALL_CPUS) {
5012 int cpu, buf_size_same;
5017 /* check if all per-CPU buffer sizes are the same */
5018 for_each_tracing_cpu(cpu) {
5019 /* fill in the size from the first enabled CPU */
5021 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5022 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5028 if (buf_size_same) {
5029 if (!ring_buffer_expanded)
5030 r = sprintf(buf, "%lu (expanded: %lu)\n",
5032 trace_buf_size >> 10);
5034 r = sprintf(buf, "%lu\n", size >> 10);
5036 r = sprintf(buf, "X\n");
5038 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5040 mutex_unlock(&trace_types_lock);
5042 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5047 tracing_entries_write(struct file *filp, const char __user *ubuf,
5048 size_t cnt, loff_t *ppos)
5050 struct inode *inode = file_inode(filp);
5051 struct trace_array *tr = inode->i_private;
5055 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5059 /* must have at least 1 entry */
5063 /* value is in KB */
5065 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
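/*
 * Usage sketch (illustrative): buffer_size_kb is read and written in
 * kilobytes and applies per CPU:
 *
 *     echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *     cat /sys/kernel/tracing/buffer_size_kb
 *
 * As the read side above shows, the top-level file prints "X" when the
 * per-CPU sizes differ, and "<size> (expanded: <size>)" while the ring
 * buffer still has its minimal boot-time size.
 */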
5075 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5076 size_t cnt, loff_t *ppos)
5078 struct trace_array *tr = filp->private_data;
5081 unsigned long size = 0, expanded_size = 0;
5083 mutex_lock(&trace_types_lock);
5084 for_each_tracing_cpu(cpu) {
5085 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5086 if (!ring_buffer_expanded)
5087 expanded_size += trace_buf_size >> 10;
5089 if (ring_buffer_expanded)
5090 r = sprintf(buf, "%lu\n", size);
5092 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5093 mutex_unlock(&trace_types_lock);
5095 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5099 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5100 size_t cnt, loff_t *ppos)
5103 * There is no need to read what the user has written; this function
5104 * exists only so that no error is returned when "echo" is used
5113 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5115 struct trace_array *tr = inode->i_private;
5117 /* disable tracing ? */
5118 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5119 tracer_tracing_off(tr);
5120 /* resize the ring buffer to 0 */
5121 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5123 trace_array_put(tr);
5129 tracing_mark_write(struct file *filp, const char __user *ubuf,
5130 size_t cnt, loff_t *fpos)
5132 unsigned long addr = (unsigned long)ubuf;
5133 struct trace_array *tr = filp->private_data;
5134 struct ring_buffer_event *event;
5135 struct ring_buffer *buffer;
5136 struct print_entry *entry;
5137 unsigned long irq_flags;
5138 struct page *pages[2];
5148 if (tracing_disabled)
5151 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5154 if (cnt > TRACE_BUF_SIZE)
5155 cnt = TRACE_BUF_SIZE;
5158 * Userspace is injecting traces into the kernel trace buffer.
5159 * We want to be as non-intrusive as possible.
5160 * To do so, we do not want to allocate any special buffers
5161 * or take any locks, but instead write the userspace data
5162 * straight into the ring buffer.
5164 * First we need to pin the userspace buffer into memory,
5165 * which it most likely already is, because the caller just
5166 * referenced it. But there's no guarantee of that. By using
5167 * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we
5168 * can access the pages directly and write the data straight into the ring buffer.
5171 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5173 /* check if we cross pages */
5174 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5177 offset = addr & (PAGE_SIZE - 1);
5180 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5181 if (ret < nr_pages) {
5183 put_page(pages[ret]);
5188 for (i = 0; i < nr_pages; i++)
5189 map_page[i] = kmap_atomic(pages[i]);
5191 local_save_flags(irq_flags);
5192 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5193 buffer = tr->trace_buffer.buffer;
5194 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5195 irq_flags, preempt_count());
5197 /* Ring buffer disabled, return as if not open for write */
5202 entry = ring_buffer_event_data(event);
5203 entry->ip = _THIS_IP_;
5205 if (nr_pages == 2) {
5206 len = PAGE_SIZE - offset;
5207 memcpy(&entry->buf, map_page[0] + offset, len);
5208 memcpy(&entry->buf[len], map_page[1], cnt - len);
5210 memcpy(&entry->buf, map_page[0] + offset, cnt);
5212 if (entry->buf[cnt - 1] != '\n') {
5213 entry->buf[cnt] = '\n';
5214 entry->buf[cnt + 1] = '\0';
5216 entry->buf[cnt] = '\0';
5218 __buffer_unlock_commit(buffer, event);
5225 for (i = nr_pages - 1; i >= 0; i--) {
5226 kunmap_atomic(map_page[i]);
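/*
 * Illustrative userspace sketch: a process injects a marker with a
 * single write(2); the payload is capped at TRACE_BUF_SIZE and, per
 * the logic above, may span at most two pages:
 *
 *     int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *     if (fd >= 0)
 *             write(fd, "hello from userspace\n", 21);
 *
 * The marker appears in the trace output as a print event.
 */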
5233 static int tracing_clock_show(struct seq_file *m, void *v)
5235 struct trace_array *tr = m->private;
5238 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5240 "%s%s%s%s", i ? " " : "",
5241 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5242 i == tr->clock_id ? "]" : "");
5248 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5252 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5253 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5256 if (i == ARRAY_SIZE(trace_clocks))
5259 mutex_lock(&trace_types_lock);
5263 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5266 * New clock may not be consistent with the previous clock.
5267 * Reset the buffer so that it doesn't have incomparable timestamps.
5269 tracing_reset_online_cpus(&tr->trace_buffer);
5271 #ifdef CONFIG_TRACER_MAX_TRACE
5272 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5273 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5274 tracing_reset_online_cpus(&tr->max_buffer);
5277 mutex_unlock(&trace_types_lock);
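/*
 * Usage sketch (illustrative): reading trace_clock lists the available
 * clocks with the current one in brackets, and writing a name switches
 * clocks (which, as noted above, also resets the buffers):
 *
 *     cat /sys/kernel/tracing/trace_clock
 *         [local] global counter uptime perf ...
 *     echo global > /sys/kernel/tracing/trace_clock
 */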
5282 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5283 size_t cnt, loff_t *fpos)
5285 struct seq_file *m = filp->private_data;
5286 struct trace_array *tr = m->private;
5288 const char *clockstr;
5291 if (cnt >= sizeof(buf))
5294 if (copy_from_user(buf, ubuf, cnt))
5299 clockstr = strstrip(buf);
5301 ret = tracing_set_clock(tr, clockstr);
5310 static int tracing_clock_open(struct inode *inode, struct file *file)
5312 struct trace_array *tr = inode->i_private;
5315 if (tracing_disabled)
5318 if (trace_array_get(tr))
5321 ret = single_open(file, tracing_clock_show, inode->i_private);
5323 trace_array_put(tr);
5328 struct ftrace_buffer_info {
5329 struct trace_iterator iter;
5334 #ifdef CONFIG_TRACER_SNAPSHOT
5335 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5337 struct trace_array *tr = inode->i_private;
5338 struct trace_iterator *iter;
5342 if (trace_array_get(tr) < 0)
5345 if (file->f_mode & FMODE_READ) {
5346 iter = __tracing_open(inode, file, true);
5348 ret = PTR_ERR(iter);
5350 /* Writes still need the seq_file to hold the private data */
5352 m = kzalloc(sizeof(*m), GFP_KERNEL);
5355 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5363 iter->trace_buffer = &tr->max_buffer;
5364 iter->cpu_file = tracing_get_cpu(inode);
5366 file->private_data = m;
5370 trace_array_put(tr);
5376 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5379 struct seq_file *m = filp->private_data;
5380 struct trace_iterator *iter = m->private;
5381 struct trace_array *tr = iter->tr;
5385 ret = tracing_update_buffers();
5389 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5393 mutex_lock(&trace_types_lock);
5395 if (tr->current_trace->use_max_tr) {
5402 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5406 if (tr->allocated_snapshot)
5410 /* Only allow per-cpu swap if the ring buffer supports it */
5411 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5412 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5417 if (!tr->allocated_snapshot) {
5418 ret = alloc_snapshot(tr);
5422 local_irq_disable();
5423 /* Now, we're going to swap */
5424 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5425 update_max_tr(tr, current, smp_processor_id());
5427 update_max_tr_single(tr, current, iter->cpu_file);
5431 if (tr->allocated_snapshot) {
5432 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5433 tracing_reset_online_cpus(&tr->max_buffer);
5435 tracing_reset(&tr->max_buffer, iter->cpu_file);
5445 mutex_unlock(&trace_types_lock);
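/*
 * Usage sketch (illustrative): the values accepted by the snapshot
 * file follow the logic above:
 *
 *     echo 1 > /sys/kernel/tracing/snapshot    allocate (if needed) and swap
 *     echo 0 > /sys/kernel/tracing/snapshot    free the snapshot buffer
 *     echo 2 > /sys/kernel/tracing/snapshot    clear the snapshot buffer
 *     cat /sys/kernel/tracing/snapshot         read the swapped-out trace
 */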
5449 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5451 struct seq_file *m = file->private_data;
5454 ret = tracing_release(inode, file);
5456 if (file->f_mode & FMODE_READ)
5459 /* If write only, the seq_file is just a stub */
5467 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5468 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5469 size_t count, loff_t *ppos);
5470 static int tracing_buffers_release(struct inode *inode, struct file *file);
5471 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5472 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5474 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5476 struct ftrace_buffer_info *info;
5479 ret = tracing_buffers_open(inode, filp);
5483 info = filp->private_data;
5485 if (info->iter.trace->use_max_tr) {
5486 tracing_buffers_release(inode, filp);
5490 info->iter.snapshot = true;
5491 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5496 #endif /* CONFIG_TRACER_SNAPSHOT */
5499 static const struct file_operations tracing_thresh_fops = {
5500 .open = tracing_open_generic,
5501 .read = tracing_thresh_read,
5502 .write = tracing_thresh_write,
5503 .llseek = generic_file_llseek,
5506 #ifdef CONFIG_TRACER_MAX_TRACE
5507 static const struct file_operations tracing_max_lat_fops = {
5508 .open = tracing_open_generic,
5509 .read = tracing_max_lat_read,
5510 .write = tracing_max_lat_write,
5511 .llseek = generic_file_llseek,
5515 static const struct file_operations set_tracer_fops = {
5516 .open = tracing_open_generic,
5517 .read = tracing_set_trace_read,
5518 .write = tracing_set_trace_write,
5519 .llseek = generic_file_llseek,
5522 static const struct file_operations tracing_pipe_fops = {
5523 .open = tracing_open_pipe,
5524 .poll = tracing_poll_pipe,
5525 .read = tracing_read_pipe,
5526 .splice_read = tracing_splice_read_pipe,
5527 .release = tracing_release_pipe,
5528 .llseek = no_llseek,
5531 static const struct file_operations tracing_entries_fops = {
5532 .open = tracing_open_generic_tr,
5533 .read = tracing_entries_read,
5534 .write = tracing_entries_write,
5535 .llseek = generic_file_llseek,
5536 .release = tracing_release_generic_tr,
5539 static const struct file_operations tracing_total_entries_fops = {
5540 .open = tracing_open_generic_tr,
5541 .read = tracing_total_entries_read,
5542 .llseek = generic_file_llseek,
5543 .release = tracing_release_generic_tr,
5546 static const struct file_operations tracing_free_buffer_fops = {
5547 .open = tracing_open_generic_tr,
5548 .write = tracing_free_buffer_write,
5549 .release = tracing_free_buffer_release,
5552 static const struct file_operations tracing_mark_fops = {
5553 .open = tracing_open_generic_tr,
5554 .write = tracing_mark_write,
5555 .llseek = generic_file_llseek,
5556 .release = tracing_release_generic_tr,
5559 static const struct file_operations trace_clock_fops = {
5560 .open = tracing_clock_open,
5562 .llseek = seq_lseek,
5563 .release = tracing_single_release_tr,
5564 .write = tracing_clock_write,
5567 #ifdef CONFIG_TRACER_SNAPSHOT
5568 static const struct file_operations snapshot_fops = {
5569 .open = tracing_snapshot_open,
5571 .write = tracing_snapshot_write,
5572 .llseek = tracing_lseek,
5573 .release = tracing_snapshot_release,
5576 static const struct file_operations snapshot_raw_fops = {
5577 .open = snapshot_raw_open,
5578 .read = tracing_buffers_read,
5579 .release = tracing_buffers_release,
5580 .splice_read = tracing_buffers_splice_read,
5581 .llseek = no_llseek,
5584 #endif /* CONFIG_TRACER_SNAPSHOT */
5586 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5588 struct trace_array *tr = inode->i_private;
5589 struct ftrace_buffer_info *info;
5592 if (tracing_disabled)
5595 if (trace_array_get(tr) < 0)
5598 info = kzalloc(sizeof(*info), GFP_KERNEL);
5600 trace_array_put(tr);
5604 mutex_lock(&trace_types_lock);
5607 info->iter.cpu_file = tracing_get_cpu(inode);
5608 info->iter.trace = tr->current_trace;
5609 info->iter.trace_buffer = &tr->trace_buffer;
5611 /* Force reading ring buffer for first read */
5612 info->read = (unsigned int)-1;
5614 filp->private_data = info;
5616 tr->current_trace->ref++;
5618 mutex_unlock(&trace_types_lock);
5620 ret = nonseekable_open(inode, filp);
5622 trace_array_put(tr);
5628 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5630 struct ftrace_buffer_info *info = filp->private_data;
5631 struct trace_iterator *iter = &info->iter;
5633 return trace_poll(iter, filp, poll_table);
5637 tracing_buffers_read(struct file *filp, char __user *ubuf,
5638 size_t count, loff_t *ppos)
5640 struct ftrace_buffer_info *info = filp->private_data;
5641 struct trace_iterator *iter = &info->iter;
5648 #ifdef CONFIG_TRACER_MAX_TRACE
5649 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5654 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5659 /* Do we still have data left over from a previous read? */
5660 if (info->read < PAGE_SIZE)
5664 trace_access_lock(iter->cpu_file);
5665 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5669 trace_access_unlock(iter->cpu_file);
5672 if (trace_empty(iter)) {
5673 if ((filp->f_flags & O_NONBLOCK))
5676 ret = wait_on_pipe(iter, false);
5687 size = PAGE_SIZE - info->read;
5691 ret = copy_to_user(ubuf, info->spare + info->read, size);
5703 static int tracing_buffers_release(struct inode *inode, struct file *file)
5705 struct ftrace_buffer_info *info = file->private_data;
5706 struct trace_iterator *iter = &info->iter;
5708 mutex_lock(&trace_types_lock);
5710 iter->tr->current_trace->ref--;
5712 __trace_array_put(iter->tr);
5715 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5718 mutex_unlock(&trace_types_lock);
5724 struct ring_buffer *buffer;
5729 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5730 struct pipe_buffer *buf)
5732 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5737 ring_buffer_free_read_page(ref->buffer, ref->page);
5742 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5743 struct pipe_buffer *buf)
5745 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5750 /* Pipe buffer operations for a buffer. */
5751 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5753 .confirm = generic_pipe_buf_confirm,
5754 .release = buffer_pipe_buf_release,
5755 .steal = generic_pipe_buf_steal,
5756 .get = buffer_pipe_buf_get,
5760 * Callback from splice_to_pipe(), if we need to release some pages
5761 * at the end of the spd in case we errored out while filling the pipe.
5763 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5765 struct buffer_ref *ref =
5766 (struct buffer_ref *)spd->partial[i].private;
5771 ring_buffer_free_read_page(ref->buffer, ref->page);
5773 spd->partial[i].private = 0;
5777 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5778 struct pipe_inode_info *pipe, size_t len,
5781 struct ftrace_buffer_info *info = file->private_data;
5782 struct trace_iterator *iter = &info->iter;
5783 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5784 struct page *pages_def[PIPE_DEF_BUFFERS];
5785 struct splice_pipe_desc spd = {
5787 .partial = partial_def,
5788 .nr_pages_max = PIPE_DEF_BUFFERS,
5790 .ops = &buffer_pipe_buf_ops,
5791 .spd_release = buffer_spd_release,
5793 struct buffer_ref *ref;
5794 int entries, size, i;
5797 #ifdef CONFIG_TRACER_MAX_TRACE
5798 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5802 if (splice_grow_spd(pipe, &spd))
5805 if (*ppos & (PAGE_SIZE - 1))
5808 if (len & (PAGE_SIZE - 1)) {
5809 if (len < PAGE_SIZE)
5815 trace_access_lock(iter->cpu_file);
5816 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5818 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5822 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5829 ref->buffer = iter->trace_buffer->buffer;
5830 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5837 r = ring_buffer_read_page(ref->buffer, &ref->page,
5838 len, iter->cpu_file, 1);
5840 ring_buffer_free_read_page(ref->buffer, ref->page);
5846 * zero out any left over data, this is going to user land
5849 size = ring_buffer_page_len(ref->page);
5850 if (size < PAGE_SIZE)
5851 memset(ref->page + size, 0, PAGE_SIZE - size);
5853 page = virt_to_page(ref->page);
5855 spd.pages[i] = page;
5856 spd.partial[i].len = PAGE_SIZE;
5857 spd.partial[i].offset = 0;
5858 spd.partial[i].private = (unsigned long)ref;
5862 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5865 trace_access_unlock(iter->cpu_file);
5868 /* did we read anything? */
5869 if (!spd.nr_pages) {
5873 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5876 ret = wait_on_pipe(iter, true);
5883 ret = splice_to_pipe(pipe, &spd);
5884 splice_shrink_spd(&spd);
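/*
 * Illustrative userspace sketch: trace_pipe_raw hands out whole ring
 * buffer pages, so readers typically splice(2) them into a pipe or
 * file to avoid copies (roughly what trace-cmd does when recording):
 *
 *     int tfd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *                    O_RDONLY);
 *     splice(tfd, NULL, pipe_write_fd, NULL, 4096, SPLICE_F_NONBLOCK);
 *
 * Note the checks above: the file offset must be page aligned and the
 * requested length is handled in whole pages.
 */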
5889 static const struct file_operations tracing_buffers_fops = {
5890 .open = tracing_buffers_open,
5891 .read = tracing_buffers_read,
5892 .poll = tracing_buffers_poll,
5893 .release = tracing_buffers_release,
5894 .splice_read = tracing_buffers_splice_read,
5895 .llseek = no_llseek,
5899 tracing_stats_read(struct file *filp, char __user *ubuf,
5900 size_t count, loff_t *ppos)
5902 struct inode *inode = file_inode(filp);
5903 struct trace_array *tr = inode->i_private;
5904 struct trace_buffer *trace_buf = &tr->trace_buffer;
5905 int cpu = tracing_get_cpu(inode);
5906 struct trace_seq *s;
5908 unsigned long long t;
5909 unsigned long usec_rem;
5911 s = kmalloc(sizeof(*s), GFP_KERNEL);
5917 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5918 trace_seq_printf(s, "entries: %ld\n", cnt);
5920 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5921 trace_seq_printf(s, "overrun: %ld\n", cnt);
5923 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5924 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5926 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5927 trace_seq_printf(s, "bytes: %ld\n", cnt);
5929 if (trace_clocks[tr->clock_id].in_ns) {
5930 /* local or global for trace_clock */
5931 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5932 usec_rem = do_div(t, USEC_PER_SEC);
5933 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5936 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5937 usec_rem = do_div(t, USEC_PER_SEC);
5938 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5940 /* counter or tsc mode for trace_clock */
5941 trace_seq_printf(s, "oldest event ts: %llu\n",
5942 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5944 trace_seq_printf(s, "now ts: %llu\n",
5945 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5948 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5949 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5951 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5952 trace_seq_printf(s, "read events: %ld\n", cnt);
5954 count = simple_read_from_buffer(ubuf, count, ppos,
5955 s->buffer, trace_seq_used(s));
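/*
 * Example output (illustrative values) of per_cpu/cpu0/stats, matching
 * the trace_seq_printf() calls above:
 *
 *     entries: 1024
 *     overrun: 0
 *     commit overrun: 0
 *     bytes: 53280
 *     oldest event ts: 5260.131621
 *     now ts: 5301.730549
 *     dropped events: 0
 *     read events: 128
 */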
5962 static const struct file_operations tracing_stats_fops = {
5963 .open = tracing_open_generic_tr,
5964 .read = tracing_stats_read,
5965 .llseek = generic_file_llseek,
5966 .release = tracing_release_generic_tr,
5969 #ifdef CONFIG_DYNAMIC_FTRACE
5971 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5977 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5978 size_t cnt, loff_t *ppos)
5980 static char ftrace_dyn_info_buffer[1024];
5981 static DEFINE_MUTEX(dyn_info_mutex);
5982 unsigned long *p = filp->private_data;
5983 char *buf = ftrace_dyn_info_buffer;
5984 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5987 mutex_lock(&dyn_info_mutex);
5988 r = sprintf(buf, "%ld ", *p);
5990 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5993 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5995 mutex_unlock(&dyn_info_mutex);
6000 static const struct file_operations tracing_dyn_info_fops = {
6001 .open = tracing_open_generic,
6002 .read = tracing_read_dyn_info,
6003 .llseek = generic_file_llseek,
6005 #endif /* CONFIG_DYNAMIC_FTRACE */
6007 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6009 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6015 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6017 unsigned long *count = (unsigned long *)data;
6029 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6030 struct ftrace_probe_ops *ops, void *data)
6032 long count = (long)data;
6034 seq_printf(m, "%ps:", (void *)ip);
6036 seq_puts(m, "snapshot");
6039 seq_puts(m, ":unlimited\n");
6041 seq_printf(m, ":count=%ld\n", count);
6046 static struct ftrace_probe_ops snapshot_probe_ops = {
6047 .func = ftrace_snapshot,
6048 .print = ftrace_snapshot_print,
6051 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6052 .func = ftrace_count_snapshot,
6053 .print = ftrace_snapshot_print,
6057 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6058 char *glob, char *cmd, char *param, int enable)
6060 struct ftrace_probe_ops *ops;
6061 void *count = (void *)-1;
6065 /* hash funcs only work with set_ftrace_filter */
6069 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6071 if (glob[0] == '!') {
6072 unregister_ftrace_function_probe_func(glob+1, ops);
6079 number = strsep(&param, ":");
6081 if (!strlen(number))
6085 * We use the callback data field (which is a pointer) as our counter.
6088 ret = kstrtoul(number, 0, (unsigned long *)&count);
6093 ret = register_ftrace_function_probe(glob, ops, count);
6096 alloc_snapshot(&global_trace);
6098 return ret < 0 ? ret : 0;
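/*
 * Usage sketch (illustrative): this wires the "snapshot" command into
 * set_ftrace_filter, optionally with a count:
 *
 *     echo 'kfree:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *     echo 'kfree:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *     echo '!kfree:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The first form snapshots on every call to kfree(), the second only
 * the first five times, and the '!' form removes the probe again.
 */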
6101 static struct ftrace_func_command ftrace_snapshot_cmd = {
6103 .func = ftrace_trace_snapshot_callback,
6106 static __init int register_snapshot_cmd(void)
6108 return register_ftrace_command(&ftrace_snapshot_cmd);
6111 static inline __init int register_snapshot_cmd(void) { return 0; }
6112 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6114 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6116 if (WARN_ON(!tr->dir))
6117 return ERR_PTR(-ENODEV);
6119 /* Top directory uses NULL as the parent */
6120 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6123 /* All sub buffers have a descriptor */
6127 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6129 struct dentry *d_tracer;
6132 return tr->percpu_dir;
6134 d_tracer = tracing_get_dentry(tr);
6135 if (IS_ERR(d_tracer))
6138 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6140 WARN_ONCE(!tr->percpu_dir,
6141 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6143 return tr->percpu_dir;
6146 static struct dentry *
6147 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6148 void *data, long cpu, const struct file_operations *fops)
6150 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6152 if (ret) /* See tracing_get_cpu() */
6153 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6158 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6160 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6161 struct dentry *d_cpu;
6162 char cpu_dir[30]; /* 30 characters should be more than enough */
6167 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6168 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6170 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6174 /* per cpu trace_pipe */
6175 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6176 tr, cpu, &tracing_pipe_fops);
6179 trace_create_cpu_file("trace", 0644, d_cpu,
6180 tr, cpu, &tracing_fops);
6182 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6183 tr, cpu, &tracing_buffers_fops);
6185 trace_create_cpu_file("stats", 0444, d_cpu,
6186 tr, cpu, &tracing_stats_fops);
6188 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6189 tr, cpu, &tracing_entries_fops);
6191 #ifdef CONFIG_TRACER_SNAPSHOT
6192 trace_create_cpu_file("snapshot", 0644, d_cpu,
6193 tr, cpu, &snapshot_fops);
6195 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6196 tr, cpu, &snapshot_raw_fops);
6200 #ifdef CONFIG_FTRACE_SELFTEST
6201 /* Let selftest have access to static functions in this file */
6202 #include "trace_selftest.c"
6206 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6209 struct trace_option_dentry *topt = filp->private_data;
6212 if (topt->flags->val & topt->opt->bit)
6217 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6221 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6224 struct trace_option_dentry *topt = filp->private_data;
6228 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6232 if (val != 0 && val != 1)
6235 if (!!(topt->flags->val & topt->opt->bit) != val) {
6236 mutex_lock(&trace_types_lock);
6237 ret = __set_tracer_option(topt->tr, topt->flags,
6239 mutex_unlock(&trace_types_lock);
6250 static const struct file_operations trace_options_fops = {
6251 .open = tracing_open_generic,
6252 .read = trace_options_read,
6253 .write = trace_options_write,
6254 .llseek = generic_file_llseek,
6258 * In order to pass in both the trace_array descriptor as well as the index
6259 * to the flag that the trace option file represents, the trace_array
6260 * has a character array of trace_flags_index[], which holds the index
6261 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6262 * The address of this character array is passed to the flag option file
6263 * read/write callbacks.
6265 * In order to extract both the index and the trace_array descriptor,
6266 * get_tr_index() uses the following algorithm.
6268 *   idx = *ptr;
6270 * As the pointer itself contains the address of the index (remember
6271 * index[1] == 1).
6273 * Then, to get to the start of the index array itself, we subtract
6274 * that index from the ptr.
6276 * ptr - idx == &index[0]
6278 * Then a simple container_of() from that pointer gets us to the
6279 * trace_array descriptor.
6281 static void get_tr_index(void *data, struct trace_array **ptr,
6282 unsigned int *pindex)
6284 *pindex = *(unsigned char *)data;
6286 *ptr = container_of(data - *pindex, struct trace_array,
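/*
 * Worked example (hypothetical addresses): if trace_flags_index[]
 * starts at 0x1000, the option file for flag 3 is handed
 * data == 0x1003, so *(unsigned char *)data == 3 and
 * data - 3 == 0x1000 == &trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.
 */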
6291 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6294 void *tr_index = filp->private_data;
6295 struct trace_array *tr;
6299 get_tr_index(tr_index, &tr, &index);
6301 if (tr->trace_flags & (1 << index))
6306 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6310 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6313 void *tr_index = filp->private_data;
6314 struct trace_array *tr;
6319 get_tr_index(tr_index, &tr, &index);
6321 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6325 if (val != 0 && val != 1)
6328 mutex_lock(&trace_types_lock);
6329 ret = set_tracer_flag(tr, 1 << index, val);
6330 mutex_unlock(&trace_types_lock);
6340 static const struct file_operations trace_options_core_fops = {
6341 .open = tracing_open_generic,
6342 .read = trace_options_core_read,
6343 .write = trace_options_core_write,
6344 .llseek = generic_file_llseek,
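/*
 * Usage sketch (illustrative): each core flag becomes a boolean file
 * under options/, backed by the read/write callbacks above:
 *
 *     cat /sys/kernel/tracing/options/sym-offset
 *     echo 1 > /sys/kernel/tracing/options/sym-offset
 *
 * Anything other than 0 or 1 is rejected with -EINVAL.
 */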
6347 struct dentry *trace_create_file(const char *name,
6349 struct dentry *parent,
6351 const struct file_operations *fops)
6355 ret = tracefs_create_file(name, mode, parent, data, fops);
6357 pr_warn("Could not create tracefs '%s' entry\n", name);
6363 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6365 struct dentry *d_tracer;
6370 d_tracer = tracing_get_dentry(tr);
6371 if (IS_ERR(d_tracer))
6374 tr->options = tracefs_create_dir("options", d_tracer);
6376 pr_warn("Could not create tracefs directory 'options'\n");
6384 create_trace_option_file(struct trace_array *tr,
6385 struct trace_option_dentry *topt,
6386 struct tracer_flags *flags,
6387 struct tracer_opt *opt)
6389 struct dentry *t_options;
6391 t_options = trace_options_init_dentry(tr);
6395 topt->flags = flags;
6399 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6400 &trace_options_fops);
6405 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6407 struct trace_option_dentry *topts;
6408 struct trace_options *tr_topts;
6409 struct tracer_flags *flags;
6410 struct tracer_opt *opts;
6417 flags = tracer->flags;
6419 if (!flags || !flags->opts)
6423 * If this is an instance, only create flags for tracers
6424 * the instance may have.
6426 if (!trace_ok_for_array(tracer, tr))
6429 for (i = 0; i < tr->nr_topts; i++) {
6430 /* Make sure there's no duplicate flags. */
6431 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
6437 for (cnt = 0; opts[cnt].name; cnt++)
6440 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6444 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6451 tr->topts = tr_topts;
6452 tr->topts[tr->nr_topts].tracer = tracer;
6453 tr->topts[tr->nr_topts].topts = topts;
6456 for (cnt = 0; opts[cnt].name; cnt++) {
6457 create_trace_option_file(tr, &topts[cnt], flags,
6459 WARN_ONCE(topts[cnt].entry == NULL,
6460 "Failed to create trace option: %s",
6465 static struct dentry *
6466 create_trace_option_core_file(struct trace_array *tr,
6467 const char *option, long index)
6469 struct dentry *t_options;
6471 t_options = trace_options_init_dentry(tr);
6475 return trace_create_file(option, 0644, t_options,
6476 (void *)&tr->trace_flags_index[index],
6477 &trace_options_core_fops);
6480 static void create_trace_options_dir(struct trace_array *tr)
6482 struct dentry *t_options;
6483 bool top_level = tr == &global_trace;
6486 t_options = trace_options_init_dentry(tr);
6490 for (i = 0; trace_options[i]; i++) {
6492 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6493 create_trace_option_core_file(tr, trace_options[i], i);
6498 rb_simple_read(struct file *filp, char __user *ubuf,
6499 size_t cnt, loff_t *ppos)
6501 struct trace_array *tr = filp->private_data;
6505 r = tracer_tracing_is_on(tr);
6506 r = sprintf(buf, "%d\n", r);
6508 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6512 rb_simple_write(struct file *filp, const char __user *ubuf,
6513 size_t cnt, loff_t *ppos)
6515 struct trace_array *tr = filp->private_data;
6516 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6520 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6525 mutex_lock(&trace_types_lock);
6527 tracer_tracing_on(tr);
6528 if (tr->current_trace->start)
6529 tr->current_trace->start(tr);
6531 tracer_tracing_off(tr);
6532 if (tr->current_trace->stop)
6533 tr->current_trace->stop(tr);
6535 mutex_unlock(&trace_types_lock);
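/*
 * Usage sketch (illustrative): tracing_on only gates recording into
 * the ring buffer; the trace files remain readable:
 *
 *     echo 0 > /sys/kernel/tracing/tracing_on    stop recording
 *     cat /sys/kernel/tracing/trace              buffer is still readable
 *     echo 1 > /sys/kernel/tracing/tracing_on    resume recording
 */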
6543 static const struct file_operations rb_simple_fops = {
6544 .open = tracing_open_generic_tr,
6545 .read = rb_simple_read,
6546 .write = rb_simple_write,
6547 .release = tracing_release_generic_tr,
6548 .llseek = default_llseek,
6551 struct dentry *trace_instance_dir;
6554 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6557 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6559 enum ring_buffer_flags rb_flags;
6561 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6565 buf->buffer = ring_buffer_alloc(size, rb_flags);
6569 buf->data = alloc_percpu(struct trace_array_cpu);
6571 ring_buffer_free(buf->buffer);
6575 /* Allocate the first page for all buffers */
6576 set_buffer_entries(&tr->trace_buffer,
6577 ring_buffer_size(tr->trace_buffer.buffer, 0));
6582 static int allocate_trace_buffers(struct trace_array *tr, int size)
6586 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6590 #ifdef CONFIG_TRACER_MAX_TRACE
6591 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6592 allocate_snapshot ? size : 1);
6594 ring_buffer_free(tr->trace_buffer.buffer);
6595 free_percpu(tr->trace_buffer.data);
6598 tr->allocated_snapshot = allocate_snapshot;
6601 * Only the top level trace array gets its snapshot allocated
6602 * from the kernel command line.
6604 allocate_snapshot = false;
6609 static void free_trace_buffer(struct trace_buffer *buf)
6612 ring_buffer_free(buf->buffer);
6614 free_percpu(buf->data);
6619 static void free_trace_buffers(struct trace_array *tr)
6624 free_trace_buffer(&tr->trace_buffer);
6626 #ifdef CONFIG_TRACER_MAX_TRACE
6627 free_trace_buffer(&tr->max_buffer);
6631 static void init_trace_flags_index(struct trace_array *tr)
6635 /* Used by the trace options files */
6636 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6637 tr->trace_flags_index[i] = i;
6640 static void __update_tracer_options(struct trace_array *tr)
6644 for (t = trace_types; t; t = t->next)
6645 add_tracer_options(tr, t);
6648 static void update_tracer_options(struct trace_array *tr)
6650 mutex_lock(&trace_types_lock);
6651 __update_tracer_options(tr);
6652 mutex_unlock(&trace_types_lock);
6655 static int instance_mkdir(const char *name)
6657 struct trace_array *tr;
6660 mutex_lock(&trace_types_lock);
6663 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6664 if (tr->name && strcmp(tr->name, name) == 0)
6669 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6673 tr->name = kstrdup(name, GFP_KERNEL);
6677 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6680 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
6682 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6684 raw_spin_lock_init(&tr->start_lock);
6686 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6688 tr->current_trace = &nop_trace;
6690 INIT_LIST_HEAD(&tr->systems);
6691 INIT_LIST_HEAD(&tr->events);
6693 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6696 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6700 ret = event_trace_add_tracer(tr->dir, tr);
6702 tracefs_remove_recursive(tr->dir);
6706 init_tracer_tracefs(tr, tr->dir);
6707 init_trace_flags_index(tr);
6708 __update_tracer_options(tr);
6710 list_add(&tr->list, &ftrace_trace_arrays);
6712 mutex_unlock(&trace_types_lock);
6717 free_trace_buffers(tr);
6718 free_cpumask_var(tr->tracing_cpumask);
6723 mutex_unlock(&trace_types_lock);
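/*
 * Usage sketch (illustrative): instances are created and removed with
 * plain mkdir/rmdir in tracefs, which lands in instance_mkdir() above
 * and instance_rmdir() below:
 *
 *     mkdir /sys/kernel/tracing/instances/foo
 *     echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *     rmdir /sys/kernel/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance is still referenced.
 */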
6729 static int instance_rmdir(const char *name)
6731 struct trace_array *tr;
6736 mutex_lock(&trace_types_lock);
6739 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6740 if (tr->name && strcmp(tr->name, name) == 0) {
6749 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6752 list_del(&tr->list);
6754 /* Disable all the flags that were enabled coming in */
6755 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
6756 if ((1 << i) & ZEROED_TRACE_FLAGS)
6757 set_tracer_flag(tr, 1 << i, 0);
6760 tracing_set_nop(tr);
6761 event_trace_del_tracer(tr);
6762 ftrace_destroy_function_files(tr);
6763 tracefs_remove_recursive(tr->dir);
6764 free_trace_buffers(tr);
6766 for (i = 0; i < tr->nr_topts; i++) {
6767 kfree(tr->topts[i].topts);
6777 mutex_unlock(&trace_types_lock);
6782 static __init void create_trace_instances(struct dentry *d_tracer)
6784 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6787 if (WARN_ON(!trace_instance_dir))
6792 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6796 trace_create_file("available_tracers", 0444, d_tracer,
6797 tr, &show_traces_fops);
6799 trace_create_file("current_tracer", 0644, d_tracer,
6800 tr, &set_tracer_fops);
6802 trace_create_file("tracing_cpumask", 0644, d_tracer,
6803 tr, &tracing_cpumask_fops);
6805 trace_create_file("trace_options", 0644, d_tracer,
6806 tr, &tracing_iter_fops);
6808 trace_create_file("trace", 0644, d_tracer,
6811 trace_create_file("trace_pipe", 0444, d_tracer,
6812 tr, &tracing_pipe_fops);
6814 trace_create_file("buffer_size_kb", 0644, d_tracer,
6815 tr, &tracing_entries_fops);
6817 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6818 tr, &tracing_total_entries_fops);
6820 trace_create_file("free_buffer", 0200, d_tracer,
6821 tr, &tracing_free_buffer_fops);
6823 trace_create_file("trace_marker", 0220, d_tracer,
6824 tr, &tracing_mark_fops);
6826 trace_create_file("trace_clock", 0644, d_tracer, tr,
6829 trace_create_file("tracing_on", 0644, d_tracer,
6830 tr, &rb_simple_fops);
6832 create_trace_options_dir(tr);
6834 #ifdef CONFIG_TRACER_MAX_TRACE
6835 trace_create_file("tracing_max_latency", 0644, d_tracer,
6836 &tr->max_latency, &tracing_max_lat_fops);
6839 if (ftrace_create_function_files(tr, d_tracer))
6840 WARN(1, "Could not allocate function filter files");
6842 #ifdef CONFIG_TRACER_SNAPSHOT
6843 trace_create_file("snapshot", 0644, d_tracer,
6844 tr, &snapshot_fops);
6847 for_each_tracing_cpu(cpu)
6848 tracing_init_tracefs_percpu(tr, cpu);
6852 static struct vfsmount *trace_automount(void *ignore)
6854 struct vfsmount *mnt;
6855 struct file_system_type *type;
6858 * To maintain backward compatibility for tools that mount
6859 * debugfs to get to the tracing facility, tracefs is automatically
6860 * mounted to the debugfs/tracing directory.
6862 type = get_fs_type("tracefs");
6865 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6866 put_filesystem(type);
6875 * tracing_init_dentry - initialize top level trace array
6877 * This is called when creating files or directories in the tracing
6878 * directory. It is called via fs_initcall() by any of the boot up code
6879 * and expects to return the dentry of the top level tracing directory.
6881 struct dentry *tracing_init_dentry(void)
6883 struct trace_array *tr = &global_trace;
6885 /* The top level trace array uses NULL as parent */
6889 if (WARN_ON(!tracefs_initialized()) ||
6890 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6891 WARN_ON(!debugfs_initialized())))
6892 return ERR_PTR(-ENODEV);
6895 * As there may still be users that expect the tracing
6896 * files to exist in debugfs/tracing, we must automount
6897 * the tracefs file system there, so older tools still
6898 * work with the newer kernel.
6900 tr->dir = debugfs_create_automount("tracing", NULL,
6901 trace_automount, NULL);
6903 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6904 return ERR_PTR(-ENOMEM);
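/*
 * Illustrative mount commands: tracefs is normally automounted as
 * described above, but it can also be mounted explicitly:
 *
 *     mount -t tracefs nodev /sys/kernel/tracing
 *
 * while older tools keep working through /sys/kernel/debug/tracing
 * once debugfs is mounted.
 */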
6910 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6911 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6913 static void __init trace_enum_init(void)
6917 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6918 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6921 #ifdef CONFIG_MODULES
6922 static void trace_module_add_enums(struct module *mod)
6924 if (!mod->num_trace_enums)
6928 * Modules with bad taint do not have events created; do not
6929 * bother with their enums either.
6931 if (trace_module_has_bad_taint(mod))
6934 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6937 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6938 static void trace_module_remove_enums(struct module *mod)
6940 union trace_enum_map_item *map;
6941 union trace_enum_map_item **last = &trace_enum_maps;
6943 if (!mod->num_trace_enums)
6946 mutex_lock(&trace_enum_mutex);
6948 map = trace_enum_maps;
6951 if (map->head.mod == mod)
6953 map = trace_enum_jmp_to_tail(map);
6954 last = &map->tail.next;
6955 map = map->tail.next;
6960 *last = trace_enum_jmp_to_tail(map)->tail.next;
6963 mutex_unlock(&trace_enum_mutex);
6966 static inline void trace_module_remove_enums(struct module *mod) { }
6967 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6969 static int trace_module_notify(struct notifier_block *self,
6970 unsigned long val, void *data)
6972 struct module *mod = data;
6975 case MODULE_STATE_COMING:
6976 trace_module_add_enums(mod);
6978 case MODULE_STATE_GOING:
6979 trace_module_remove_enums(mod);
6986 static struct notifier_block trace_module_nb = {
6987 .notifier_call = trace_module_notify,
6990 #endif /* CONFIG_MODULES */
6992 static __init int tracer_init_tracefs(void)
6994 struct dentry *d_tracer;
6996 trace_access_lock_init();
6998 d_tracer = tracing_init_dentry();
6999 if (IS_ERR(d_tracer))
7002 init_tracer_tracefs(&global_trace, d_tracer);
7004 trace_create_file("tracing_thresh", 0644, d_tracer,
7005 &global_trace, &tracing_thresh_fops);
7007 trace_create_file("README", 0444, d_tracer,
7008 NULL, &tracing_readme_fops);
7010 trace_create_file("saved_cmdlines", 0444, d_tracer,
7011 NULL, &tracing_saved_cmdlines_fops);
7013 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7014 NULL, &tracing_saved_cmdlines_size_fops);
7018 trace_create_enum_file(d_tracer);
7020 #ifdef CONFIG_MODULES
7021 register_module_notifier(&trace_module_nb);
7024 #ifdef CONFIG_DYNAMIC_FTRACE
7025 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7026 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7029 create_trace_instances(d_tracer);
7031 update_tracer_options(&global_trace);
7036 static int trace_panic_handler(struct notifier_block *this,
7037 unsigned long event, void *unused)
7039 if (ftrace_dump_on_oops)
7040 ftrace_dump(ftrace_dump_on_oops);
7044 static struct notifier_block trace_panic_notifier = {
7045 .notifier_call = trace_panic_handler,
7047 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7050 static int trace_die_handler(struct notifier_block *self,
7056 if (ftrace_dump_on_oops)
7057 ftrace_dump(ftrace_dump_on_oops);
7065 static struct notifier_block trace_die_notifier = {
7066 .notifier_call = trace_die_handler,
7071 * printk is limited to a max of 1024 characters; we really don't
7072 * need it that big, as nothing should be printing 1000 characters anyway.
7074 #define TRACE_MAX_PRINT 1000
7077 * Define here KERN_TRACE so that we have one place to modify
7078 * it if we decide to change what log level the ftrace dump
7081 #define KERN_TRACE KERN_EMERG
7084 trace_printk_seq(struct trace_seq *s)
7086 /* Probably should print a warning here. */
7087 if (s->seq.len >= TRACE_MAX_PRINT)
7088 s->seq.len = TRACE_MAX_PRINT;
7091 * More paranoid code. Although the buffer size is set to
7092 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7093 * an extra layer of protection.
7095 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7096 s->seq.len = s->seq.size - 1;
7098 /* should be zero terminated, but we are paranoid. */
7099 s->buffer[s->seq.len] = 0;
7101 printk(KERN_TRACE "%s", s->buffer);
7106 void trace_init_global_iter(struct trace_iterator *iter)
7108 iter->tr = &global_trace;
7109 iter->trace = iter->tr->current_trace;
7110 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7111 iter->trace_buffer = &global_trace.trace_buffer;
7113 if (iter->trace && iter->trace->open)
7114 iter->trace->open(iter);
7116 /* Annotate start of buffers if we had overruns */
7117 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7118 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7120 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7121 if (trace_clocks[iter->tr->clock_id].in_ns)
7122 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7125 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7127 /* use static because iter can be a bit big for the stack */
7128 static struct trace_iterator iter;
7129 static atomic_t dump_running;
7130 struct trace_array *tr = &global_trace;
7131 unsigned int old_userobj;
7132 unsigned long flags;
7135 /* Only allow one dump user at a time. */
7136 if (atomic_inc_return(&dump_running) != 1) {
7137 atomic_dec(&dump_running);
7142 * Always turn off tracing when we dump.
7143 * We don't need to show trace output of what happens
7144 * between multiple crashes.
7146 * If the user does a sysrq-z, then they can re-enable
7147 * tracing with echo 1 > tracing_on.
7151 local_irq_save(flags);
7153 /* Simulate the iterator */
7154 trace_init_global_iter(&iter);
7156 for_each_tracing_cpu(cpu) {
7157 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7160 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7162 /* don't look at user memory in panic mode */
7163 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7165 switch (oops_dump_mode) {
7167 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7170 iter.cpu_file = raw_smp_processor_id();
7175 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7176 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7179 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7181 /* Did function tracer already get disabled? */
7182 if (ftrace_is_dead()) {
7183 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7184 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7188 * We need to stop all tracing on all CPUs to read
7189 * the next buffer. This is a bit expensive, but is
7190 * not done often. We fill in all that we can read,
7191 * and then release the locks again.
7194 while (!trace_empty(&iter)) {
7197 printk(KERN_TRACE "---------------------------------\n");
7201 /* reset all but tr, trace, and overruns */
7202 memset(&iter.seq, 0,
7203 sizeof(struct trace_iterator) -
7204 offsetof(struct trace_iterator, seq));
7205 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7208 if (trace_find_next_entry_inc(&iter) != NULL) {
7211 ret = print_trace_line(&iter);
7212 if (ret != TRACE_TYPE_NO_CONSUME)
7213 trace_consume(&iter);
7215 touch_nmi_watchdog();
7217 trace_printk_seq(&iter.seq);
7221 printk(KERN_TRACE " (ftrace buffer empty)\n");
7223 printk(KERN_TRACE "---------------------------------\n");
7226 tr->trace_flags |= old_userobj;
7228 for_each_tracing_cpu(cpu) {
7229 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7231 atomic_dec(&dump_running);
7232 local_irq_restore(flags);
7234 EXPORT_SYMBOL_GPL(ftrace_dump);
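/*
 * Usage sketch (illustrative): besides the panic/die notifiers below,
 * ftrace_dump() can be triggered manually via sysrq:
 *
 *     echo z > /proc/sysrq-trigger
 *
 * which dumps the ftrace ring buffer contents to the console.
 */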
7236 __init static int tracer_alloc_buffers(void)
7242 * Make sure we don't accidentally add more trace options
7243 * than we have bits for.
7245 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7247 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7250 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7251 goto out_free_buffer_mask;
7253 /* Only allocate trace_printk buffers if a trace_printk exists */
7254 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7255 /* Must be called before global_trace.buffer is allocated */
7256 trace_printk_init_buffers();
7258 /* To save memory, keep the ring buffer size to its minimum */
7259 if (ring_buffer_expanded)
7260 ring_buf_size = trace_buf_size;
7264 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7265 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7267 raw_spin_lock_init(&global_trace.start_lock);
7269 /* Used for event triggers */
7270 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7272 goto out_free_cpumask;
7274 if (trace_create_savedcmd() < 0)
7275 goto out_free_temp_buffer;
7277 /* TODO: make the number of buffers hot pluggable with CPUS */
7278 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7279 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7281 goto out_free_savedcmd;
7284 if (global_trace.buffer_disabled)
7287 if (trace_boot_clock) {
7288 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7290 pr_warn("Trace clock %s not defined, going back to default\n",
7295 * register_tracer() might reference current_trace, so it
7296 * needs to be set before we register anything. This is
7297 * just a bootstrap of current_trace anyway.
7299 global_trace.current_trace = &nop_trace;
7301 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7303 ftrace_init_global_array_ops(&global_trace);
7305 init_trace_flags_index(&global_trace);
7307 register_tracer(&nop_trace);
7309 /* All seems OK, enable tracing */
7310 tracing_disabled = 0;
7312 atomic_notifier_chain_register(&panic_notifier_list,
7313 &trace_panic_notifier);
7315 register_die_notifier(&trace_die_notifier);
7317 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7319 INIT_LIST_HEAD(&global_trace.systems);
7320 INIT_LIST_HEAD(&global_trace.events);
7321 list_add(&global_trace.list, &ftrace_trace_arrays);
7323 apply_trace_boot_options();
7325 register_snapshot_cmd();
7330 free_saved_cmdlines_buffer(savedcmd);
7331 out_free_temp_buffer:
7332 ring_buffer_free(temp_buffer);
7334 free_cpumask_var(global_trace.tracing_cpumask);
7335 out_free_buffer_mask:
7336 free_cpumask_var(tracing_buffer_mask);
7341 void __init trace_init(void)
7343 if (tracepoint_printk) {
7344 tracepoint_print_iter =
7345 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7346 if (WARN_ON(!tracepoint_print_iter))
7347 tracepoint_printk = 0;
7349 tracer_alloc_buffers();
7353 __init static int clear_boot_tracer(void)
7356 * The default bootup tracer name lives in an init section.
7357 * This function is called from a late initcall. If we did not
7358 * find the boot tracer, clear the pointer out, to prevent a
7359 * later registration from accessing the buffer that is
7360 * about to be freed.
7362 if (!default_bootup_tracer)
7365 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7366 default_bootup_tracer);
7367 default_bootup_tracer = NULL;
7372 fs_initcall(tracer_init_tracefs);
7373 late_initcall(clear_boot_tracer);