/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, but concurrent insertions into the
 * ring buffer, such as from trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
    { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
    return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set to 1 if you want to dump the buffers of all CPUs.
 * Set to 2 if you want to dump only the buffer of the CPU that
 * triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
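/*
 * Illustrative usage (editorial note, not in the original file): boot
 * with "ftrace_dump_on_oops" or "ftrace_dump_on_oops=orig_cpu", or flip
 * the sysctl at run time:
 *
 *    echo 1 > /proc/sys/kernel/ftrace_dump_on_oops    # dump all CPUs
 *    echo 2 > /proc/sys/kernel/ftrace_dump_on_oops    # dump the oops CPU only
 */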
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
    struct module *mod;
    unsigned long length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
    /*
     * "end" is first and points to NULL as it must be different
     * from "mod" or "enum_string"
     */
    union trace_enum_map_item *next;
    const char *end;    /* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
    struct trace_enum_map map;
    struct trace_enum_map_head head;
    struct trace_enum_map_tail tail;
};

static union trace_enum_map_item *trace_enum_maps;
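/*
 * Minimal illustrative sketch (editorial, not part of the original
 * file; the helper name is hypothetical): given the layout described
 * above, the tail item of a saved map array sits one slot past the
 * head item plus head.length map entries.
 */
static inline union trace_enum_map_item *
example_enum_map_tail(union trace_enum_map_item *head)
{
    /* skip the head item itself, then head.length map entries */
    return head + head->head.length + 1;
}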
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
    strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
    default_bootup_tracer = bootup_tracer_buf;
    /* We are using ftrace early, expand it */
    ring_buffer_expanded = true;
    return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
    if (*str++ != '=' || !*str) {
        ftrace_dump_on_oops = DUMP_ALL;
        return 1;
    }

    if (!strcmp("orig_cpu", str)) {
        ftrace_dump_on_oops = DUMP_ORIG;
        return 1;
    }

    return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
    if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
        __disable_trace_on_warning = 1;
    return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
    allocate_snapshot = true;
    /* We also need the main ring buffer expanded */
    ring_buffer_expanded = true;
    return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
    strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
    return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
    strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
    trace_boot_clock = trace_boot_clock_buf;
    return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
    if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
        tracepoint_printk = 1;
    return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
    nsec += 500;
    do_div(nsec, 1000);
    return nsec;
}
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                        \
    (FUNCTION_DEFAULT_FLAGS |                    \
     TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |        \
     TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |        \
     TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |        \
     TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |        \
           TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
    TRACE_ITER_EVENT_FORK
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
    .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
    struct trace_array *tr;
    int ret = -ENODEV;

    mutex_lock(&trace_types_lock);
    list_for_each_entry(tr, &ftrace_trace_arrays, list) {
        if (tr == this_tr) {
            tr->ref++;
            ret = 0;
            break;
        }
    }
    mutex_unlock(&trace_types_lock);

    return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
    WARN_ON(!this_tr->ref);
    this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
    mutex_lock(&trace_types_lock);
    __trace_array_put(this_tr);
    mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                  struct ring_buffer *buffer,
                  struct ring_buffer_event *event)
{
    if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
        !filter_match_preds(call->filter, rec)) {
        ring_buffer_discard_commit(buffer, event);
        return 1;
    }

    return 0;
}

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
    u64 ts;

    /* Early boot up does not have a buffer yet */
    if (!buf->buffer)
        return trace_clock_local();

    ts = ring_buffer_time_stamp(buf->buffer, cpu);
    ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

    return ts;
}

cycle_t ftrace_now(int cpu)
{
    return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
    /*
     * For quick access (irqsoff uses this in fast path), just
     * return the mirror variable of the state of the ring buffer.
     * It's a little racy, but we don't really care.
     */
    smp_rmb();
    return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
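/*
 * Illustrative arithmetic (editorial note, not in the original file),
 * assuming the 88-byte entry size named above: 16384 entries * 88 bytes
 * = 1441792 bytes, i.e. roughly 1.4 MB per CPU until the buffer is
 * explicitly expanded.
 */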
/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
    if (cpu == RING_BUFFER_ALL_CPUS) {
        /* gain it for accessing the whole ring buffer. */
        down_write(&all_cpu_access_lock);
    } else {
        /* gain it for accessing a cpu ring buffer. */

        /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
        down_read(&all_cpu_access_lock);

        /* Secondly block other access to this @cpu ring buffer. */
        mutex_lock(&per_cpu(cpu_access_lock, cpu));
    }
}

static inline void trace_access_unlock(int cpu)
{
    if (cpu == RING_BUFFER_ALL_CPUS) {
        up_write(&all_cpu_access_lock);
    } else {
        mutex_unlock(&per_cpu(cpu_access_lock, cpu));
        up_read(&all_cpu_access_lock);
    }
}

static inline void trace_access_lock_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu)
        mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
    (void)cpu;
    mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
    (void)cpu;
    mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
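/*
 * Illustrative usage sketch (editorial, not part of the original file;
 * the helper name is hypothetical). A reader of a single CPU buffer
 * takes the per-cpu path, while passing RING_BUFFER_ALL_CPUS locks out
 * every other reader.
 */
static inline void example_consume_cpu(int cpu)
{
    trace_access_lock(cpu);
    /* ... peek at or consume events of @cpu's buffer here ... */
    trace_access_unlock(cpu);
}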
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                 unsigned long flags,
                 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                      struct ring_buffer *buffer,
                      unsigned long flags,
                      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
                    unsigned long flags,
                    int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                      struct ring_buffer *buffer,
                      unsigned long flags,
                      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
    if (tr->trace_buffer.buffer)
        ring_buffer_record_on(tr->trace_buffer.buffer);
    /*
     * This flag is looked at when buffers haven't been allocated
     * yet, or by some tracers (like irqsoff), that just want to
     * know if the ring buffer has been disabled, but it can handle
     * races where it gets disabled but we still do a record.
     * As the check is in the fast path of the tracers, it is more
     * important to be fast than accurate.
     */
    tr->buffer_disabled = 0;
    /* Make the flag seen by readers */
    smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
    tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
    struct ring_buffer_event *event;
    struct ring_buffer *buffer;
    struct print_entry *entry;
    unsigned long irq_flags;
    int alloc;
    int pc;

    if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
        return 0;

    pc = preempt_count();

    if (unlikely(tracing_selftest_running || tracing_disabled))
        return 0;

    alloc = sizeof(*entry) + size + 2; /* possible \n added */

    local_save_flags(irq_flags);
    buffer = global_trace.trace_buffer.buffer;
    event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                      irq_flags, pc);
    if (!event)
        return 0;

    entry = ring_buffer_event_data(event);
    entry->ip = ip;

    memcpy(&entry->buf, str, size);

    /* Add a newline if necessary */
    if (entry->buf[size - 1] != '\n') {
        entry->buf[size] = '\n';
        entry->buf[size + 1] = '\0';
    } else
        entry->buf[size] = '\0';

    __buffer_unlock_commit(buffer, event);
    ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

    return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
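/*
 * Editorial note (not in the original file): callers normally reach
 * this through the trace_puts() macro, which picks __trace_bputs() for
 * string literals and falls back to __trace_puts() otherwise, e.g.:
 *
 *    trace_puts("reached the slow path\n");
 */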
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
    struct ring_buffer_event *event;
    struct ring_buffer *buffer;
    struct bputs_entry *entry;
    unsigned long irq_flags;
    int size = sizeof(struct bputs_entry);
    int pc;

    if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
        return 0;

    pc = preempt_count();

    if (unlikely(tracing_selftest_running || tracing_disabled))
        return 0;

    local_save_flags(irq_flags);
    buffer = global_trace.trace_buffer.buffer;
    event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                      irq_flags, pc);
    if (!event)
        return 0;

    entry = ring_buffer_event_data(event);
    entry->ip  = ip;
    entry->str = str;

    __buffer_unlock_commit(buffer, event);
    ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

    return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
    struct trace_array *tr = &global_trace;
    struct tracer *tracer = tr->current_trace;
    unsigned long flags;

    if (in_nmi()) {
        internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
        internal_trace_puts("*** snapshot is being ignored        ***\n");
        return;
    }

    if (!tr->allocated_snapshot) {
        internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
        internal_trace_puts("*** stopping trace here!   ***\n");
        tracing_off();
        return;
    }

    /* Note, snapshot can not be used when the tracer uses it */
    if (tracer->use_max_tr) {
        internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
        internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
        return;
    }

    local_irq_save(flags);
    update_max_tr(tr, current, smp_processor_id());
    local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                    struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
    int ret;

    if (!tr->allocated_snapshot) {

        /* allocate spare buffer */
        ret = resize_buffer_duplicate_size(&tr->max_buffer,
                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
        if (ret < 0)
            return ret;

        tr->allocated_snapshot = true;
    }

    return 0;
}

static void free_snapshot(struct trace_array *tr)
{
    /*
     * We don't free the ring buffer. Instead, resize it because
     * the max_tr ring buffer has some state (e.g. ring->clock) and
     * we want to preserve it.
     */
    ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
    set_buffer_entries(&tr->max_buffer, 1);
    tracing_reset_online_cpus(&tr->max_buffer);
    tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
    struct trace_array *tr = &global_trace;
    int ret;

    ret = alloc_snapshot(tr);
    WARN_ON(ret < 0);

    return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
    int ret;

    ret = tracing_alloc_snapshot();
    if (ret < 0)
        return;

    tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
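/*
 * Illustrative usage sketch (editorial, not part of the original file;
 * the helper name is hypothetical). Allocate the snapshot buffer up
 * front from a sleepable context, then trigger the swap wherever the
 * interesting condition fires.
 */
static inline void example_snapshot_on_condition(bool cond)
{
    if (tracing_alloc_snapshot() < 0)    /* may sleep */
        return;
    if (cond)
        tracing_snapshot();    /* swaps live and snapshot buffers */
}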
#else
void tracing_snapshot(void)
{
    WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
    WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
    return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
    /* Give warning */
    tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
static void tracer_tracing_off(struct trace_array *tr)
{
    if (tr->trace_buffer.buffer)
        ring_buffer_record_off(tr->trace_buffer.buffer);
    /*
     * This flag is looked at when buffers haven't been allocated
     * yet, or by some tracers (like irqsoff), that just want to
     * know if the ring buffer has been disabled, but it can handle
     * races where it gets disabled but we still do a record.
     * As the check is in the fast path of the tracers, it is more
     * important to be fast than accurate.
     */
    tr->buffer_disabled = 1;
    /* Make the flag seen by readers */
    smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
    tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
    if (__disable_trace_on_warning)
        tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
    if (tr->trace_buffer.buffer)
        return ring_buffer_record_is_on(tr->trace_buffer.buffer);
    return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
    return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
    unsigned long buf_size;

    if (!str)
        return 0;
    buf_size = memparse(str, &str);
    /* nr_entries can not be zero */
    if (buf_size == 0)
        return 0;
    trace_buf_size = buf_size;
    return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
    unsigned long threshold;
    int ret;

    if (!str)
        return 0;
    ret = kstrtoul(str, 0, &threshold);
    if (ret < 0)
        return 0;
    tracing_thresh = threshold * 1000;
    return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
    return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
    TRACE_FLAGS
    NULL
};
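/*
 * Illustrative expansion (editorial note, not in the original file): if
 * TRACE_FLAGS were, hypothetically,
 *    C(PRINTK, "trace_printk"), C(MARKERS, "markers"),
 * then with "#define C(a, b) b" the array above becomes
 *    { "trace_printk", "markers", NULL };
 * while other code can reuse the same list with a different C() to
 * build the matching enum of bit positions.
 */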
static struct {
    u64 (*func)(void);
    const char *name;
    int in_ns;    /* is this clock in nanoseconds? */
} trace_clocks[] = {
    { trace_clock_local,        "local",    1 },
    { trace_clock_global,        "global",    1 },
    { trace_clock_counter,        "counter",    0 },
    { trace_clock_jiffies,        "uptime",    0 },
    { trace_clock,            "perf",        1 },
    { ktime_get_mono_fast_ns,    "mono",        1 },
    { ktime_get_raw_fast_ns,    "mono_raw",    1 },
    ARCH_TRACE_CLOCKS
};

/**
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
    memset(parser, 0, sizeof(*parser));

    parser->buffer = kmalloc(size, GFP_KERNEL);
    if (!parser->buffer)
        return 1;

    parser->size = size;
    return 0;
}

/**
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
    kfree(parser->buffer);
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
    size_t cnt, loff_t *ppos)
{
    char ch;
    size_t read = 0;
    ssize_t ret;

    if (!*ppos)
        trace_parser_clear(parser);

    ret = get_user(ch, ubuf++);
    if (ret)
        goto out;

    read++;
    cnt--;

    /*
     * If the parser hasn't finished with the last write,
     * continue reading the user input without skipping spaces.
     */
    if (!parser->cont) {
        /* skip white space */
        while (cnt && isspace(ch)) {
            ret = get_user(ch, ubuf++);
            if (ret)
                goto out;
            read++;
            cnt--;
        }

        /* only spaces were written */
        if (isspace(ch)) {
            *ppos += read;
            ret = read;
            goto out;
        }

        parser->idx = 0;
    }

    /* read the non-space input */
    while (cnt && !isspace(ch)) {
        if (parser->idx < parser->size - 1)
            parser->buffer[parser->idx++] = ch;
        else {
            ret = -EINVAL;
            goto out;
        }
        ret = get_user(ch, ubuf++);
        if (ret)
            goto out;
        read++;
        cnt--;
    }

    /* We either got finished input or we have to wait for another call. */
    if (isspace(ch)) {
        parser->buffer[parser->idx] = 0;
        parser->cont = false;
    } else if (parser->idx < parser->size - 1) {
        parser->cont = true;
        parser->buffer[parser->idx++] = ch;
    } else {
        ret = -EINVAL;
        goto out;
    }

    *ppos += read;
    ret = read;

out:
    return ret;
}
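/*
 * Illustrative behaviour (editorial note, not in the original file):
 * a user write of "foo bar" parses to "foo" on the first call (with
 * parser->cont false once the trailing space is seen) and "bar" on the
 * next; a write cut off mid-word sets parser->cont so the following
 * call resumes the token without skipping spaces.
 */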
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
    int len;

    if (trace_seq_used(s) <= s->seq.readpos)
        return -EBUSY;

    len = trace_seq_used(s) - s->seq.readpos;
    if (cnt > len)
        cnt = len;
    memcpy(buf, s->buffer + s->seq.readpos, cnt);

    s->seq.readpos += cnt;
    return cnt;
}

unsigned long __read_mostly tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
    struct trace_buffer *trace_buf = &tr->trace_buffer;
    struct trace_buffer *max_buf = &tr->max_buffer;
    struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
    struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

    max_buf->cpu = cpu;
    max_buf->time_start = data->preempt_timestamp;

    max_data->saved_latency = tr->max_latency;
    max_data->critical_start = data->critical_start;
    max_data->critical_end = data->critical_end;

    memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
    max_data->pid = tsk->pid;
    /*
     * If tsk == current, then use current_uid(), as that does not use
     * RCU. The irq tracer can be called out of RCU scope.
     */
    if (tsk == current)
        max_data->uid = current_uid();
    else
        max_data->uid = task_uid(tsk);

    max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
    max_data->policy = tsk->policy;
    max_data->rt_priority = tsk->rt_priority;

    /* record this task's comm */
    tracing_record_cmdline(tsk);
}
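/*
 * Illustrative arithmetic (editorial note, not in the original file):
 * with MAX_RT_PRIO == 100, the default static_prio of 120 maps to
 * nice 120 - 20 - 100 = 0, and the full static_prio range 100..139
 * maps to nice -20..19.
 */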
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
    struct ring_buffer *buf;

    if (tr->stop_count)
        return;

    WARN_ON_ONCE(!irqs_disabled());

    if (!tr->allocated_snapshot) {
        /* Only the nop tracer should hit this when disabling */
        WARN_ON_ONCE(tr->current_trace != &nop_trace);
        return;
    }

    arch_spin_lock(&tr->max_lock);

    buf = tr->trace_buffer.buffer;
    tr->trace_buffer.buffer = tr->max_buffer.buffer;
    tr->max_buffer.buffer = buf;

    __update_max_tr(tr, tsk, cpu);
    arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
    int ret;

    if (tr->stop_count)
        return;

    WARN_ON_ONCE(!irqs_disabled());
    if (!tr->allocated_snapshot) {
        /* Only the nop tracer should hit this when disabling */
        WARN_ON_ONCE(tr->current_trace != &nop_trace);
        return;
    }

    arch_spin_lock(&tr->max_lock);

    ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

    if (ret == -EBUSY) {
        /*
         * We failed to swap the buffer due to a commit taking
         * place on this CPU. We fail to record, but we reset
         * the max trace buffer (no one writes directly to it)
         * and flag that it failed.
         */
        trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
            "Failed to swap buffers due to commit in progress\n");
    }

    WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

    __update_max_tr(tr, tsk, cpu);
    arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
    /* Iterators are static, they should be filled or empty */
    if (trace_buffer_iter(iter, iter->cpu_file))
        return 0;

    return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
                full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
    struct trace_array *tr = &global_trace;
    struct tracer *saved_tracer = tr->current_trace;
    int ret;

    if (!type->selftest || tracing_selftest_disabled)
        return 0;

    /*
     * Run a selftest on this tracer.
     * Here we reset the trace buffer, and set the current
     * tracer to be this tracer. The tracer can then run some
     * internal tracing to verify that everything is in order.
     * If we fail, we do not register this tracer.
     */
    tracing_reset_online_cpus(&tr->trace_buffer);

    tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
    if (type->use_max_tr) {
        /* If we expanded the buffers, make sure the max is expanded too */
        if (ring_buffer_expanded)
            ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                       RING_BUFFER_ALL_CPUS);
        tr->allocated_snapshot = true;
    }
#endif

    /* the test is responsible for initializing and enabling */
    pr_info("Testing tracer %s: ", type->name);
    ret = type->selftest(type, tr);
    /* the test is responsible for resetting too */
    tr->current_trace = saved_tracer;
    if (ret) {
        printk(KERN_CONT "FAILED!\n");
        /* Add the warning after printing 'FAILED' */
        WARN_ON(1);
        return -1;
    }
    /* Only reset on passing, to avoid touching corrupted buffers */
    tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
    if (type->use_max_tr) {
        tr->allocated_snapshot = false;

        /* Shrink the max buffer again */
        if (ring_buffer_expanded)
            ring_buffer_resize(tr->max_buffer.buffer, 1,
                       RING_BUFFER_ALL_CPUS);
    }
#endif

    printk(KERN_CONT "PASSED\n");
    return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
    return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
    struct tracer *t;
    int ret = 0;

    if (!type->name) {
        pr_info("Tracer must have a name\n");
        return -1;
    }

    if (strlen(type->name) >= MAX_TRACER_SIZE) {
        pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
        return -1;
    }

    mutex_lock(&trace_types_lock);

    tracing_selftest_running = true;

    for (t = trace_types; t; t = t->next) {
        if (strcmp(type->name, t->name) == 0) {
            /* already found */
            pr_info("Tracer %s already registered\n",
                type->name);
            ret = -1;
            goto out;
        }
    }

    if (!type->set_flag)
        type->set_flag = &dummy_set_flag;
    if (!type->flags) {
        /* allocate a dummy tracer_flags */
        type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
        if (!type->flags) {
            ret = -ENOMEM;
            goto out;
        }
        type->flags->val = 0;
        type->flags->opts = dummy_tracer_opt;
    } else
        if (!type->flags->opts)
            type->flags->opts = dummy_tracer_opt;

    /* store the tracer for __set_tracer_option */
    type->flags->trace = type;

    ret = run_tracer_selftest(type);
    if (ret < 0)
        goto out;

    type->next = trace_types;
    trace_types = type;
    add_tracer_options(&global_trace, type);

out:
    tracing_selftest_running = false;
    mutex_unlock(&trace_types_lock);

    if (ret || !default_bootup_tracer)
        goto out_unlock;

    if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
        goto out_unlock;

    printk(KERN_INFO "Starting tracer '%s'\n", type->name);
    /* Do we want this tracer to start on bootup? */
    tracing_set_tracer(&global_trace, type->name);
    default_bootup_tracer = NULL;

    apply_trace_boot_options();

    /* disable other selftests, since this will break it. */
    tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
    printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
           type->name);
#endif

out_unlock:
    return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
    struct ring_buffer *buffer = buf->buffer;

    if (!buffer)
        return;

    ring_buffer_record_disable(buffer);

    /* Make sure all commits have finished */
    synchronize_sched();
    ring_buffer_reset_cpu(buffer, cpu);

    ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
    struct ring_buffer *buffer = buf->buffer;
    int cpu;

    if (!buffer)
        return;

    ring_buffer_record_disable(buffer);

    /* Make sure all commits have finished */
    synchronize_sched();

    buf->time_start = buffer_ftrace_now(buf, buf->cpu);

    for_each_online_cpu(cpu)
        ring_buffer_reset_cpu(buffer, cpu);

    ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
    struct trace_array *tr;

    list_for_each_entry(tr, &ftrace_trace_arrays, list) {
        tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
        tracing_reset_online_cpus(&tr->max_buffer);
#endif
    }
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
    unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
    unsigned *map_cmdline_to_pid;
    unsigned cmdline_num;
    int cmdline_idx;
    char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
    return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
    memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
                    struct saved_cmdlines_buffer *s)
{
    s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
                    GFP_KERNEL);
    if (!s->map_cmdline_to_pid)
        return -ENOMEM;

    s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
    if (!s->saved_cmdlines) {
        kfree(s->map_cmdline_to_pid);
        return -ENOMEM;
    }

    s->cmdline_idx = 0;
    s->cmdline_num = val;
    memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
           sizeof(s->map_pid_to_cmdline));
    memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
           val * sizeof(*s->map_cmdline_to_pid));

    return 0;
}
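/*
 * Editorial note (not in the original file): memset() only uses the low
 * byte of its value argument, so initializing these unsigned arrays
 * with NO_CMDLINE_MAP works only because UINT_MAX is 0xff in every
 * byte of the word.
 */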
static int trace_create_savedcmd(void)
{
    int ret;

    savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
    if (!savedcmd)
        return -ENOMEM;

    ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
    if (ret < 0) {
        kfree(savedcmd);
        savedcmd = NULL;
        return -ENOMEM;
    }

    return 0;
}

int is_tracing_stopped(void)
{
    return global_trace.stop_count;
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
    struct ring_buffer *buffer;
    unsigned long flags;

    if (tracing_disabled)
        return;

    raw_spin_lock_irqsave(&global_trace.start_lock, flags);
    if (--global_trace.stop_count) {
        if (global_trace.stop_count < 0) {
            /* Someone screwed up their debugging */
            WARN_ON_ONCE(1);
            global_trace.stop_count = 0;
        }
        goto out;
    }

    /* Prevent the buffers from switching */
    arch_spin_lock(&global_trace.max_lock);

    buffer = global_trace.trace_buffer.buffer;
    if (buffer)
        ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
    buffer = global_trace.max_buffer.buffer;
    if (buffer)
        ring_buffer_record_enable(buffer);
#endif

    arch_spin_unlock(&global_trace.max_lock);

out:
    raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
    struct ring_buffer *buffer;
    unsigned long flags;

    if (tracing_disabled)
        return;

    /* If global, we need to also start the max tracer */
    if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
        return tracing_start();

    raw_spin_lock_irqsave(&tr->start_lock, flags);

    if (--tr->stop_count) {
        if (tr->stop_count < 0) {
            /* Someone screwed up their debugging */
            WARN_ON_ONCE(1);
            tr->stop_count = 0;
        }
        goto out;
    }

    buffer = tr->trace_buffer.buffer;
    if (buffer)
        ring_buffer_record_enable(buffer);

out:
    raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
    struct ring_buffer *buffer;
    unsigned long flags;

    raw_spin_lock_irqsave(&global_trace.start_lock, flags);
    if (global_trace.stop_count++)
        goto out;

    /* Prevent the buffers from switching */
    arch_spin_lock(&global_trace.max_lock);

    buffer = global_trace.trace_buffer.buffer;
    if (buffer)
        ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
    buffer = global_trace.max_buffer.buffer;
    if (buffer)
        ring_buffer_record_disable(buffer);
#endif

    arch_spin_unlock(&global_trace.max_lock);

out:
    raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
    struct ring_buffer *buffer;
    unsigned long flags;

    /* If global, we need to also stop the max tracer */
    if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
        return tracing_stop();

    raw_spin_lock_irqsave(&tr->start_lock, flags);
    if (tr->stop_count++)
        goto out;

    buffer = tr->trace_buffer.buffer;
    if (buffer)
        ring_buffer_record_disable(buffer);

out:
    raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
    unsigned pid, idx;

    if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
        return 0;

    /*
     * It's not the end of the world if we don't get
     * the lock, but we also don't want to spin
     * nor do we want to disable interrupts,
     * so if we miss here, then better luck next time.
     */
    if (!arch_spin_trylock(&trace_cmdline_lock))
        return 0;

    idx = savedcmd->map_pid_to_cmdline[tsk->pid];
    if (idx == NO_CMDLINE_MAP) {
        idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

        /*
         * Check whether the cmdline buffer at idx has a pid
         * mapped. We are going to overwrite that entry so we
         * need to clear the map_pid_to_cmdline. Otherwise we
         * would read the new comm for the old pid.
         */
        pid = savedcmd->map_cmdline_to_pid[idx];
        if (pid != NO_CMDLINE_MAP)
            savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

        savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
        savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

        savedcmd->cmdline_idx = idx;
    }

    set_cmdline(idx, tsk->comm);

    arch_spin_unlock(&trace_cmdline_lock);

    return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
    unsigned map;

    if (!pid) {
        strcpy(comm, "<idle>");
        return;
    }

    if (WARN_ON_ONCE(pid < 0)) {
        strcpy(comm, "<XXX>");
        return;
    }

    if (pid > PID_MAX_DEFAULT) {
        strcpy(comm, "<...>");
        return;
    }

    map = savedcmd->map_pid_to_cmdline[pid];
    if (map != NO_CMDLINE_MAP)
        strcpy(comm, get_saved_cmdlines(map));
    else
        strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
    preempt_disable();
    arch_spin_lock(&trace_cmdline_lock);

    __trace_find_cmdline(pid, comm);

    arch_spin_unlock(&trace_cmdline_lock);
    preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
    if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
        return;

    if (!__this_cpu_read(trace_cmdline_save))
        return;

    if (trace_save_cmdline(tsk))
        __this_cpu_write(trace_cmdline_save, false);
}
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                 int pc)
{
    struct task_struct *tsk = current;

    entry->preempt_count = pc & 0xff;
    entry->pid = (tsk) ? tsk->pid : 0;
    entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
        (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
        TRACE_FLAG_IRQS_NOSUPPORT |
#endif
        ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
        ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
        ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
        (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
        (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
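/*
 * Illustrative decoding (editorial note, not in the original file): an
 * event recorded from softirq context with interrupts disabled gets
 * TRACE_FLAG_SOFTIRQ (from pc & SOFTIRQ_MASK) and TRACE_FLAG_IRQS_OFF
 * set in entry->flags, while entry->preempt_count keeps only the low
 * byte of the preemption count.
 */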
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
              int type,
              unsigned long len,
              unsigned long flags, int pc)
{
    struct ring_buffer_event *event;

    event = ring_buffer_lock_reserve(buffer, len);
    if (event != NULL) {
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, flags, pc);
        ent->type = type;
    }

    return event;
}

static void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
    __this_cpu_write(trace_cmdline_save, true);
    ring_buffer_unlock_commit(buffer, event);
}

void trace_buffer_unlock_commit(struct trace_array *tr,
                struct ring_buffer *buffer,
                struct ring_buffer_event *event,
                unsigned long flags, int pc)
{
    __buffer_unlock_commit(buffer, event);

    ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
    ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                struct trace_event_file *trace_file,
                int type, unsigned long len,
                unsigned long flags, int pc)
{
    struct ring_buffer_event *entry;

    *current_rb = trace_file->tr->trace_buffer.buffer;
    entry = trace_buffer_lock_reserve(*current_rb,
                      type, len, flags, pc);
    /*
     * If tracing is off, but we have triggers enabled
     * we still need to look at the event data. Use the temp_buffer
     * to store the trace event for the trigger to use. It's recursion
     * safe and will not be recorded anywhere.
     */
    if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
        *current_rb = temp_buffer;
        entry = trace_buffer_lock_reserve(*current_rb,
                          type, len, flags, pc);
    }
    return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
                  int type, unsigned long len,
                  unsigned long flags, int pc)
{
    *current_rb = global_trace.trace_buffer.buffer;
    return trace_buffer_lock_reserve(*current_rb,
                     type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                     struct ring_buffer *buffer,
                     struct ring_buffer_event *event,
                     unsigned long flags, int pc,
                     struct pt_regs *regs)
{
    __buffer_unlock_commit(buffer, event);

    ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
    ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                     struct ring_buffer_event *event)
{
    ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
           unsigned long ip, unsigned long parent_ip, unsigned long flags,
           int pc)
{
    struct trace_event_call *call = &event_function;
    struct ring_buffer *buffer = tr->trace_buffer.buffer;
    struct ring_buffer_event *event;
    struct ftrace_entry *entry;

    event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
                      flags, pc);
    if (!event)
        return;
    entry = ring_buffer_event_data(event);
    entry->ip = ip;
    entry->parent_ip = parent_ip;

    if (!call_filter_check_discard(call, entry, buffer, event))
        __buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
    unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                 unsigned long flags,
                 int skip, int pc, struct pt_regs *regs)
{
    struct trace_event_call *call = &event_kernel_stack;
    struct ring_buffer_event *event;
    struct stack_entry *entry;
    struct stack_trace trace;
    int use_stack;
    int size = FTRACE_STACK_ENTRIES;

    trace.nr_entries = 0;
    trace.skip = skip;

    /*
     * Since events can happen in NMIs there's no safe way to
     * use the per cpu ftrace_stacks. We reserve it and if an interrupt
     * or NMI comes in, it will just have to use the default
     * FTRACE_STACK_SIZE.
     */
    preempt_disable_notrace();

    use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
    /*
     * We don't need any atomic variables, just a barrier.
     * If an interrupt comes in, we don't care, because it would
     * have exited and put the counter back to what we want.
     * We just need a barrier to keep gcc from moving things
     * around.
     */
    barrier();
    if (use_stack == 1) {
        trace.entries = this_cpu_ptr(ftrace_stack.calls);
        trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

        if (regs)
            save_stack_trace_regs(regs, &trace);
        else
            save_stack_trace(&trace);

        if (trace.nr_entries > size)
            size = trace.nr_entries;
    } else
        /* From now on, use_stack is a boolean */
        use_stack = 0;

    size *= sizeof(unsigned long);

    event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
                      sizeof(*entry) + size, flags, pc);
    if (!event)
        goto out;
    entry = ring_buffer_event_data(event);

    memset(&entry->caller, 0, size);

    if (use_stack)
        memcpy(&entry->caller, trace.entries,
               trace.nr_entries * sizeof(unsigned long));
    else {
        trace.max_entries = FTRACE_STACK_ENTRIES;
        trace.entries = entry->caller;
        if (regs)
            save_stack_trace_regs(regs, &trace);
        else
            save_stack_trace(&trace);
    }

    entry->size = trace.nr_entries;

    if (!call_filter_check_discard(call, entry, buffer, event))
        __buffer_unlock_commit(buffer, event);

out:
    /* Again, don't let gcc optimize things here */
    barrier();
    __this_cpu_dec(ftrace_stack_reserve);
    preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                      struct ring_buffer *buffer,
                      unsigned long flags,
                      int skip, int pc, struct pt_regs *regs)
{
    if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
        return;

    __ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
           int pc)
{
    __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
    unsigned long flags;

    if (tracing_disabled || tracing_selftest_running)
        return;

    local_save_flags(flags);

    /*
     * Skip 3 more, seems to get us at the caller of
     * this function.
     */
    skip += 3;
    __ftrace_trace_stack(global_trace.trace_buffer.buffer,
                 flags, skip, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
    struct trace_event_call *call = &event_user_stack;
    struct ring_buffer_event *event;
    struct userstack_entry *entry;
    struct stack_trace trace;

    if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
        return;

    /*
     * NMIs can not handle page faults, even with fix ups.
     * The save user stack can (and often does) fault.
     */
    if (unlikely(in_nmi()))
        return;

    /*
     * prevent recursion, since the user stack tracing may
     * trigger other kernel events.
     */
    preempt_disable();
    if (__this_cpu_read(user_stack_count))
        goto out;

    __this_cpu_inc(user_stack_count);

    event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                      sizeof(*entry), flags, pc);
    if (!event)
        goto out_drop_count;
    entry = ring_buffer_event_data(event);

    entry->tgid = current->tgid;
    memset(&entry->caller, 0, sizeof(entry->caller));

    trace.nr_entries = 0;
    trace.max_entries = FTRACE_STACK_ENTRIES;
    trace.skip = 0;
    trace.entries = entry->caller;

    save_stack_trace_user(&trace);
    if (!call_filter_check_discard(call, entry, buffer, event))
        __buffer_unlock_commit(buffer, event);

out_drop_count:
    __this_cpu_dec(user_stack_count);
out:
    preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
    ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
    char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
    struct trace_buffer_struct *percpu_buffer;

    /*
     * If we have allocated per cpu buffers, then we do not
     * need to do any locking.
     */
    if (in_nmi())
        percpu_buffer = trace_percpu_nmi_buffer;
    else if (in_irq())
        percpu_buffer = trace_percpu_irq_buffer;
    else if (in_softirq())
        percpu_buffer = trace_percpu_sirq_buffer;
    else
        percpu_buffer = trace_percpu_buffer;

    if (!percpu_buffer)
        return NULL;

    return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
static int alloc_percpu_trace_buffer(void)
{
    struct trace_buffer_struct *buffers;
    struct trace_buffer_struct *sirq_buffers;
    struct trace_buffer_struct *irq_buffers;
    struct trace_buffer_struct *nmi_buffers;

    buffers = alloc_percpu(struct trace_buffer_struct);
    if (!buffers)
        goto err_warn;

    sirq_buffers = alloc_percpu(struct trace_buffer_struct);
    if (!sirq_buffers)
        goto err_sirq;

    irq_buffers = alloc_percpu(struct trace_buffer_struct);
    if (!irq_buffers)
        goto err_irq;

    nmi_buffers = alloc_percpu(struct trace_buffer_struct);
    if (!nmi_buffers)
        goto err_nmi;

    trace_percpu_buffer = buffers;
    trace_percpu_sirq_buffer = sirq_buffers;
    trace_percpu_irq_buffer = irq_buffers;
    trace_percpu_nmi_buffer = nmi_buffers;

    return 0;

err_nmi:
    free_percpu(irq_buffers);
err_irq:
    free_percpu(sirq_buffers);
err_sirq:
    free_percpu(buffers);
err_warn:
    WARN(1, "Could not allocate percpu trace_printk buffer");
    return -ENOMEM;
}
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
    if (buffers_allocated)
        return;

    if (alloc_percpu_trace_buffer())
        return;

    /* trace_printk() is for debug use only. Don't use it in production. */

    pr_warn("\n");
    pr_warn("**********************************************************\n");
    pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
    pr_warn("**                                                      **\n");
    pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
    pr_warn("**                                                      **\n");
    pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
    pr_warn("** unsafe for production use.                           **\n");
    pr_warn("**                                                      **\n");
    pr_warn("** If you see this message and you are not debugging    **\n");
    pr_warn("** the kernel, report this immediately to your vendor!  **\n");
    pr_warn("**                                                      **\n");
    pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
    pr_warn("**********************************************************\n");

    /* Expand the buffers to set size */
    tracing_update_buffers();

    buffers_allocated = 1;

    /*
     * trace_printk_init_buffers() can be called by modules.
     * If that happens, then we need to start cmdline recording
     * directly here. If the global_trace.buffer is already
     * allocated here, then this was called by module code.
     */
    if (global_trace.trace_buffer.buffer)
        tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
    /* Start tracing comms if trace printk is set */
    if (!buffers_allocated)
        return;
    tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
    if (!buffers_allocated)
        return;

    if (enabled)
        tracing_start_cmdline_record();
    else
        tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
    struct trace_event_call *call = &event_bprint;
    struct ring_buffer_event *event;
    struct ring_buffer *buffer;
    struct trace_array *tr = &global_trace;
    struct bprint_entry *entry;
    unsigned long flags;
    char *tbuffer;
    int len = 0, size, pc;

    if (unlikely(tracing_selftest_running || tracing_disabled))
        return 0;

    /* Don't pollute graph traces with trace_vprintk internals */
    pause_graph_tracing();

    pc = preempt_count();
    preempt_disable_notrace();

    tbuffer = get_trace_buf();
    if (!tbuffer) {
        len = 0;
        goto out;
    }

    len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

    if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
        goto out;

    local_save_flags(flags);
    size = sizeof(*entry) + sizeof(u32) * len;
    buffer = tr->trace_buffer.buffer;
    event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                      flags, pc);
    if (!event)
        goto out;
    entry = ring_buffer_event_data(event);
    entry->ip = ip;
    entry->fmt = fmt;

    memcpy(entry->buf, tbuffer, sizeof(u32) * len);
    if (!call_filter_check_discard(call, entry, buffer, event)) {
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
    }

out:
    preempt_enable_notrace();
    unpause_graph_tracing();

    return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
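/*
 * Editorial note (not in the original file): trace_printk() with
 * arguments typically lands here; only the format pointer and the
 * vbin_printf()-encoded arguments are stored in the ring buffer, and
 * the full string is rendered later, at read time.
 */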
static int
__trace_array_vprintk(struct ring_buffer *buffer,
              unsigned long ip, const char *fmt, va_list args)
{
    struct trace_event_call *call = &event_print;
    struct ring_buffer_event *event;
    int len = 0, size, pc;
    struct print_entry *entry;
    unsigned long flags;
    char *tbuffer;

    if (tracing_disabled || tracing_selftest_running)
        return 0;

    /* Don't pollute graph traces with trace_vprintk internals */
    pause_graph_tracing();

    pc = preempt_count();
    preempt_disable_notrace();

    tbuffer = get_trace_buf();
    if (!tbuffer) {
        len = 0;
        goto out;
    }

    len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

    local_save_flags(flags);
    size = sizeof(*entry) + len + 1;
    event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                      flags, pc);
    if (!event)
        goto out;
    entry = ring_buffer_event_data(event);
    entry->ip = ip;

    memcpy(&entry->buf, tbuffer, len + 1);
    if (!call_filter_check_discard(call, entry, buffer, event)) {
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
    }
out:
    preempt_enable_notrace();
    unpause_graph_tracing();

    return len;
}

int trace_array_vprintk(struct trace_array *tr,
            unsigned long ip, const char *fmt, va_list args)
{
    return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
               unsigned long ip, const char *fmt, ...)
{
    int ret;
    va_list ap;

    if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
        return 0;

    va_start(ap, fmt);
    ret = trace_array_vprintk(tr, ip, fmt, ap);
    va_end(ap);
    return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
               unsigned long ip, const char *fmt, ...)
{
    int ret;
    va_list ap;

    if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
        return 0;

    va_start(ap, fmt);
    ret = __trace_array_vprintk(buffer, ip, fmt, ap);
    va_end(ap);
    return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
    return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
    struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

    iter->idx++;
    if (buf_iter)
        ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
        unsigned long *lost_events)
{
    struct ring_buffer_event *event;
    struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

    if (buf_iter)
        event = ring_buffer_iter_peek(buf_iter, ts);
    else
        event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
                     lost_events);

    if (event) {
        iter->ent_size = ring_buffer_event_length(event);
        return ring_buffer_event_data(event);
    }
    iter->ent_size = 0;
    return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
          unsigned long *missing_events, u64 *ent_ts)
{
    struct ring_buffer *buffer = iter->trace_buffer->buffer;
    struct trace_entry *ent, *next = NULL;
    unsigned long lost_events = 0, next_lost = 0;
    int cpu_file = iter->cpu_file;
    u64 next_ts = 0, ts;
    int next_cpu = -1;
    int next_size = 0;
    int cpu;

    /*
     * If we are in a per_cpu trace file, don't bother by iterating over
     * all CPUs, and peek directly.
     */
    if (cpu_file > RING_BUFFER_ALL_CPUS) {
        if (ring_buffer_empty_cpu(buffer, cpu_file))
            return NULL;
        ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
        if (ent_cpu)
            *ent_cpu = cpu_file;

        return ent;
    }

    for_each_tracing_cpu(cpu) {

        if (ring_buffer_empty_cpu(buffer, cpu))
            continue;

        ent = peek_next_entry(iter, cpu, &ts, &lost_events);

        /*
         * Pick the entry with the smallest timestamp:
         */
        if (ent && (!next || ts < next_ts)) {
            next = ent;
            next_cpu = cpu;
            next_ts = ts;
            next_lost = lost_events;
            next_size = iter->ent_size;
        }
    }

    iter->ent_size = next_size;

    if (ent_cpu)
        *ent_cpu = next_cpu;

    if (ent_ts)
        *ent_ts = next_ts;

    if (missing_events)
        *missing_events = next_lost;

    return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                      int *ent_cpu, u64 *ent_ts)
{
    return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
    iter->ent = __find_next_entry(iter, &iter->cpu,
                      &iter->lost_events, &iter->ts);

    if (iter->ent)
        trace_iterator_increment(iter);

    return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
    ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
                &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct trace_iterator *iter = m->private;
    int i = (int)*pos;
    void *ent;

    WARN_ON_ONCE(iter->leftover);

    (*pos)++;

    /* can't go backwards */
    if (iter->idx > i)
        return NULL;

    if (iter->idx < 0)
        ent = trace_find_next_entry_inc(iter);
    else
        ent = iter;

    while (ent && iter->idx < i)
        ent = trace_find_next_entry_inc(iter);

    iter->pos = *pos;

    return ent;
}
2411 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2413 struct ring_buffer_event *event;
2414 struct ring_buffer_iter *buf_iter;
2415 unsigned long entries = 0;
2418 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2420 buf_iter = trace_buffer_iter(iter, cpu);
2424 ring_buffer_iter_reset(buf_iter);
2427 * We could have the case with the max latency tracers
2428 * that a reset never took place on a cpu. This is evident
2429 * by the timestamp being before the start of the buffer.
2431 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2432 if (ts >= iter->trace_buffer->time_start)
2435 ring_buffer_read(buf_iter, NULL);
2438 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
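/*
 * Note on the accounting above: entries stamped before
 * trace_buffer->time_start were written before the last reset of
 * this CPU's buffer. They are read past, and their count is stored
 * in skipped_entries so that get_total_entries() below can subtract
 * them from the per-CPU entry count.
 */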
2442 * The current tracer is copied to avoid global locking all around.
2445 static void *s_start(struct seq_file *m, loff_t *pos)
2447 struct trace_iterator *iter = m->private;
2448 struct trace_array *tr = iter->tr;
2449 int cpu_file = iter->cpu_file;
2455 * copy the tracer to avoid using a global lock all around.
2456 * iter->trace is a copy of current_trace; the name pointer may
2457 * be compared instead of calling strcmp(), as iter->trace->name
2458 * will point to the same string as current_trace->name.
2460 mutex_lock(&trace_types_lock);
2461 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2462 *iter->trace = *tr->current_trace;
2463 mutex_unlock(&trace_types_lock);
2465 #ifdef CONFIG_TRACER_MAX_TRACE
2466 if (iter->snapshot && iter->trace->use_max_tr)
2467 return ERR_PTR(-EBUSY);
2470 if (!iter->snapshot)
2471 atomic_inc(&trace_record_cmdline_disabled);
2473 if (*pos != iter->pos) {
2478 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2479 for_each_tracing_cpu(cpu)
2480 tracing_iter_reset(iter, cpu);
2482 tracing_iter_reset(iter, cpu_file);
2485 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2490 * If we overflowed the seq_file before, then we want
2491 * to just reuse the trace_seq buffer again.
2497 p = s_next(m, p, &l);
2501 trace_event_read_lock();
2502 trace_access_lock(cpu_file);
2506 static void s_stop(struct seq_file *m, void *p)
2508 struct trace_iterator *iter = m->private;
2510 #ifdef CONFIG_TRACER_MAX_TRACE
2511 if (iter->snapshot && iter->trace->use_max_tr)
2515 if (!iter->snapshot)
2516 atomic_dec(&trace_record_cmdline_disabled);
2518 trace_access_unlock(iter->cpu_file);
2519 trace_event_read_unlock();
2523 get_total_entries(struct trace_buffer *buf,
2524 unsigned long *total, unsigned long *entries)
2526 unsigned long count;
2532 for_each_tracing_cpu(cpu) {
2533 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2535 * If this buffer has skipped entries, then we hold all
2536 * entries for the trace and we need to ignore the
2537 * ones before the timestamp.
2539 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2540 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2541 /* total is the same as the entries */
2545 ring_buffer_overrun_cpu(buf->buffer, cpu);
2550 static void print_lat_help_header(struct seq_file *m)
2552 seq_puts(m, "# _------=> CPU# \n"
2553 "# / _-----=> irqs-off \n"
2554 "# | / _----=> need-resched \n"
2555 "# || / _---=> hardirq/softirq \n"
2556 "# ||| / _--=> preempt-depth \n"
2558 "# cmd pid ||||| time | caller \n"
2559 "# \\ / ||||| \\ | / \n");
2562 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2564 unsigned long total;
2565 unsigned long entries;
2567 get_total_entries(buf, &total, &entries);
2568 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2569 entries, total, num_online_cpus());
2573 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2575 print_event_info(buf, m);
2576 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2580 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2582 print_event_info(buf, m);
2583 seq_puts(m, "# _-----=> irqs-off\n"
2584 "# / _----=> need-resched\n"
2585 "# | / _---=> hardirq/softirq\n"
2586 "# || / _--=> preempt-depth\n"
2588 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2589 "# | | | |||| | |\n");
2593 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2595 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2596 struct trace_buffer *buf = iter->trace_buffer;
2597 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2598 struct tracer *type = iter->trace;
2599 unsigned long entries;
2600 unsigned long total;
2601 const char *name = "preemption";
2605 get_total_entries(buf, &total, &entries);
2607 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2609 seq_puts(m, "# -----------------------------------"
2610 "---------------------------------\n");
2611 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2612 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2613 nsecs_to_usecs(data->saved_latency),
2617 #if defined(CONFIG_PREEMPT_NONE)
2619 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2621 #elif defined(CONFIG_PREEMPT)
2626 /* These are reserved for later use */
2629 seq_printf(m, " #P:%d)\n", num_online_cpus());
2633 seq_puts(m, "# -----------------\n");
2634 seq_printf(m, "# | task: %.16s-%d "
2635 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2636 data->comm, data->pid,
2637 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2638 data->policy, data->rt_priority);
2639 seq_puts(m, "# -----------------\n");
2641 if (data->critical_start) {
2642 seq_puts(m, "# => started at: ");
2643 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2644 trace_print_seq(m, &iter->seq);
2645 seq_puts(m, "\n# => ended at: ");
2646 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2647 trace_print_seq(m, &iter->seq);
2648 seq_puts(m, "\n#\n");
2654 static void test_cpu_buff_start(struct trace_iterator *iter)
2656 struct trace_seq *s = &iter->seq;
2657 struct trace_array *tr = iter->tr;
2659 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2662 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2665 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2668 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2672 cpumask_set_cpu(iter->cpu, iter->started);
2674 /* Don't print the 'buffer started' annotation for the first entry of the trace */
2676 trace_seq_printf(s, "##### CPU %u buffer started #####\n",
2680 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2682 struct trace_array *tr = iter->tr;
2683 struct trace_seq *s = &iter->seq;
2684 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2685 struct trace_entry *entry;
2686 struct trace_event *event;
2690 test_cpu_buff_start(iter);
2692 event = ftrace_find_event(entry->type);
2694 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2695 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2696 trace_print_lat_context(iter);
2698 trace_print_context(iter);
2701 if (trace_seq_has_overflowed(s))
2702 return TRACE_TYPE_PARTIAL_LINE;
2705 return event->funcs->trace(iter, sym_flags, event);
2707 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2709 return trace_handle_return(s);
2712 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2714 struct trace_array *tr = iter->tr;
2715 struct trace_seq *s = &iter->seq;
2716 struct trace_entry *entry;
2717 struct trace_event *event;
2721 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2722 trace_seq_printf(s, "%d %d %llu ",
2723 entry->pid, iter->cpu, iter->ts);
2725 if (trace_seq_has_overflowed(s))
2726 return TRACE_TYPE_PARTIAL_LINE;
2728 event = ftrace_find_event(entry->type);
2730 return event->funcs->raw(iter, 0, event);
2732 trace_seq_printf(s, "%d ?\n", entry->type);
2734 return trace_handle_return(s);
2737 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2739 struct trace_array *tr = iter->tr;
2740 struct trace_seq *s = &iter->seq;
2741 unsigned char newline = '\n';
2742 struct trace_entry *entry;
2743 struct trace_event *event;
2747 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2748 SEQ_PUT_HEX_FIELD(s, entry->pid);
2749 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2750 SEQ_PUT_HEX_FIELD(s, iter->ts);
2751 if (trace_seq_has_overflowed(s))
2752 return TRACE_TYPE_PARTIAL_LINE;
2755 event = ftrace_find_event(entry->type);
2757 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2758 if (ret != TRACE_TYPE_HANDLED)
2762 SEQ_PUT_FIELD(s, newline);
2764 return trace_handle_return(s);
2767 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2769 struct trace_array *tr = iter->tr;
2770 struct trace_seq *s = &iter->seq;
2771 struct trace_entry *entry;
2772 struct trace_event *event;
2776 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2777 SEQ_PUT_FIELD(s, entry->pid);
2778 SEQ_PUT_FIELD(s, iter->cpu);
2779 SEQ_PUT_FIELD(s, iter->ts);
2780 if (trace_seq_has_overflowed(s))
2781 return TRACE_TYPE_PARTIAL_LINE;
2784 event = ftrace_find_event(entry->type);
2785 return event ? event->funcs->binary(iter, 0, event) :
2789 int trace_empty(struct trace_iterator *iter)
2791 struct ring_buffer_iter *buf_iter;
2794 /* If we are looking at one CPU buffer, only check that one */
2795 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2796 cpu = iter->cpu_file;
2797 buf_iter = trace_buffer_iter(iter, cpu);
2799 if (!ring_buffer_iter_empty(buf_iter))
2802 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2808 for_each_tracing_cpu(cpu) {
2809 buf_iter = trace_buffer_iter(iter, cpu);
2811 if (!ring_buffer_iter_empty(buf_iter))
2814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2822 /* Called with trace_event_read_lock() held. */
2823 enum print_line_t print_trace_line(struct trace_iterator *iter)
2825 struct trace_array *tr = iter->tr;
2826 unsigned long trace_flags = tr->trace_flags;
2827 enum print_line_t ret;
2829 if (iter->lost_events) {
2830 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2831 iter->cpu, iter->lost_events);
2832 if (trace_seq_has_overflowed(&iter->seq))
2833 return TRACE_TYPE_PARTIAL_LINE;
2836 if (iter->trace && iter->trace->print_line) {
2837 ret = iter->trace->print_line(iter);
2838 if (ret != TRACE_TYPE_UNHANDLED)
2842 if (iter->ent->type == TRACE_BPUTS &&
2843 trace_flags & TRACE_ITER_PRINTK &&
2844 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2845 return trace_print_bputs_msg_only(iter);
2847 if (iter->ent->type == TRACE_BPRINT &&
2848 trace_flags & TRACE_ITER_PRINTK &&
2849 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2850 return trace_print_bprintk_msg_only(iter);
2852 if (iter->ent->type == TRACE_PRINT &&
2853 trace_flags & TRACE_ITER_PRINTK &&
2854 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2855 return trace_print_printk_msg_only(iter);
2857 if (trace_flags & TRACE_ITER_BIN)
2858 return print_bin_fmt(iter);
2860 if (trace_flags & TRACE_ITER_HEX)
2861 return print_hex_fmt(iter);
2863 if (trace_flags & TRACE_ITER_RAW)
2864 return print_raw_fmt(iter);
2866 return print_trace_fmt(iter);
2869 void trace_latency_header(struct seq_file *m)
2871 struct trace_iterator *iter = m->private;
2872 struct trace_array *tr = iter->tr;
2874 /* print nothing if the buffers are empty */
2875 if (trace_empty(iter))
2878 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2879 print_trace_header(m, iter);
2881 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2882 print_lat_help_header(m);
2885 void trace_default_header(struct seq_file *m)
2887 struct trace_iterator *iter = m->private;
2888 struct trace_array *tr = iter->tr;
2889 unsigned long trace_flags = tr->trace_flags;
2891 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2894 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2895 /* print nothing if the buffers are empty */
2896 if (trace_empty(iter))
2898 print_trace_header(m, iter);
2899 if (!(trace_flags & TRACE_ITER_VERBOSE))
2900 print_lat_help_header(m);
2902 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2903 if (trace_flags & TRACE_ITER_IRQ_INFO)
2904 print_func_help_header_irq(iter->trace_buffer, m);
2906 print_func_help_header(iter->trace_buffer, m);
2911 static void test_ftrace_alive(struct seq_file *m)
2913 if (!ftrace_is_dead())
2915 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2916 "# MAY BE MISSING FUNCTION EVENTS\n");
2919 #ifdef CONFIG_TRACER_MAX_TRACE
2920 static void show_snapshot_main_help(struct seq_file *m)
2922 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2923 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2924 "# Takes a snapshot of the main buffer.\n"
2925 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2926 "# (Doesn't have to be '2' works with any number that\n"
2927 "# is not a '0' or '1')\n");
2930 static void show_snapshot_percpu_help(struct seq_file *m)
2932 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2933 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2934 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2935 "# Takes a snapshot of the main buffer for this cpu.\n");
2937 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2938 "# Must use main snapshot file to allocate.\n");
2940 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2941 "# (Doesn't have to be '2' works with any number that\n"
2942 "# is not a '0' or '1')\n");
2945 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2947 if (iter->tr->allocated_snapshot)
2948 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2950 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2952 seq_puts(m, "# Snapshot commands:\n");
2953 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2954 show_snapshot_main_help(m);
2956 show_snapshot_percpu_help(m);
2959 /* Should never be called */
2960 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2963 static int s_show(struct seq_file *m, void *v)
2965 struct trace_iterator *iter = v;
2968 if (iter->ent == NULL) {
2970 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2972 test_ftrace_alive(m);
2974 if (iter->snapshot && trace_empty(iter))
2975 print_snapshot_help(m, iter);
2976 else if (iter->trace && iter->trace->print_header)
2977 iter->trace->print_header(m);
2979 trace_default_header(m);
2981 } else if (iter->leftover) {
2983 * If we filled the seq_file buffer earlier, we
2984 * want to just show it now.
2986 ret = trace_print_seq(m, &iter->seq);
2988 /* ret should this time be zero, but you never know */
2989 iter->leftover = ret;
2992 print_trace_line(iter);
2993 ret = trace_print_seq(m, &iter->seq);
2995 * If we overflow the seq_file buffer, then it will
2996 * ask us for this data again at start up.
2998 * ret is 0 if seq_file write succeeded.
3001 iter->leftover = ret;
3008 * Should be used after trace_array_get(), trace_types_lock
3009 * ensures that i_cdev was already initialized.
3011 static inline int tracing_get_cpu(struct inode *inode)
3013 if (inode->i_cdev) /* See trace_create_cpu_file() */
3014 return (long)inode->i_cdev - 1;
3015 return RING_BUFFER_ALL_CPUS;
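/*
 * Illustrative counterpart (a sketch; trace_create_cpu_file() itself
 * is not part of this excerpt): the CPU number is assumed to be
 * stored biased by one, so that a NULL i_cdev means "no CPU encoded":
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);
 *
 * tracing_get_cpu() above undoes the bias, or falls back to
 * RING_BUFFER_ALL_CPUS.
 */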
3018 static const struct seq_operations tracer_seq_ops = {
3025 static struct trace_iterator *
3026 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3028 struct trace_array *tr = inode->i_private;
3029 struct trace_iterator *iter;
3032 if (tracing_disabled)
3033 return ERR_PTR(-ENODEV);
3035 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3037 return ERR_PTR(-ENOMEM);
3039 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3041 if (!iter->buffer_iter)
3045 * We make a copy of the current tracer to avoid concurrent
3046 * changes to it while we are reading.
3048 mutex_lock(&trace_types_lock);
3049 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3053 *iter->trace = *tr->current_trace;
3055 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3060 #ifdef CONFIG_TRACER_MAX_TRACE
3061 /* Currently only the top directory has a snapshot */
3062 if (tr->current_trace->print_max || snapshot)
3063 iter->trace_buffer = &tr->max_buffer;
3066 iter->trace_buffer = &tr->trace_buffer;
3067 iter->snapshot = snapshot;
3069 iter->cpu_file = tracing_get_cpu(inode);
3070 mutex_init(&iter->mutex);
3072 /* Notify the tracer early; before we stop tracing. */
3073 if (iter->trace && iter->trace->open)
3074 iter->trace->open(iter);
3076 /* Annotate start of buffers if we had overruns */
3077 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3078 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3080 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3081 if (trace_clocks[tr->clock_id].in_ns)
3082 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3084 /* stop the trace while dumping if we are not opening "snapshot" */
3085 if (!iter->snapshot)
3086 tracing_stop_tr(tr);
3088 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3089 for_each_tracing_cpu(cpu) {
3090 iter->buffer_iter[cpu] =
3091 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3093 ring_buffer_read_prepare_sync();
3094 for_each_tracing_cpu(cpu) {
3095 ring_buffer_read_start(iter->buffer_iter[cpu]);
3096 tracing_iter_reset(iter, cpu);
3099 cpu = iter->cpu_file;
3100 iter->buffer_iter[cpu] =
3101 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3102 ring_buffer_read_prepare_sync();
3103 ring_buffer_read_start(iter->buffer_iter[cpu]);
3104 tracing_iter_reset(iter, cpu);
3107 mutex_unlock(&trace_types_lock);
3112 mutex_unlock(&trace_types_lock);
3114 kfree(iter->buffer_iter);
3116 seq_release_private(inode, file);
3117 return ERR_PTR(-ENOMEM);
3120 int tracing_open_generic(struct inode *inode, struct file *filp)
3122 if (tracing_disabled)
3125 filp->private_data = inode->i_private;
3129 bool tracing_is_disabled(void)
3131 return tracing_disabled ? true : false;
3135 * Open and update trace_array ref count.
3136 * Must have the current trace_array passed to it.
3138 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3140 struct trace_array *tr = inode->i_private;
3142 if (tracing_disabled)
3145 if (trace_array_get(tr) < 0)
3148 filp->private_data = inode->i_private;
3153 static int tracing_release(struct inode *inode, struct file *file)
3155 struct trace_array *tr = inode->i_private;
3156 struct seq_file *m = file->private_data;
3157 struct trace_iterator *iter;
3160 if (!(file->f_mode & FMODE_READ)) {
3161 trace_array_put(tr);
3165 /* Writes do not use seq_file */
3167 mutex_lock(&trace_types_lock);
3169 for_each_tracing_cpu(cpu) {
3170 if (iter->buffer_iter[cpu])
3171 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3174 if (iter->trace && iter->trace->close)
3175 iter->trace->close(iter);
3177 if (!iter->snapshot)
3178 /* reenable tracing if it was previously enabled */
3179 tracing_start_tr(tr);
3181 __trace_array_put(tr);
3183 mutex_unlock(&trace_types_lock);
3185 mutex_destroy(&iter->mutex);
3186 free_cpumask_var(iter->started);
3188 kfree(iter->buffer_iter);
3189 seq_release_private(inode, file);
3194 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3196 struct trace_array *tr = inode->i_private;
3198 trace_array_put(tr);
3202 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3204 struct trace_array *tr = inode->i_private;
3206 trace_array_put(tr);
3208 return single_release(inode, file);
3211 static int tracing_open(struct inode *inode, struct file *file)
3213 struct trace_array *tr = inode->i_private;
3214 struct trace_iterator *iter;
3217 if (trace_array_get(tr) < 0)
3220 /* If this file was opened for write, then erase its contents */
3221 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3222 int cpu = tracing_get_cpu(inode);
3224 if (cpu == RING_BUFFER_ALL_CPUS)
3225 tracing_reset_online_cpus(&tr->trace_buffer);
3227 tracing_reset(&tr->trace_buffer, cpu);
3230 if (file->f_mode & FMODE_READ) {
3231 iter = __tracing_open(inode, file, false);
3233 ret = PTR_ERR(iter);
3234 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3235 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3239 trace_array_put(tr);
3245 * Some tracers are not suitable for instance buffers.
3246 * A tracer is always available for the global array (toplevel)
3247 * or if it explicitly states that it is.
3250 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3252 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
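/*
 * Example (editor's sketch): a tracer opts in to instance buffers by
 * setting allow_instances in its struct tracer; "example_tracer" here
 * is purely illustrative:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name		 = "example",
 *		.allow_instances = true,
 *	};
 *
 * Without the flag, the tracer is offered only on the global
 * (toplevel) trace array.
 */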
3255 /* Find the next tracer that this trace array may use */
3256 static struct tracer *
3257 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3259 while (t && !trace_ok_for_array(t, tr))
3266 t_next(struct seq_file *m, void *v, loff_t *pos)
3268 struct trace_array *tr = m->private;
3269 struct tracer *t = v;
3274 t = get_tracer_for_array(tr, t->next);
3279 static void *t_start(struct seq_file *m, loff_t *pos)
3281 struct trace_array *tr = m->private;
3285 mutex_lock(&trace_types_lock);
3287 t = get_tracer_for_array(tr, trace_types);
3288 for (; t && l < *pos; t = t_next(m, t, &l))
3294 static void t_stop(struct seq_file *m, void *p)
3296 mutex_unlock(&trace_types_lock);
3299 static int t_show(struct seq_file *m, void *v)
3301 struct tracer *t = v;
3306 seq_puts(m, t->name);
3315 static const struct seq_operations show_traces_seq_ops = {
3322 static int show_traces_open(struct inode *inode, struct file *file)
3324 struct trace_array *tr = inode->i_private;
3328 if (tracing_disabled)
3331 ret = seq_open(file, &show_traces_seq_ops);
3335 m = file->private_data;
3342 tracing_write_stub(struct file *filp, const char __user *ubuf,
3343 size_t count, loff_t *ppos)
3348 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3352 if (file->f_mode & FMODE_READ)
3353 ret = seq_lseek(file, offset, whence);
3355 file->f_pos = ret = 0;
3360 static const struct file_operations tracing_fops = {
3361 .open = tracing_open,
3363 .write = tracing_write_stub,
3364 .llseek = tracing_lseek,
3365 .release = tracing_release,
3368 static const struct file_operations show_traces_fops = {
3369 .open = show_traces_open,
3371 .release = seq_release,
3372 .llseek = seq_lseek,
3376 * The tracer itself will not take this lock, but still we want
3377 * to provide a consistent cpumask to user-space:
3379 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3382 * Temporary storage for the character representation of the
3383 * CPU bitmask (and one more byte for the newline):
3385 static char mask_str[NR_CPUS + 1];
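/*
 * Usage sketch (illustrative shell session): the file is read and
 * written as a hex CPU bitmask, so limiting tracing to CPUs 0 and 1
 * looks like:
 *
 *	# echo 3 > tracing_cpumask
 *	# cat tracing_cpumask
 *	3
 */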
3388 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3389 size_t count, loff_t *ppos)
3391 struct trace_array *tr = file_inode(filp)->i_private;
3394 mutex_lock(&tracing_cpumask_update_lock);
3396 len = snprintf(mask_str, count, "%*pb\n",
3397 cpumask_pr_args(tr->tracing_cpumask));
3402 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3405 mutex_unlock(&tracing_cpumask_update_lock);
3411 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3412 size_t count, loff_t *ppos)
3414 struct trace_array *tr = file_inode(filp)->i_private;
3415 cpumask_var_t tracing_cpumask_new;
3418 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3421 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3425 mutex_lock(&tracing_cpumask_update_lock);
3427 local_irq_disable();
3428 arch_spin_lock(&tr->max_lock);
3429 for_each_tracing_cpu(cpu) {
3431 * Increase/decrease the disabled counter if we are
3432 * about to flip a bit in the cpumask:
3434 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3435 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3436 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3437 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3439 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3440 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3441 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3442 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3445 arch_spin_unlock(&tr->max_lock);
3448 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3450 mutex_unlock(&tracing_cpumask_update_lock);
3451 free_cpumask_var(tracing_cpumask_new);
3456 free_cpumask_var(tracing_cpumask_new);
3461 static const struct file_operations tracing_cpumask_fops = {
3462 .open = tracing_open_generic_tr,
3463 .read = tracing_cpumask_read,
3464 .write = tracing_cpumask_write,
3465 .release = tracing_release_generic_tr,
3466 .llseek = generic_file_llseek,
3469 static int tracing_trace_options_show(struct seq_file *m, void *v)
3471 struct tracer_opt *trace_opts;
3472 struct trace_array *tr = m->private;
3476 mutex_lock(&trace_types_lock);
3477 tracer_flags = tr->current_trace->flags->val;
3478 trace_opts = tr->current_trace->flags->opts;
3480 for (i = 0; trace_options[i]; i++) {
3481 if (tr->trace_flags & (1 << i))
3482 seq_printf(m, "%s\n", trace_options[i]);
3484 seq_printf(m, "no%s\n", trace_options[i]);
3487 for (i = 0; trace_opts[i].name; i++) {
3488 if (tracer_flags & trace_opts[i].bit)
3489 seq_printf(m, "%s\n", trace_opts[i].name);
3491 seq_printf(m, "no%s\n", trace_opts[i].name);
3493 mutex_unlock(&trace_types_lock);
3498 static int __set_tracer_option(struct trace_array *tr,
3499 struct tracer_flags *tracer_flags,
3500 struct tracer_opt *opts, int neg)
3502 struct tracer *trace = tracer_flags->trace;
3505 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3510 tracer_flags->val &= ~opts->bit;
3512 tracer_flags->val |= opts->bit;
3516 /* Try to assign a tracer specific option */
3517 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3519 struct tracer *trace = tr->current_trace;
3520 struct tracer_flags *tracer_flags = trace->flags;
3521 struct tracer_opt *opts = NULL;
3524 for (i = 0; tracer_flags->opts[i].name; i++) {
3525 opts = &tracer_flags->opts[i];
3527 if (strcmp(cmp, opts->name) == 0)
3528 return __set_tracer_option(tr, trace->flags, opts, neg);
3534 /* Some tracers require overwrite to stay enabled */
3535 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3537 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
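/*
 * Sketch of how this helper is typically wired up (an illustration,
 * not shown in this excerpt): a latency tracer points its
 * flag_changed callback at it,
 *
 *	.flag_changed	= trace_keep_overwrite,
 *
 * so that set_tracer_flag() below refuses to clear
 * TRACE_ITER_OVERWRITE while the tracer is enabled.
 */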
3543 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3545 /* do nothing if the flag is already in the requested state */
3546 if (!!(tr->trace_flags & mask) == !!enabled)
3549 /* Give the tracer a chance to approve the change */
3550 if (tr->current_trace->flag_changed)
3551 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3555 tr->trace_flags |= mask;
3557 tr->trace_flags &= ~mask;
3559 if (mask == TRACE_ITER_RECORD_CMD)
3560 trace_event_enable_cmd_record(enabled);
3562 if (mask == TRACE_ITER_EVENT_FORK)
3563 trace_event_follow_fork(tr, enabled);
3565 if (mask == TRACE_ITER_OVERWRITE) {
3566 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3567 #ifdef CONFIG_TRACER_MAX_TRACE
3568 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3572 if (mask == TRACE_ITER_PRINTK) {
3573 trace_printk_start_stop_comm(enabled);
3574 trace_printk_control(enabled);
3580 static int trace_set_options(struct trace_array *tr, char *option)
3586 size_t orig_len = strlen(option);
3588 cmp = strstrip(option);
3590 if (strncmp(cmp, "no", 2) == 0) {
3595 mutex_lock(&trace_types_lock);
3597 for (i = 0; trace_options[i]; i++) {
3598 if (strcmp(cmp, trace_options[i]) == 0) {
3599 ret = set_tracer_flag(tr, 1 << i, !neg);
3604 /* If no option could be set, test the specific tracer options */
3605 if (!trace_options[i])
3606 ret = set_tracer_option(tr, cmp, neg);
3608 mutex_unlock(&trace_types_lock);
3611 * If the first trailing whitespace is replaced with '\0' by strstrip,
3612 * turn it back into a space.
3614 if (orig_len > strlen(option))
3615 option[strlen(option)] = ' ';
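/*
 * Usage sketch for the "no" prefix handled above (illustrative):
 *
 *	# echo notrace_printk > trace_options	(clear a core flag)
 *	# echo trace_printk > trace_options	(set it again)
 *
 * A token that matches no core trace_options[] entry is handed to
 * set_tracer_option() for the current tracer's private flags.
 */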
3620 static void __init apply_trace_boot_options(void)
3622 char *buf = trace_boot_options_buf;
3626 option = strsep(&buf, ",");
3632 trace_set_options(&global_trace, option);
3634 /* Put back the comma to allow this to be called again */
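/*
 * Illustrative boot-time usage: trace_boot_options_buf is filled from
 * a "trace_options=" kernel command line parameter, e.g.
 *
 *	trace_options=sym-offset,notrace_printk
 *
 * where each comma-separated token has the same form as a write to
 * the trace_options file.
 */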
3641 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3642 size_t cnt, loff_t *ppos)
3644 struct seq_file *m = filp->private_data;
3645 struct trace_array *tr = m->private;
3649 if (cnt >= sizeof(buf))
3652 if (copy_from_user(buf, ubuf, cnt))
3657 ret = trace_set_options(tr, buf);
3666 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3668 struct trace_array *tr = inode->i_private;
3671 if (tracing_disabled)
3674 if (trace_array_get(tr) < 0)
3677 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3679 trace_array_put(tr);
3684 static const struct file_operations tracing_iter_fops = {
3685 .open = tracing_trace_options_open,
3687 .llseek = seq_lseek,
3688 .release = tracing_single_release_tr,
3689 .write = tracing_trace_options_write,
3692 static const char readme_msg[] =
3693 "tracing mini-HOWTO:\n\n"
3694 "# echo 0 > tracing_on : quick way to disable tracing\n"
3695 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3696 " Important files:\n"
3697 " trace\t\t\t- The static contents of the buffer\n"
3698 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3699 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3700 " current_tracer\t- function and latency tracers\n"
3701 " available_tracers\t- list of configured tracers for current_tracer\n"
3702 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3703 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3704 " trace_clock\t\t-change the clock used to order events\n"
3705 " local: Per cpu clock but may not be synced across CPUs\n"
3706 " global: Synced across CPUs but slows tracing down.\n"
3707 " counter: Not a clock, but just an increment\n"
3708 " uptime: Jiffy counter from time of boot\n"
3709 " perf: Same clock that perf events use\n"
3710 #ifdef CONFIG_X86_64
3711 " x86-tsc: TSC cycle counter\n"
3713 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3714 " tracing_cpumask\t- Limit which CPUs to trace\n"
3715 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3716 "\t\t\t Remove sub-buffer with rmdir\n"
3717 " trace_options\t\t- Set format or modify how tracing happens\n"
3718 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3719 "\t\t\t option name\n"
3720 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3721 #ifdef CONFIG_DYNAMIC_FTRACE
3722 "\n available_filter_functions - list of functions that can be filtered on\n"
3723 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3724 "\t\t\t functions\n"
3725 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3726 "\t modules: Can select a group via module\n"
3727 "\t Format: :mod:<module-name>\n"
3728 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3729 "\t triggers: a command to perform when function is hit\n"
3730 "\t Format: <function>:<trigger>[:count]\n"
3731 "\t trigger: traceon, traceoff\n"
3732 "\t\t enable_event:<system>:<event>\n"
3733 "\t\t disable_event:<system>:<event>\n"
3734 #ifdef CONFIG_STACKTRACE
3737 #ifdef CONFIG_TRACER_SNAPSHOT
3742 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3743 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3744 "\t The first one will disable tracing every time do_fault is hit\n"
3745 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3746 "\t The first time do trap is hit and it disables tracing, the\n"
3747 "\t counter will decrement to 2. If tracing is already disabled,\n"
3748 "\t the counter will not decrement. It only decrements when the\n"
3749 "\t trigger did work\n"
3750 "\t To remove trigger without count:\n"
3751 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3752 "\t To remove trigger with a count:\n"
3753 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3754 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3755 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3756 "\t modules: Can select a group via module command :mod:\n"
3757 "\t Does not accept triggers\n"
3758 #endif /* CONFIG_DYNAMIC_FTRACE */
3759 #ifdef CONFIG_FUNCTION_TRACER
3760 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3763 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3764 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3765 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3766 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3768 #ifdef CONFIG_TRACER_SNAPSHOT
3769 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3770 "\t\t\t snapshot buffer. Read the contents for more\n"
3771 "\t\t\t information\n"
3773 #ifdef CONFIG_STACK_TRACER
3774 " stack_trace\t\t- Shows the max stack trace when active\n"
3775 " stack_max_size\t- Shows current max stack size that was traced\n"
3776 "\t\t\t Write into this file to reset the max size (trigger a\n"
3777 "\t\t\t new trace)\n"
3778 #ifdef CONFIG_DYNAMIC_FTRACE
3779 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3782 #endif /* CONFIG_STACK_TRACER */
3783 " events/\t\t- Directory containing all trace event subsystems:\n"
3784 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3785 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3786 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3788 " filter\t\t- If set, only events passing filter are traced\n"
3789 " events/<system>/<event>/\t- Directory containing control files for\n"
3791 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3792 " filter\t\t- If set, only events passing filter are traced\n"
3793 " trigger\t\t- If set, a command to perform when event is hit\n"
3794 "\t Format: <trigger>[:count][if <filter>]\n"
3795 "\t trigger: traceon, traceoff\n"
3796 "\t enable_event:<system>:<event>\n"
3797 "\t disable_event:<system>:<event>\n"
3798 #ifdef CONFIG_HIST_TRIGGERS
3799 "\t enable_hist:<system>:<event>\n"
3800 "\t disable_hist:<system>:<event>\n"
3802 #ifdef CONFIG_STACKTRACE
3805 #ifdef CONFIG_TRACER_SNAPSHOT
3808 #ifdef CONFIG_HIST_TRIGGERS
3809 "\t\t hist (see below)\n"
3811 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3812 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3813 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3814 "\t events/block/block_unplug/trigger\n"
3815 "\t The first disables tracing every time block_unplug is hit.\n"
3816 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3817 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3818 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3819 "\t Like function triggers, the counter is only decremented if it\n"
3820 "\t enabled or disabled tracing.\n"
3821 "\t To remove a trigger without a count:\n"
3822 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3823 "\t To remove a trigger with a count:\n"
3824 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3825 "\t Filters can be ignored when removing a trigger.\n"
3826 #ifdef CONFIG_HIST_TRIGGERS
3827 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
3828 "\t Format: hist:keys=<field1[,field2,...]>\n"
3829 "\t [:values=<field1[,field2,...]>]\n"
3830 "\t [:sort=<field1[,field2,...]>]\n"
3831 "\t [:size=#entries]\n"
3832 "\t [:pause][:continue][:clear]\n"
3833 "\t [:name=histname1]\n"
3834 "\t [if <filter>]\n\n"
3835 "\t When a matching event is hit, an entry is added to a hash\n"
3836 "\t table using the key(s) and value(s) named, and the value of a\n"
3837 "\t sum called 'hitcount' is incremented. Keys and values\n"
3838 "\t correspond to fields in the event's format description. Keys\n"
3839 "\t can be any field, or the special string 'stacktrace'.\n"
3840 "\t Compound keys consisting of up to two fields can be specified\n"
3841 "\t by the 'keys' keyword. Values must correspond to numeric\n"
3842 "\t fields. Sort keys consisting of up to two fields can be\n"
3843 "\t specified using the 'sort' keyword. The sort direction can\n"
3844 "\t be modified by appending '.descending' or '.ascending' to a\n"
3845 "\t sort field. The 'size' parameter can be used to specify more\n"
3846 "\t or fewer than the default 2048 entries for the hashtable size.\n"
3847 "\t If a hist trigger is given a name using the 'name' parameter,\n"
3848 "\t its histogram data will be shared with other triggers of the\n"
3849 "\t same name, and trigger hits will update this common data.\n\n"
3850 "\t Reading the 'hist' file for the event will dump the hash\n"
3851 "\t table in its entirety to stdout. If there are multiple hist\n"
3852 "\t triggers attached to an event, there will be a table for each\n"
3853 "\t trigger in the output. The table displayed for a named\n"
3854 "\t trigger will be the same as any other instance having the\n"
3855 "\t same name. The default format used to display a given field\n"
3856 "\t can be modified by appending any of the following modifiers\n"
3857 "\t to the field name, as applicable:\n\n"
3858 "\t .hex display a number as a hex value\n"
3859 "\t .sym display an address as a symbol\n"
3860 "\t .sym-offset display an address as a symbol and offset\n"
3861 "\t .execname display a common_pid as a program name\n"
3862 "\t .syscall display a syscall id as a syscall name\n\n"
3863 "\t .log2 display log2 value rather than raw number\n\n"
3864 "\t The 'pause' parameter can be used to pause an existing hist\n"
3865 "\t trigger or to start a hist trigger but not log any events\n"
3866 "\t until told to do so. 'continue' can be used to start or\n"
3867 "\t restart a paused hist trigger.\n\n"
3868 "\t The 'clear' parameter will clear the contents of a running\n"
3869 "\t hist trigger and leave its current paused/active state\n"
3871 "\t The enable_hist and disable_hist triggers can be used to\n"
3872 "\t have one event conditionally start and stop another event's\n"
3873 "\t already-attached hist trigger. The syntax is analagous to\n"
3874 "\t the enable_event and disable_event triggers.\n"
3879 tracing_readme_read(struct file *filp, char __user *ubuf,
3880 size_t cnt, loff_t *ppos)
3882 return simple_read_from_buffer(ubuf, cnt, ppos,
3883 readme_msg, strlen(readme_msg));
3886 static const struct file_operations tracing_readme_fops = {
3887 .open = tracing_open_generic,
3888 .read = tracing_readme_read,
3889 .llseek = generic_file_llseek,
3892 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3894 unsigned int *ptr = v;
3896 if (*pos || m->count)
3901 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3903 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3912 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3918 arch_spin_lock(&trace_cmdline_lock);
3920 v = &savedcmd->map_cmdline_to_pid[0];
3922 v = saved_cmdlines_next(m, v, &l);
3930 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3932 arch_spin_unlock(&trace_cmdline_lock);
3936 static int saved_cmdlines_show(struct seq_file *m, void *v)
3938 char buf[TASK_COMM_LEN];
3939 unsigned int *pid = v;
3941 __trace_find_cmdline(*pid, buf);
3942 seq_printf(m, "%d %s\n", *pid, buf);
3946 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3947 .start = saved_cmdlines_start,
3948 .next = saved_cmdlines_next,
3949 .stop = saved_cmdlines_stop,
3950 .show = saved_cmdlines_show,
3953 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3955 if (tracing_disabled)
3958 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3961 static const struct file_operations tracing_saved_cmdlines_fops = {
3962 .open = tracing_saved_cmdlines_open,
3964 .llseek = seq_lseek,
3965 .release = seq_release,
3969 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3970 size_t cnt, loff_t *ppos)
3975 arch_spin_lock(&trace_cmdline_lock);
3976 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3977 arch_spin_unlock(&trace_cmdline_lock);
3979 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3982 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3984 kfree(s->saved_cmdlines);
3985 kfree(s->map_cmdline_to_pid);
3989 static int tracing_resize_saved_cmdlines(unsigned int val)
3991 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3993 s = kmalloc(sizeof(*s), GFP_KERNEL);
3997 if (allocate_cmdlines_buffer(val, s) < 0) {
4002 arch_spin_lock(&trace_cmdline_lock);
4003 savedcmd_temp = savedcmd;
4005 arch_spin_unlock(&trace_cmdline_lock);
4006 free_saved_cmdlines_buffer(savedcmd_temp);
4012 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4013 size_t cnt, loff_t *ppos)
4018 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4022 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
4023 if (!val || val > PID_MAX_DEFAULT)
4026 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4035 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4036 .open = tracing_open_generic,
4037 .read = tracing_saved_cmdlines_size_read,
4038 .write = tracing_saved_cmdlines_size_write,
4041 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
4042 static union trace_enum_map_item *
4043 update_enum_map(union trace_enum_map_item *ptr)
4045 if (!ptr->map.enum_string) {
4046 if (ptr->tail.next) {
4047 ptr = ptr->tail.next;
4048 /* Set ptr to the next real item (skip head) */
4056 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4058 union trace_enum_map_item *ptr = v;
4061 * Paranoid! If ptr points to end, we don't want to increment past it.
4062 * This really should never happen.
4064 ptr = update_enum_map(ptr);
4065 if (WARN_ON_ONCE(!ptr))
4072 ptr = update_enum_map(ptr);
4077 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4079 union trace_enum_map_item *v;
4082 mutex_lock(&trace_enum_mutex);
4084 v = trace_enum_maps;
4088 while (v && l < *pos) {
4089 v = enum_map_next(m, v, &l);
4095 static void enum_map_stop(struct seq_file *m, void *v)
4097 mutex_unlock(&trace_enum_mutex);
4100 static int enum_map_show(struct seq_file *m, void *v)
4102 union trace_enum_map_item *ptr = v;
4104 seq_printf(m, "%s %ld (%s)\n",
4105 ptr->map.enum_string, ptr->map.enum_value,
4111 static const struct seq_operations tracing_enum_map_seq_ops = {
4112 .start = enum_map_start,
4113 .next = enum_map_next,
4114 .stop = enum_map_stop,
4115 .show = enum_map_show,
4118 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4120 if (tracing_disabled)
4123 return seq_open(filp, &tracing_enum_map_seq_ops);
4126 static const struct file_operations tracing_enum_map_fops = {
4127 .open = tracing_enum_map_open,
4129 .llseek = seq_lseek,
4130 .release = seq_release,
4133 static inline union trace_enum_map_item *
4134 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4136 /* Return tail of array given the head */
4137 return ptr + ptr->head.length + 1;
4141 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4144 struct trace_enum_map **stop;
4145 struct trace_enum_map **map;
4146 union trace_enum_map_item *map_array;
4147 union trace_enum_map_item *ptr;
4152 * The trace_enum_maps array contains the maps plus a head and tail item,
4153 * where the head holds the module and the length of the array, and the
4154 * tail holds a pointer to the next list.
4156 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4158 pr_warn("Unable to allocate trace enum mapping\n");
4162 mutex_lock(&trace_enum_mutex);
4164 if (!trace_enum_maps)
4165 trace_enum_maps = map_array;
4167 ptr = trace_enum_maps;
4169 ptr = trace_enum_jmp_to_tail(ptr);
4170 if (!ptr->tail.next)
4172 ptr = ptr->tail.next;
4175 ptr->tail.next = map_array;
4177 map_array->head.mod = mod;
4178 map_array->head.length = len;
4181 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4182 map_array->map = **map;
4185 memset(map_array, 0, sizeof(*map_array));
4187 mutex_unlock(&trace_enum_mutex);
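/*
 * Resulting layout (sketch) for a module contributing two enum maps;
 * map_array is allocated above with len + 2 items:
 *
 *	[0] head: { .mod = mod, .length = 2 }
 *	[1] map:  { .enum_string, .enum_value }
 *	[2] map:  { .enum_string, .enum_value }
 *	[3] tail: { .next = <next module's array, or NULL> }
 *
 * trace_enum_jmp_to_tail() hops from [0] straight to [3].
 */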
4190 static void trace_create_enum_file(struct dentry *d_tracer)
4192 trace_create_file("enum_map", 0444, d_tracer,
4193 NULL, &tracing_enum_map_fops);
4196 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4197 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4198 static inline void trace_insert_enum_map_file(struct module *mod,
4199 struct trace_enum_map **start, int len) { }
4200 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4202 static void trace_insert_enum_map(struct module *mod,
4203 struct trace_enum_map **start, int len)
4205 struct trace_enum_map **map;
4212 trace_event_enum_update(map, len);
4214 trace_insert_enum_map_file(mod, start, len);
4218 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4219 size_t cnt, loff_t *ppos)
4221 struct trace_array *tr = filp->private_data;
4222 char buf[MAX_TRACER_SIZE+2];
4225 mutex_lock(&trace_types_lock);
4226 r = sprintf(buf, "%s\n", tr->current_trace->name);
4227 mutex_unlock(&trace_types_lock);
4229 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4232 int tracer_init(struct tracer *t, struct trace_array *tr)
4234 tracing_reset_online_cpus(&tr->trace_buffer);
4238 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4242 for_each_tracing_cpu(cpu)
4243 per_cpu_ptr(buf->data, cpu)->entries = val;
4246 #ifdef CONFIG_TRACER_MAX_TRACE
4247 /* resize @trace_buf's buffer to the size of @size_buf's entries */
4248 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4249 struct trace_buffer *size_buf, int cpu_id)
4253 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4254 for_each_tracing_cpu(cpu) {
4255 ret = ring_buffer_resize(trace_buf->buffer,
4256 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4259 per_cpu_ptr(trace_buf->data, cpu)->entries =
4260 per_cpu_ptr(size_buf->data, cpu)->entries;
4263 ret = ring_buffer_resize(trace_buf->buffer,
4264 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4266 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4267 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4272 #endif /* CONFIG_TRACER_MAX_TRACE */
4274 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4275 unsigned long size, int cpu)
4280 * If the kernel or the user changes the size of the ring buffer,
4281 * we use the size that was given, and we can forget about
4282 * expanding it later.
4284 ring_buffer_expanded = true;
4286 /* May be called before buffers are initialized */
4287 if (!tr->trace_buffer.buffer)
4290 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4294 #ifdef CONFIG_TRACER_MAX_TRACE
4295 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4296 !tr->current_trace->use_max_tr)
4299 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4301 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4302 &tr->trace_buffer, cpu);
4305 * AARGH! We are left with a max buffer of a
4306 * different size!!!!
4307 * The max buffer is our "snapshot" buffer.
4308 * When a tracer needs a snapshot (one of the
4309 * latency tracers), it swaps the max buffer
4310 * with the saved snapshot. We succeeded in
4311 * updating the size of the main buffer, but failed
4312 * to update the size of the max buffer. And when we
4313 * tried to reset the main buffer to the original
4314 * size, we failed there too. This is very unlikely
4315 * to happen, but if it does, warn and kill all
4319 tracing_disabled = 1;
4324 if (cpu == RING_BUFFER_ALL_CPUS)
4325 set_buffer_entries(&tr->max_buffer, size);
4327 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4330 #endif /* CONFIG_TRACER_MAX_TRACE */
4332 if (cpu == RING_BUFFER_ALL_CPUS)
4333 set_buffer_entries(&tr->trace_buffer, size);
4335 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4340 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4341 unsigned long size, int cpu_id)
4345 mutex_lock(&trace_types_lock);
4347 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4348 /* make sure this cpu is enabled in the mask */
4349 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4355 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4360 mutex_unlock(&trace_types_lock);
4367 * tracing_update_buffers - used by tracing facility to expand ring buffers
4369 * To save memory when tracing is never used on a system that has it
4370 * configured in, the ring buffers are set to a minimum size. But once
4371 * a user starts to use the tracing facility, they need to grow
4372 * to their default size.
4374 * This function is to be called when a tracer is about to be used.
4376 int tracing_update_buffers(void)
4380 mutex_lock(&trace_types_lock);
4381 if (!ring_buffer_expanded)
4382 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4383 RING_BUFFER_ALL_CPUS);
4384 mutex_unlock(&trace_types_lock);
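/*
 * Illustrative call site (sketch): code about to enable a tracer or
 * event first makes sure the buffers have grown to full size:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */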
4389 struct trace_option_dentry;
4392 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4395 * Used to clear out the tracer before deletion of an instance.
4396 * Must have trace_types_lock held.
4398 static void tracing_set_nop(struct trace_array *tr)
4400 if (tr->current_trace == &nop_trace)
4403 tr->current_trace->enabled--;
4405 if (tr->current_trace->reset)
4406 tr->current_trace->reset(tr);
4408 tr->current_trace = &nop_trace;
4411 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4413 /* Only enable if the directory has been created already. */
4417 create_trace_option_files(tr, t);
4420 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4423 #ifdef CONFIG_TRACER_MAX_TRACE
4428 mutex_lock(&trace_types_lock);
4430 if (!ring_buffer_expanded) {
4431 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4432 RING_BUFFER_ALL_CPUS);
4438 for (t = trace_types; t; t = t->next) {
4439 if (strcmp(t->name, buf) == 0)
4446 if (t == tr->current_trace)
4449 /* Some tracers are only allowed for the top level buffer */
4450 if (!trace_ok_for_array(t, tr)) {
4455 /* If trace pipe files are being read, we can't change the tracer */
4456 if (tr->current_trace->ref) {
4461 trace_branch_disable();
4463 tr->current_trace->enabled--;
4465 if (tr->current_trace->reset)
4466 tr->current_trace->reset(tr);
4468 /* Current trace needs to be nop_trace before synchronize_sched */
4469 tr->current_trace = &nop_trace;
4471 #ifdef CONFIG_TRACER_MAX_TRACE
4472 had_max_tr = tr->allocated_snapshot;
4474 if (had_max_tr && !t->use_max_tr) {
4476 * We need to make sure that the update_max_tr sees that
4477 * current_trace changed to nop_trace to keep it from
4478 * swapping the buffers after we resize it.
4479 * update_max_tr() is called with interrupts disabled,
4480 * so a synchronize_sched() is sufficient.
4482 synchronize_sched();
4487 #ifdef CONFIG_TRACER_MAX_TRACE
4488 if (t->use_max_tr && !had_max_tr) {
4489 ret = alloc_snapshot(tr);
4496 ret = tracer_init(t, tr);
4501 tr->current_trace = t;
4502 tr->current_trace->enabled++;
4503 trace_branch_enable(tr);
4505 mutex_unlock(&trace_types_lock);
4511 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4512 size_t cnt, loff_t *ppos)
4514 struct trace_array *tr = filp->private_data;
4515 char buf[MAX_TRACER_SIZE+1];
4522 if (cnt > MAX_TRACER_SIZE)
4523 cnt = MAX_TRACER_SIZE;
4525 if (copy_from_user(buf, ubuf, cnt))
4530 /* strip trailing whitespace. */
4531 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4534 err = tracing_set_tracer(tr, buf);
4544 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4545 size_t cnt, loff_t *ppos)
4550 r = snprintf(buf, sizeof(buf), "%ld\n",
4551 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4552 if (r > sizeof(buf))
4554 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4558 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4559 size_t cnt, loff_t *ppos)
4564 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4574 tracing_thresh_read(struct file *filp, char __user *ubuf,
4575 size_t cnt, loff_t *ppos)
4577 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4581 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4582 size_t cnt, loff_t *ppos)
4584 struct trace_array *tr = filp->private_data;
4587 mutex_lock(&trace_types_lock);
4588 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4592 if (tr->current_trace->update_thresh) {
4593 ret = tr->current_trace->update_thresh(tr);
4600 mutex_unlock(&trace_types_lock);
4605 #ifdef CONFIG_TRACER_MAX_TRACE
4608 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4609 size_t cnt, loff_t *ppos)
4611 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4615 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4616 size_t cnt, loff_t *ppos)
4618 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4623 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4625 struct trace_array *tr = inode->i_private;
4626 struct trace_iterator *iter;
4629 if (tracing_disabled)
4632 if (trace_array_get(tr) < 0)
4635 mutex_lock(&trace_types_lock);
4637 /* create a buffer to store the information to pass to userspace */
4638 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4641 __trace_array_put(tr);
4645 trace_seq_init(&iter->seq);
4646 iter->trace = tr->current_trace;
4648 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4653 /* trace pipe does not show start of buffer */
4654 cpumask_setall(iter->started);
4656 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4657 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4659 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4660 if (trace_clocks[tr->clock_id].in_ns)
4661 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4664 iter->trace_buffer = &tr->trace_buffer;
4665 iter->cpu_file = tracing_get_cpu(inode);
4666 mutex_init(&iter->mutex);
4667 filp->private_data = iter;
4669 if (iter->trace->pipe_open)
4670 iter->trace->pipe_open(iter);
4672 nonseekable_open(inode, filp);
4674 tr->current_trace->ref++;
4676 mutex_unlock(&trace_types_lock);
4682 __trace_array_put(tr);
4683 mutex_unlock(&trace_types_lock);
4687 static int tracing_release_pipe(struct inode *inode, struct file *file)
4689 struct trace_iterator *iter = file->private_data;
4690 struct trace_array *tr = inode->i_private;
4692 mutex_lock(&trace_types_lock);
4694 tr->current_trace->ref--;
4696 if (iter->trace->pipe_close)
4697 iter->trace->pipe_close(iter);
4699 mutex_unlock(&trace_types_lock);
4701 free_cpumask_var(iter->started);
4702 mutex_destroy(&iter->mutex);
4705 trace_array_put(tr);
4711 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4713 struct trace_array *tr = iter->tr;
4715 /* Iterators are static, they should be filled or empty */
4716 if (trace_buffer_iter(iter, iter->cpu_file))
4717 return POLLIN | POLLRDNORM;
4719 if (tr->trace_flags & TRACE_ITER_BLOCK)
4721 * Always select as readable when in blocking mode
4723 return POLLIN | POLLRDNORM;
4725 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4730 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4732 struct trace_iterator *iter = filp->private_data;
4734 return trace_poll(iter, filp, poll_table);
4737 /* Must be called with iter->mutex held. */
4738 static int tracing_wait_pipe(struct file *filp)
4740 struct trace_iterator *iter = filp->private_data;
4743 while (trace_empty(iter)) {
4745 if ((filp->f_flags & O_NONBLOCK)) {
4750 * We block until we read something and tracing is disabled.
4751 * We still block if tracing is disabled, but we have never
4752 * read anything. This allows a user to cat this file, and
4753 * then enable tracing. But after we have read something,
4754 * we give an EOF when tracing is again disabled.
4756 * iter->pos will be 0 if we haven't read anything.
4758 if (!tracing_is_on() && iter->pos)
4761 mutex_unlock(&iter->mutex);
4763 ret = wait_on_pipe(iter, false);
4765 mutex_lock(&iter->mutex);
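/*
 * Observable behavior (illustrative): a reader of trace_pipe blocks
 * while the buffer is empty and only sees EOF once it has read
 * something and tracing is disabled again, so
 *
 *	# cat trace_pipe
 *
 * simply waits until events arrive.
 */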
4778 tracing_read_pipe(struct file *filp, char __user *ubuf,
4779 size_t cnt, loff_t *ppos)
4781 struct trace_iterator *iter = filp->private_data;
4784 /* return any leftover data */
4785 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4789 trace_seq_init(&iter->seq);
4792 * Avoid more than one consumer on a single file descriptor
4793 * This is just a matter of trace coherency; the ring buffer itself is protected.
4796 mutex_lock(&iter->mutex);
4797 if (iter->trace->read) {
4798 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4804 sret = tracing_wait_pipe(filp);
4808 /* stop when tracing is finished */
4809 if (trace_empty(iter)) {
4814 if (cnt >= PAGE_SIZE)
4815 cnt = PAGE_SIZE - 1;
4817 /* reset all but tr, trace, and overruns */
4818 memset(&iter->seq, 0,
4819 sizeof(struct trace_iterator) -
4820 offsetof(struct trace_iterator, seq));
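/*
 * The memset above relies on seq marking the start of the resettable
 * region of struct trace_iterator: everything from seq to the end of
 * the structure is zeroed, while the members declared before it
 * (tr, trace, the buffer iterators, ...) are left intact.
 */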
4821 cpumask_clear(iter->started);
4824 trace_event_read_lock();
4825 trace_access_lock(iter->cpu_file);
4826 while (trace_find_next_entry_inc(iter) != NULL) {
4827 enum print_line_t ret;
4828 int save_len = iter->seq.seq.len;
4830 ret = print_trace_line(iter);
4831 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4832 /* don't print partial lines */
4833 iter->seq.seq.len = save_len;
4836 if (ret != TRACE_TYPE_NO_CONSUME)
4837 trace_consume(iter);
4839 if (trace_seq_used(&iter->seq) >= cnt)
4843 * Setting the full flag means we reached the trace_seq buffer
4844 * size; we should have left via the partial output condition
4845 * above. One of the trace_seq_* functions is not used properly.
4847 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4850 trace_access_unlock(iter->cpu_file);
4851 trace_event_read_unlock();
4853 /* Now copy what we have to the user */
4854 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4855 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4856 trace_seq_init(&iter->seq);
4859 * If there was nothing to send to user, in spite of consuming trace
4860 * entries, go back to wait for more entries.
4866 mutex_unlock(&iter->mutex);
4871 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
4874 __free_page(spd->pages[idx]);
4877 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
4879 .confirm = generic_pipe_buf_confirm,
4880 .release = generic_pipe_buf_release,
4881 .steal = generic_pipe_buf_steal,
4882 .get = generic_pipe_buf_get,
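/*
 * tracing_fill_pipe_page() formats up to one page worth of entries
 * into iter->seq for the splice path below, consuming entries until
 * the seq buffer fills or 'rem' bytes have been produced; it returns
 * the number of bytes still wanted.
 */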
4886 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4892 /* Seq buffer is page-sized, exactly what we need. */
4894 save_len = iter->seq.seq.len;
4895 ret = print_trace_line(iter);
4897 if (trace_seq_has_overflowed(&iter->seq)) {
4898 iter->seq.seq.len = save_len;
4903 * This should not be hit, because it should only
4904 * be set if the iter->seq overflowed. But check it
4905 * anyway to be safe.
4907 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4908 iter->seq.seq.len = save_len;
4912 count = trace_seq_used(&iter->seq) - save_len;
4915 iter->seq.seq.len = save_len;
4919 if (ret != TRACE_TYPE_NO_CONSUME)
4920 trace_consume(iter);
4922 if (!trace_find_next_entry_inc(iter)) {
4932 static ssize_t tracing_splice_read_pipe(struct file *filp,
4934 struct pipe_inode_info *pipe,
4938 struct page *pages_def[PIPE_DEF_BUFFERS];
4939 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4940 struct trace_iterator *iter = filp->private_data;
4941 struct splice_pipe_desc spd = {
	.pages = pages_def,
4943 .partial = partial_def,
4944 .nr_pages = 0, /* This gets updated below. */
4945 .nr_pages_max = PIPE_DEF_BUFFERS,
4947 .ops = &tracing_pipe_buf_ops,
4948 .spd_release = tracing_spd_release_pipe,
4954 if (splice_grow_spd(pipe, &spd))
	return -ENOMEM;
4957 mutex_lock(&iter->mutex);
4959 if (iter->trace->splice_read) {
4960 ret = iter->trace->splice_read(iter, filp,
4961 ppos, pipe, len, flags);
4966 ret = tracing_wait_pipe(filp);
4970 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4975 trace_event_read_lock();
4976 trace_access_lock(iter->cpu_file);
4978 /* Fill as many pages as possible. */
4979 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4980 spd.pages[i] = alloc_page(GFP_KERNEL);
	if (!spd.pages[i])
		break;
4984 rem = tracing_fill_pipe_page(rem, iter);
4986 /* Copy the data into the page, so we can start over. */
4987 ret = trace_seq_to_buffer(&iter->seq,
4988 page_address(spd.pages[i]),
4989 trace_seq_used(&iter->seq));
4991 __free_page(spd.pages[i]);
4994 spd.partial[i].offset = 0;
4995 spd.partial[i].len = trace_seq_used(&iter->seq);
4997 trace_seq_init(&iter->seq);
5000 trace_access_unlock(iter->cpu_file);
5001 trace_event_read_unlock();
5002 mutex_unlock(&iter->mutex);
5007 ret = splice_to_pipe(pipe, &spd);
5011 splice_shrink_spd(&spd);
5015 mutex_unlock(&iter->mutex);
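/*
 * buffer_size_kb read side. An illustrative (made up) interaction:
 *
 *	# cat buffer_size_kb
 *	1408 (expanded: 1408)
 *
 * "X" is shown when all CPUs are requested but the per-cpu sizes
 * differ.
 */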
5020 tracing_entries_read(struct file *filp, char __user *ubuf,
5021 size_t cnt, loff_t *ppos)
5023 struct inode *inode = file_inode(filp);
5024 struct trace_array *tr = inode->i_private;
5025 int cpu = tracing_get_cpu(inode);
5030 mutex_lock(&trace_types_lock);
5032 if (cpu == RING_BUFFER_ALL_CPUS) {
5033 int cpu, buf_size_same;
5038 /* check if all cpu sizes are same */
5039 for_each_tracing_cpu(cpu) {
5040 /* fill in the size from the first enabled cpu */
	if (size == 0)
5042 		size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5043 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
		buf_size_same = 0;
		break;
	}
5049 if (buf_size_same) {
5050 if (!ring_buffer_expanded)
5051 r = sprintf(buf, "%lu (expanded: %lu)\n",
		    size >> 10,
5053 		    trace_buf_size >> 10);
5055 r = sprintf(buf, "%lu\n", size >> 10);
5057 r = sprintf(buf, "X\n");
5059 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5061 mutex_unlock(&trace_types_lock);
5063 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5068 tracing_entries_write(struct file *filp, const char __user *ubuf,
5069 size_t cnt, loff_t *ppos)
5071 struct inode *inode = file_inode(filp);
5072 struct trace_array *tr = inode->i_private;
5076 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5080 /* must have at least 1 entry */
5084 /* value is in KB */
5086 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5096 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5097 size_t cnt, loff_t *ppos)
5099 struct trace_array *tr = filp->private_data;
5102 unsigned long size = 0, expanded_size = 0;
5104 mutex_lock(&trace_types_lock);
5105 for_each_tracing_cpu(cpu) {
5106 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5107 if (!ring_buffer_expanded)
5108 expanded_size += trace_buf_size >> 10;
5110 if (ring_buffer_expanded)
5111 r = sprintf(buf, "%lu\n", size);
	else
5113 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5114 mutex_unlock(&trace_types_lock);
5116 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5120 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5121 size_t cnt, loff_t *ppos)
5124 * There is no need to read what the user has written, this function
5125 * is just to make sure that there is no error when "echo" is used
5134 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5136 struct trace_array *tr = inode->i_private;
5138 /* disable tracing ? */
5139 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5140 tracer_tracing_off(tr);
5141 /* resize the ring buffer to 0 */
5142 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5144 trace_array_put(tr);
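/*
 * trace_marker write side. A typical use from user space
 * (illustrative):
 *
 *	echo "hello from userspace" > trace_marker
 *
 * The string is recorded as a print entry at the current point in the
 * trace.
 */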
5150 tracing_mark_write(struct file *filp, const char __user *ubuf,
5151 size_t cnt, loff_t *fpos)
5153 unsigned long addr = (unsigned long)ubuf;
5154 struct trace_array *tr = filp->private_data;
5155 struct ring_buffer_event *event;
5156 struct ring_buffer *buffer;
5157 struct print_entry *entry;
5158 unsigned long irq_flags;
5159 struct page *pages[2];
5169 if (tracing_disabled)
5172 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5175 if (cnt > TRACE_BUF_SIZE)
5176 cnt = TRACE_BUF_SIZE;
5179 * Userspace is injecting traces into the kernel trace buffer.
5180 * We want to be as non-intrusive as possible.
5181 * To do so, we do not want to allocate any special buffers
5182 * or take any locks, but instead write the userspace data
5183 * straight into the ring buffer.
5185 * First we need to pin the userspace buffer into memory. It most
5186 * likely already is resident, because the caller just referenced it,
5187 * but there's no guarantee that it is. By using get_user_pages_fast()
5188 * and kmap_atomic()/kunmap_atomic() we can get access to the
5189 * pages directly. We then write the data directly into the
 * ring buffer.
5192 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5194 /* check if we cross pages */
5195 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5198 offset = addr & (PAGE_SIZE - 1);
5201 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5202 if (ret < nr_pages) {
	while (--ret >= 0)
5204 		put_page(pages[ret]);
5209 for (i = 0; i < nr_pages; i++)
5210 map_page[i] = kmap_atomic(pages[i]);
5212 local_save_flags(irq_flags);
5213 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5214 buffer = tr->trace_buffer.buffer;
5215 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5216 irq_flags, preempt_count());
5218 /* Ring buffer disabled, return as if not open for write */
5223 entry = ring_buffer_event_data(event);
5224 entry->ip = _THIS_IP_;
5226 if (nr_pages == 2) {
5227 len = PAGE_SIZE - offset;
5228 memcpy(&entry->buf, map_page[0] + offset, len);
5229 memcpy(&entry->buf[len], map_page[1], cnt - len);
5231 memcpy(&entry->buf, map_page[0] + offset, cnt);
5233 if (entry->buf[cnt - 1] != '\n') {
5234 entry->buf[cnt] = '\n';
5235 entry->buf[cnt + 1] = '\0';
	} else
5237 entry->buf[cnt] = '\0';
5239 __buffer_unlock_commit(buffer, event);
5246 for (i = nr_pages - 1; i >= 0; i--) {
5247 kunmap_atomic(map_page[i]);
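/*
 * trace_clock file. Reading it lists the available clocks with the
 * current one in brackets, e.g. (illustrative, the exact list depends
 * on trace_clocks[]):
 *
 *	[local] global counter ...
 *
 * Writing one of the names switches the clock; tracing_set_clock()
 * below also resets the buffers, since old and new timestamps are not
 * comparable.
 */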
5254 static int tracing_clock_show(struct seq_file *m, void *v)
5256 struct trace_array *tr = m->private;
5259 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5261 seq_printf(m, "%s%s%s%s", i ? " " : "",
5262 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5263 i == tr->clock_id ? "]" : "");
5269 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5273 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5274 if (strcmp(trace_clocks[i].name, clockstr) == 0)
		break;
	}
5277 if (i == ARRAY_SIZE(trace_clocks))
	return -EINVAL;
5280 mutex_lock(&trace_types_lock);
5284 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5287 * New clock may not be consistent with the previous clock.
5288 * Reset the buffer so that it doesn't have incomparable timestamps.
5290 tracing_reset_online_cpus(&tr->trace_buffer);
5292 #ifdef CONFIG_TRACER_MAX_TRACE
5293 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5294 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5295 tracing_reset_online_cpus(&tr->max_buffer);
5298 mutex_unlock(&trace_types_lock);
5303 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5304 size_t cnt, loff_t *fpos)
5306 struct seq_file *m = filp->private_data;
5307 struct trace_array *tr = m->private;
5309 const char *clockstr;
5312 if (cnt >= sizeof(buf))
5315 if (copy_from_user(buf, ubuf, cnt))
5320 clockstr = strstrip(buf);
5322 ret = tracing_set_clock(tr, clockstr);
5331 static int tracing_clock_open(struct inode *inode, struct file *file)
5333 struct trace_array *tr = inode->i_private;
5336 if (tracing_disabled)
	return -ENODEV;
5339 if (trace_array_get(tr))
	return -ENODEV;
5342 ret = single_open(file, tracing_clock_show, inode->i_private);
5344 trace_array_put(tr);
5349 struct ftrace_buffer_info {
5350 struct trace_iterator iter;
5355 #ifdef CONFIG_TRACER_SNAPSHOT
5356 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5358 struct trace_array *tr = inode->i_private;
5359 struct trace_iterator *iter;
5363 if (trace_array_get(tr) < 0)
5366 if (file->f_mode & FMODE_READ) {
5367 iter = __tracing_open(inode, file, true);
5369 ret = PTR_ERR(iter);
5371 /* Writes still need the seq_file to hold the private data */
5373 m = kzalloc(sizeof(*m), GFP_KERNEL);
5376 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5384 iter->trace_buffer = &tr->max_buffer;
5385 iter->cpu_file = tracing_get_cpu(inode);
5387 file->private_data = m;
5391 trace_array_put(tr);
5397 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5400 struct seq_file *m = filp->private_data;
5401 struct trace_iterator *iter = m->private;
5402 struct trace_array *tr = iter->tr;
5406 ret = tracing_update_buffers();
5410 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5414 mutex_lock(&trace_types_lock);
5416 if (tr->current_trace->use_max_tr) {
5423 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5427 if (tr->allocated_snapshot)
5431 /* Only allow per-cpu swap if the ring buffer supports it */
5432 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5433 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5438 if (!tr->allocated_snapshot) {
5439 ret = alloc_snapshot(tr);
5443 local_irq_disable();
5444 /* Now, we're going to swap */
5445 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5446 update_max_tr(tr, current, smp_processor_id());
5448 update_max_tr_single(tr, current, iter->cpu_file);
5452 if (tr->allocated_snapshot) {
5453 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5454 tracing_reset_online_cpus(&tr->max_buffer);
5456 tracing_reset(&tr->max_buffer, iter->cpu_file);
5466 mutex_unlock(&trace_types_lock);
5470 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5472 struct seq_file *m = file->private_data;
5475 ret = tracing_release(inode, file);
5477 if (file->f_mode & FMODE_READ)
5480 /* If write only, the seq_file is just a stub */
5488 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5489 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5490 size_t count, loff_t *ppos);
5491 static int tracing_buffers_release(struct inode *inode, struct file *file);
5492 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5493 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5495 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5497 struct ftrace_buffer_info *info;
5500 ret = tracing_buffers_open(inode, filp);
5504 info = filp->private_data;
5506 if (info->iter.trace->use_max_tr) {
5507 tracing_buffers_release(inode, filp);
5511 info->iter.snapshot = true;
5512 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5517 #endif /* CONFIG_TRACER_SNAPSHOT */
5520 static const struct file_operations tracing_thresh_fops = {
5521 .open = tracing_open_generic,
5522 .read = tracing_thresh_read,
5523 .write = tracing_thresh_write,
5524 .llseek = generic_file_llseek,
5527 #ifdef CONFIG_TRACER_MAX_TRACE
5528 static const struct file_operations tracing_max_lat_fops = {
5529 .open = tracing_open_generic,
5530 .read = tracing_max_lat_read,
5531 .write = tracing_max_lat_write,
5532 .llseek = generic_file_llseek,
5536 static const struct file_operations set_tracer_fops = {
5537 .open = tracing_open_generic,
5538 .read = tracing_set_trace_read,
5539 .write = tracing_set_trace_write,
5540 .llseek = generic_file_llseek,
5543 static const struct file_operations tracing_pipe_fops = {
5544 .open = tracing_open_pipe,
5545 .poll = tracing_poll_pipe,
5546 .read = tracing_read_pipe,
5547 .splice_read = tracing_splice_read_pipe,
5548 .release = tracing_release_pipe,
5549 .llseek = no_llseek,
5552 static const struct file_operations tracing_entries_fops = {
5553 .open = tracing_open_generic_tr,
5554 .read = tracing_entries_read,
5555 .write = tracing_entries_write,
5556 .llseek = generic_file_llseek,
5557 .release = tracing_release_generic_tr,
5560 static const struct file_operations tracing_total_entries_fops = {
5561 .open = tracing_open_generic_tr,
5562 .read = tracing_total_entries_read,
5563 .llseek = generic_file_llseek,
5564 .release = tracing_release_generic_tr,
5567 static const struct file_operations tracing_free_buffer_fops = {
5568 .open = tracing_open_generic_tr,
5569 .write = tracing_free_buffer_write,
5570 .release = tracing_free_buffer_release,
5573 static const struct file_operations tracing_mark_fops = {
5574 .open = tracing_open_generic_tr,
5575 .write = tracing_mark_write,
5576 .llseek = generic_file_llseek,
5577 .release = tracing_release_generic_tr,
5580 static const struct file_operations trace_clock_fops = {
5581 .open = tracing_clock_open,
	.read = seq_read,
5583 .llseek = seq_lseek,
5584 .release = tracing_single_release_tr,
5585 .write = tracing_clock_write,
5588 #ifdef CONFIG_TRACER_SNAPSHOT
5589 static const struct file_operations snapshot_fops = {
5590 .open = tracing_snapshot_open,
	.read = seq_read,
5592 .write = tracing_snapshot_write,
5593 .llseek = tracing_lseek,
5594 .release = tracing_snapshot_release,
5597 static const struct file_operations snapshot_raw_fops = {
5598 .open = snapshot_raw_open,
5599 .read = tracing_buffers_read,
5600 .release = tracing_buffers_release,
5601 .splice_read = tracing_buffers_splice_read,
5602 .llseek = no_llseek,
5605 #endif /* CONFIG_TRACER_SNAPSHOT */
5607 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5609 struct trace_array *tr = inode->i_private;
5610 struct ftrace_buffer_info *info;
5613 if (tracing_disabled)
	return -ENODEV;
5616 if (trace_array_get(tr) < 0)
	return -ENODEV;
5619 info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
5621 	trace_array_put(tr);
		return -ENOMEM;
	}
5625 mutex_lock(&trace_types_lock);
5628 info->iter.cpu_file = tracing_get_cpu(inode);
5629 info->iter.trace = tr->current_trace;
5630 info->iter.trace_buffer = &tr->trace_buffer;
5632 /* Force reading ring buffer for first read */
5633 info->read = (unsigned int)-1;
5635 filp->private_data = info;
5637 tr->current_trace->ref++;
5639 mutex_unlock(&trace_types_lock);
5641 ret = nonseekable_open(inode, filp);
5643 trace_array_put(tr);
5649 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5651 struct ftrace_buffer_info *info = filp->private_data;
5652 struct trace_iterator *iter = &info->iter;
5654 return trace_poll(iter, filp, poll_table);
5658 tracing_buffers_read(struct file *filp, char __user *ubuf,
5659 size_t count, loff_t *ppos)
5661 struct ftrace_buffer_info *info = filp->private_data;
5662 struct trace_iterator *iter = &info->iter;
5669 #ifdef CONFIG_TRACER_MAX_TRACE
5670 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5675 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5680 /* Do we have previous read data to read? */
5681 if (info->read < PAGE_SIZE)
5685 trace_access_lock(iter->cpu_file);
5686 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5690 trace_access_unlock(iter->cpu_file);
5693 if (trace_empty(iter)) {
5694 if ((filp->f_flags & O_NONBLOCK))
5697 ret = wait_on_pipe(iter, false);
5708 size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;
5712 ret = copy_to_user(ubuf, info->spare + info->read, size);
5724 static int tracing_buffers_release(struct inode *inode, struct file *file)
5726 struct ftrace_buffer_info *info = file->private_data;
5727 struct trace_iterator *iter = &info->iter;
5729 mutex_lock(&trace_types_lock);
5731 iter->tr->current_trace->ref--;
5733 __trace_array_put(iter->tr);
5736 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5739 mutex_unlock(&trace_types_lock);
5745 struct ring_buffer *buffer;
5750 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5751 struct pipe_buffer *buf)
5753 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5758 ring_buffer_free_read_page(ref->buffer, ref->page);
5763 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5764 struct pipe_buffer *buf)
5766 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5771 /* Pipe buffer operations for a buffer. */
5772 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge = 0,
5774 .confirm = generic_pipe_buf_confirm,
5775 .release = buffer_pipe_buf_release,
5776 .steal = generic_pipe_buf_steal,
5777 .get = buffer_pipe_buf_get,
5781 * Callback from splice_to_pipe(), if we need to release some pages
5782 * at the end of the spd in case we errored out in filling the pipe.
5784 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5786 struct buffer_ref *ref =
5787 (struct buffer_ref *)spd->partial[i].private;
5792 ring_buffer_free_read_page(ref->buffer, ref->page);
5794 spd->partial[i].private = 0;
5798 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5799 struct pipe_inode_info *pipe, size_t len,
5802 struct ftrace_buffer_info *info = file->private_data;
5803 struct trace_iterator *iter = &info->iter;
5804 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5805 struct page *pages_def[PIPE_DEF_BUFFERS];
5806 struct splice_pipe_desc spd = {
	.pages = pages_def,
5808 .partial = partial_def,
5809 .nr_pages_max = PIPE_DEF_BUFFERS,
5811 .ops = &buffer_pipe_buf_ops,
5812 .spd_release = buffer_spd_release,
5814 struct buffer_ref *ref;
5815 int entries, size, i;
5818 #ifdef CONFIG_TRACER_MAX_TRACE
5819 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5823 if (splice_grow_spd(pipe, &spd))
	return -ENOMEM;
5826 if (*ppos & (PAGE_SIZE - 1))
5829 if (len & (PAGE_SIZE - 1)) {
5830 if (len < PAGE_SIZE)
5836 trace_access_lock(iter->cpu_file);
5837 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5839 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5843 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5850 ref->buffer = iter->trace_buffer->buffer;
5851 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5858 r = ring_buffer_read_page(ref->buffer, &ref->page,
5859 len, iter->cpu_file, 1);
5861 ring_buffer_free_read_page(ref->buffer, ref->page);
5867 * zero out any leftover data, this is going to
 * user land.
5870 size = ring_buffer_page_len(ref->page);
5871 if (size < PAGE_SIZE)
5872 memset(ref->page + size, 0, PAGE_SIZE - size);
5874 page = virt_to_page(ref->page);
5876 spd.pages[i] = page;
5877 spd.partial[i].len = PAGE_SIZE;
5878 spd.partial[i].offset = 0;
5879 spd.partial[i].private = (unsigned long)ref;
5883 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5886 trace_access_unlock(iter->cpu_file);
5889 /* did we read anything? */
5890 if (!spd.nr_pages) {
5894 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5897 ret = wait_on_pipe(iter, true);
5904 ret = splice_to_pipe(pipe, &spd);
5905 splice_shrink_spd(&spd);
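/*
 * trace_pipe_raw splice support: whole ring buffer pages are handed to
 * the pipe without copying. Each spliced page carries a buffer_ref
 * that keeps the page alive until the pipe consumer releases it.
 */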
5910 static const struct file_operations tracing_buffers_fops = {
5911 .open = tracing_buffers_open,
5912 .read = tracing_buffers_read,
5913 .poll = tracing_buffers_poll,
5914 .release = tracing_buffers_release,
5915 .splice_read = tracing_buffers_splice_read,
5916 .llseek = no_llseek,
5920 tracing_stats_read(struct file *filp, char __user *ubuf,
5921 size_t count, loff_t *ppos)
5923 struct inode *inode = file_inode(filp);
5924 struct trace_array *tr = inode->i_private;
5925 struct trace_buffer *trace_buf = &tr->trace_buffer;
5926 int cpu = tracing_get_cpu(inode);
5927 struct trace_seq *s;
5929 unsigned long long t;
5930 unsigned long usec_rem;
5932 s = kmalloc(sizeof(*s), GFP_KERNEL);
5938 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5939 trace_seq_printf(s, "entries: %ld\n", cnt);
5941 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5942 trace_seq_printf(s, "overrun: %ld\n", cnt);
5944 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5945 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5947 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5948 trace_seq_printf(s, "bytes: %ld\n", cnt);
5950 if (trace_clocks[tr->clock_id].in_ns) {
5951 /* local or global for trace_clock */
5952 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5953 usec_rem = do_div(t, USEC_PER_SEC);
5954 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5957 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5958 usec_rem = do_div(t, USEC_PER_SEC);
5959 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5961 /* counter or tsc mode for trace_clock */
5962 trace_seq_printf(s, "oldest event ts: %llu\n",
5963 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5965 trace_seq_printf(s, "now ts: %llu\n",
5966 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5969 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5970 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5972 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5973 trace_seq_printf(s, "read events: %ld\n", cnt);
5975 count = simple_read_from_buffer(ubuf, count, ppos,
5976 s->buffer, trace_seq_used(s));
5983 static const struct file_operations tracing_stats_fops = {
5984 .open = tracing_open_generic_tr,
5985 .read = tracing_stats_read,
5986 .llseek = generic_file_llseek,
5987 .release = tracing_release_generic_tr,
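/*
 * per_cpu/cpuN/stats produces one "name: value" line per counter, e.g.
 * (illustrative values):
 *
 *	entries: 1021
 *	overrun: 0
 *	commit overrun: 0
 */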
5990 #ifdef CONFIG_DYNAMIC_FTRACE
5992 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5998 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5999 size_t cnt, loff_t *ppos)
6001 static char ftrace_dyn_info_buffer[1024];
6002 static DEFINE_MUTEX(dyn_info_mutex);
6003 unsigned long *p = filp->private_data;
6004 char *buf = ftrace_dyn_info_buffer;
6005 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
6008 mutex_lock(&dyn_info_mutex);
6009 r = sprintf(buf, "%ld ", *p);
6011 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
6014 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6016 mutex_unlock(&dyn_info_mutex);
6021 static const struct file_operations tracing_dyn_info_fops = {
6022 .open = tracing_open_generic,
6023 .read = tracing_read_dyn_info,
6024 .llseek = generic_file_llseek,
6026 #endif /* CONFIG_DYNAMIC_FTRACE */
6028 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6030 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6036 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6038 unsigned long *count = (long *)data;
6050 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6051 struct ftrace_probe_ops *ops, void *data)
6053 long count = (long)data;
6055 seq_printf(m, "%ps:", (void *)ip);
6057 seq_puts(m, "snapshot");
6060 seq_puts(m, ":unlimited\n");
6062 seq_printf(m, ":count=%ld\n", count);
6067 static struct ftrace_probe_ops snapshot_probe_ops = {
6068 .func = ftrace_snapshot,
6069 .print = ftrace_snapshot_print,
6072 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6073 .func = ftrace_count_snapshot,
6074 .print = ftrace_snapshot_print,
6078 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6079 char *glob, char *cmd, char *param, int enable)
6081 struct ftrace_probe_ops *ops;
6082 void *count = (void *)-1;
6086 /* hash funcs only work with set_ftrace_filter */
6090 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6092 if (glob[0] == '!') {
6093 unregister_ftrace_function_probe_func(glob+1, ops);
6100 number = strsep(&param, ":");
6102 if (!strlen(number))
6106 * We use the callback data field (which is a pointer)
 * as our counter.
6109 ret = kstrtoul(number, 0, (unsigned long *)&count);
6114 ret = register_ftrace_function_probe(glob, ops, count);
6117 alloc_snapshot(&global_trace);
6119 return ret < 0 ? ret : 0;
6122 static struct ftrace_func_command ftrace_snapshot_cmd = {
6124 .func = ftrace_trace_snapshot_callback,
6127 static __init int register_snapshot_cmd(void)
6129 return register_ftrace_command(&ftrace_snapshot_cmd);
6132 static inline __init int register_snapshot_cmd(void) { return 0; }
6133 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
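/*
 * The snapshot function command registered above is driven through
 * set_ftrace_filter, e.g. (illustrative):
 *
 *	echo 'do_fault:snapshot' > set_ftrace_filter
 *	echo 'do_fault:snapshot:5' > set_ftrace_filter
 *
 * The optional count limits how many snapshots get taken; prefixing
 * the glob with '!' removes the probe again.
 */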
6135 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6137 if (WARN_ON(!tr->dir))
6138 return ERR_PTR(-ENODEV);
6140 /* Top directory uses NULL as the parent */
6141 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
	return NULL;
6144 /* All sub buffers have a descriptor */
	return tr->dir;
6148 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6150 struct dentry *d_tracer;
6153 return tr->percpu_dir;
6155 d_tracer = tracing_get_dentry(tr);
6156 if (IS_ERR(d_tracer))
6159 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6161 WARN_ONCE(!tr->percpu_dir,
6162 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6164 return tr->percpu_dir;
6167 static struct dentry *
6168 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6169 void *data, long cpu, const struct file_operations *fops)
6171 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6173 if (ret) /* See tracing_get_cpu() */
6174 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6179 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6181 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6182 struct dentry *d_cpu;
6183 char cpu_dir[30]; /* 30 characters should be more than enough */
6188 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6189 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6191 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6195 /* per cpu trace_pipe */
6196 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6197 tr, cpu, &tracing_pipe_fops);
6200 trace_create_cpu_file("trace", 0644, d_cpu,
6201 tr, cpu, &tracing_fops);
6203 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6204 tr, cpu, &tracing_buffers_fops);
6206 trace_create_cpu_file("stats", 0444, d_cpu,
6207 tr, cpu, &tracing_stats_fops);
6209 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6210 tr, cpu, &tracing_entries_fops);
6212 #ifdef CONFIG_TRACER_SNAPSHOT
6213 trace_create_cpu_file("snapshot", 0644, d_cpu,
6214 tr, cpu, &snapshot_fops);
6216 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6217 tr, cpu, &snapshot_raw_fops);
6221 #ifdef CONFIG_FTRACE_SELFTEST
6222 /* Let selftest have access to static functions in this file */
6223 #include "trace_selftest.c"
6227 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6230 struct trace_option_dentry *topt = filp->private_data;
	char *buf;

6233 if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

6238 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6242 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6245 struct trace_option_dentry *topt = filp->private_data;
6249 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6253 if (val != 0 && val != 1)
6256 if (!!(topt->flags->val & topt->opt->bit) != val) {
6257 mutex_lock(&trace_types_lock);
6258 ret = __set_tracer_option(topt->tr, topt->flags,
6260 mutex_unlock(&trace_types_lock);
6271 static const struct file_operations trace_options_fops = {
6272 .open = tracing_open_generic,
6273 .read = trace_options_read,
6274 .write = trace_options_write,
6275 .llseek = generic_file_llseek,
6279 * In order to pass in both the trace_array descriptor as well as the index
6280 * to the flag that the trace option file represents, the trace_array
6281 * has a character array of trace_flags_index[], which holds the index
6282 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6283 * The address of this character array is passed to the flag option file
6284 * read/write callbacks.
6286 * In order to extract both the index and the trace_array descriptor,
6287 * get_tr_index() uses the following algorithm.
 * idx = *ptr;
 *
6291 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
6294 * Then to get the trace_array descriptor, by subtracting that index
6295 * from the ptr, we get to the start of the index itself.
6297 * ptr - idx == &index[0]
6299 * Then a simple container_of() from that pointer gets us to the
6300 * trace_array descriptor.
6302 static void get_tr_index(void *data, struct trace_array **ptr,
6303 unsigned int *pindex)
6305 *pindex = *(unsigned char *)data;
6307 *ptr = container_of(data - *pindex, struct trace_array,
		    trace_flags_index);
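/*
 * Worked example (illustrative): if data points at
 * tr->trace_flags_index[3], then *pindex becomes 3, and data - 3 is
 * &tr->trace_flags_index[0], from which container_of() recovers tr.
 */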
6312 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6315 void *tr_index = filp->private_data;
6316 struct trace_array *tr;
	unsigned int index;
	char *buf;

6320 get_tr_index(tr_index, &tr, &index);

6322 if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

6327 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6331 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6334 void *tr_index = filp->private_data;
6335 struct trace_array *tr;
6340 get_tr_index(tr_index, &tr, &index);
6342 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6346 if (val != 0 && val != 1)
6349 mutex_lock(&trace_types_lock);
6350 ret = set_tracer_flag(tr, 1 << index, val);
6351 mutex_unlock(&trace_types_lock);
6361 static const struct file_operations trace_options_core_fops = {
6362 .open = tracing_open_generic,
6363 .read = trace_options_core_read,
6364 .write = trace_options_core_write,
6365 .llseek = generic_file_llseek,
6368 struct dentry *trace_create_file(const char *name,
6370 struct dentry *parent,
6372 const struct file_operations *fops)
6376 ret = tracefs_create_file(name, mode, parent, data, fops);
6378 pr_warn("Could not create tracefs '%s' entry\n", name);
6384 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6386 struct dentry *d_tracer;
6391 d_tracer = tracing_get_dentry(tr);
6392 if (IS_ERR(d_tracer))
6395 tr->options = tracefs_create_dir("options", d_tracer);
6397 pr_warn("Could not create tracefs directory 'options'\n");
6405 create_trace_option_file(struct trace_array *tr,
6406 struct trace_option_dentry *topt,
6407 struct tracer_flags *flags,
6408 struct tracer_opt *opt)
6410 struct dentry *t_options;
6412 t_options = trace_options_init_dentry(tr);
6416 topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;
6420 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6421 &trace_options_fops);
6426 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6428 struct trace_option_dentry *topts;
6429 struct trace_options *tr_topts;
6430 struct tracer_flags *flags;
6431 struct tracer_opt *opts;
6438 flags = tracer->flags;
6440 if (!flags || !flags->opts)
6444 * If this is an instance, only create flags for tracers
6445 * the instance may have.
6447 if (!trace_ok_for_array(tracer, tr))
6450 for (i = 0; i < tr->nr_topts; i++) {
6451 /* Make sure there's no duplicate flags. */
6452 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
6458 for (cnt = 0; opts[cnt].name; cnt++)
6461 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6465 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6472 tr->topts = tr_topts;
6473 tr->topts[tr->nr_topts].tracer = tracer;
6474 tr->topts[tr->nr_topts].topts = topts;
6477 for (cnt = 0; opts[cnt].name; cnt++) {
6478 create_trace_option_file(tr, &topts[cnt], flags,
6480 WARN_ONCE(topts[cnt].entry == NULL,
6481 "Failed to create trace option: %s",
6486 static struct dentry *
6487 create_trace_option_core_file(struct trace_array *tr,
6488 const char *option, long index)
6490 struct dentry *t_options;
6492 t_options = trace_options_init_dentry(tr);
6496 return trace_create_file(option, 0644, t_options,
6497 (void *)&tr->trace_flags_index[index],
6498 &trace_options_core_fops);
6501 static void create_trace_options_dir(struct trace_array *tr)
6503 struct dentry *t_options;
6504 bool top_level = tr == &global_trace;
6507 t_options = trace_options_init_dentry(tr);
6511 for (i = 0; trace_options[i]; i++) {
	if (top_level ||
6513 	    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6514 create_trace_option_core_file(tr, trace_options[i], i);
6519 rb_simple_read(struct file *filp, char __user *ubuf,
6520 size_t cnt, loff_t *ppos)
6522 struct trace_array *tr = filp->private_data;
6526 r = tracer_tracing_is_on(tr);
6527 r = sprintf(buf, "%d\n", r);
6529 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6533 rb_simple_write(struct file *filp, const char __user *ubuf,
6534 size_t cnt, loff_t *ppos)
6536 struct trace_array *tr = filp->private_data;
6537 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6541 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6546 mutex_lock(&trace_types_lock);
6548 tracer_tracing_on(tr);
6549 if (tr->current_trace->start)
6550 tr->current_trace->start(tr);
6552 tracer_tracing_off(tr);
6553 if (tr->current_trace->stop)
6554 tr->current_trace->stop(tr);
6556 mutex_unlock(&trace_types_lock);
6564 static const struct file_operations rb_simple_fops = {
6565 .open = tracing_open_generic_tr,
6566 .read = rb_simple_read,
6567 .write = rb_simple_write,
6568 .release = tracing_release_generic_tr,
6569 .llseek = default_llseek,
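/*
 * rb_simple_fops backs the tracing_on file, e.g. (illustrative):
 *
 *	echo 0 > tracing_on	# stop recording, keep the buffer
 *	echo 1 > tracing_on	# resume recording
 */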
6572 struct dentry *trace_instance_dir;
6575 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6578 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6580 enum ring_buffer_flags rb_flags;
6582 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6586 buf->buffer = ring_buffer_alloc(size, rb_flags);
6590 buf->data = alloc_percpu(struct trace_array_cpu);
6592 ring_buffer_free(buf->buffer);
6596 /* Allocate the first page for all buffers */
6597 set_buffer_entries(&tr->trace_buffer,
6598 ring_buffer_size(tr->trace_buffer.buffer, 0));
6603 static int allocate_trace_buffers(struct trace_array *tr, int size)
6607 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6611 #ifdef CONFIG_TRACER_MAX_TRACE
6612 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6613 allocate_snapshot ? size : 1);
6615 ring_buffer_free(tr->trace_buffer.buffer);
6616 free_percpu(tr->trace_buffer.data);
6619 tr->allocated_snapshot = allocate_snapshot;
6622 * Only the top level trace array gets its snapshot allocated
6623 * from the kernel command line.
6625 allocate_snapshot = false;
6630 static void free_trace_buffer(struct trace_buffer *buf)
6633 ring_buffer_free(buf->buffer);
6635 free_percpu(buf->data);
6640 static void free_trace_buffers(struct trace_array *tr)
6645 free_trace_buffer(&tr->trace_buffer);
6647 #ifdef CONFIG_TRACER_MAX_TRACE
6648 free_trace_buffer(&tr->max_buffer);
6652 static void init_trace_flags_index(struct trace_array *tr)
6656 /* Used by the trace options files */
6657 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6658 tr->trace_flags_index[i] = i;
6661 static void __update_tracer_options(struct trace_array *tr)
6665 for (t = trace_types; t; t = t->next)
6666 add_tracer_options(tr, t);
6669 static void update_tracer_options(struct trace_array *tr)
6671 mutex_lock(&trace_types_lock);
6672 __update_tracer_options(tr);
6673 mutex_unlock(&trace_types_lock);
6676 static int instance_mkdir(const char *name)
6678 struct trace_array *tr;
6681 mutex_lock(&trace_types_lock);
6684 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6685 if (tr->name && strcmp(tr->name, name) == 0)
6690 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6694 tr->name = kstrdup(name, GFP_KERNEL);
6698 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6701 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
6703 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6705 raw_spin_lock_init(&tr->start_lock);
6707 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6709 tr->current_trace = &nop_trace;
6711 INIT_LIST_HEAD(&tr->systems);
6712 INIT_LIST_HEAD(&tr->events);
6714 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6717 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6721 ret = event_trace_add_tracer(tr->dir, tr);
6723 tracefs_remove_recursive(tr->dir);
6727 init_tracer_tracefs(tr, tr->dir);
6728 init_trace_flags_index(tr);
6729 __update_tracer_options(tr);
6731 list_add(&tr->list, &ftrace_trace_arrays);
6733 mutex_unlock(&trace_types_lock);
6738 free_trace_buffers(tr);
6739 free_cpumask_var(tr->tracing_cpumask);
6744 mutex_unlock(&trace_types_lock);
6750 static int instance_rmdir(const char *name)
6752 struct trace_array *tr;
6757 mutex_lock(&trace_types_lock);
6760 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6761 if (tr->name && strcmp(tr->name, name) == 0) {
6770 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6773 list_del(&tr->list);
6775 /* Disable all the flags that were enabled coming in */
6776 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
6777 if ((1 << i) & ZEROED_TRACE_FLAGS)
6778 set_tracer_flag(tr, 1 << i, 0);
6781 tracing_set_nop(tr);
6782 event_trace_del_tracer(tr);
6783 ftrace_destroy_function_files(tr);
6784 tracefs_remove_recursive(tr->dir);
6785 free_trace_buffers(tr);
6787 for (i = 0; i < tr->nr_topts; i++) {
6788 kfree(tr->topts[i].topts);
6798 mutex_unlock(&trace_types_lock);
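/*
 * Instances are managed through the tracefs directory itself, e.g.
 * (illustrative):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * instance_rmdir() above refuses to remove an instance whose buffers
 * still have readers holding a reference.
 */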
6803 static __init void create_trace_instances(struct dentry *d_tracer)
6805 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
						  instance_mkdir,
						  instance_rmdir);
6808 if (WARN_ON(!trace_instance_dir))
6813 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6817 trace_create_file("available_tracers", 0444, d_tracer,
6818 tr, &show_traces_fops);
6820 trace_create_file("current_tracer", 0644, d_tracer,
6821 tr, &set_tracer_fops);
6823 trace_create_file("tracing_cpumask", 0644, d_tracer,
6824 tr, &tracing_cpumask_fops);
6826 trace_create_file("trace_options", 0644, d_tracer,
6827 tr, &tracing_iter_fops);
6829 trace_create_file("trace", 0644, d_tracer,
6832 trace_create_file("trace_pipe", 0444, d_tracer,
6833 tr, &tracing_pipe_fops);
6835 trace_create_file("buffer_size_kb", 0644, d_tracer,
6836 tr, &tracing_entries_fops);
6838 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6839 tr, &tracing_total_entries_fops);
6841 trace_create_file("free_buffer", 0200, d_tracer,
6842 tr, &tracing_free_buffer_fops);
6844 trace_create_file("trace_marker", 0220, d_tracer,
6845 tr, &tracing_mark_fops);
6847 trace_create_file("trace_clock", 0644, d_tracer, tr,
6850 trace_create_file("tracing_on", 0644, d_tracer,
6851 tr, &rb_simple_fops);
6853 create_trace_options_dir(tr);
6855 #ifdef CONFIG_TRACER_MAX_TRACE
6856 trace_create_file("tracing_max_latency", 0644, d_tracer,
6857 &tr->max_latency, &tracing_max_lat_fops);
6860 if (ftrace_create_function_files(tr, d_tracer))
6861 WARN(1, "Could not allocate function filter files");
6863 #ifdef CONFIG_TRACER_SNAPSHOT
6864 trace_create_file("snapshot", 0644, d_tracer,
6865 tr, &snapshot_fops);
6868 for_each_tracing_cpu(cpu)
6869 tracing_init_tracefs_percpu(tr, cpu);
6873 static struct vfsmount *trace_automount(void *ignore)
6875 struct vfsmount *mnt;
6876 struct file_system_type *type;
6879 * To maintain backward compatibility for tools that mount
6880 * debugfs to get to the tracing facility, tracefs is automatically
6881 * mounted to the debugfs/tracing directory.
6883 type = get_fs_type("tracefs");
6886 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6887 put_filesystem(type);
6896 * tracing_init_dentry - initialize top level trace array
6898 * This is called when creating files or directories in the tracing
6899 * directory. It is called via fs_initcall() by any of the boot up code
6900 * and expects to return the dentry of the top level tracing directory.
6902 struct dentry *tracing_init_dentry(void)
6904 struct trace_array *tr = &global_trace;
6906 /* The top level trace array uses NULL as parent */
6910 if (WARN_ON(!tracefs_initialized()) ||
6911 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6912 WARN_ON(!debugfs_initialized())))
6913 return ERR_PTR(-ENODEV);
6916 * As there may still be users that expect the tracing
6917 * files to exist in debugfs/tracing, we must automount
6918 * the tracefs file system there, so older tools still
6919 * work with the newer kernel.
6921 tr->dir = debugfs_create_automount("tracing", NULL,
6922 trace_automount, NULL);
	if (!tr->dir) {
6924 	pr_warn_once("Could not create debugfs directory 'tracing'\n");
6925 	return ERR_PTR(-ENOMEM);
	}
6931 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6932 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6934 static void __init trace_enum_init(void)
6938 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6939 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6942 #ifdef CONFIG_MODULES
6943 static void trace_module_add_enums(struct module *mod)
6945 if (!mod->num_trace_enums)
6949 * Modules with bad taint do not have events created, do
6950 * not bother with enums either.
6952 if (trace_module_has_bad_taint(mod))
6955 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6958 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6959 static void trace_module_remove_enums(struct module *mod)
6961 union trace_enum_map_item *map;
6962 union trace_enum_map_item **last = &trace_enum_maps;
6964 if (!mod->num_trace_enums)
6967 mutex_lock(&trace_enum_mutex);
6969 map = trace_enum_maps;
6972 if (map->head.mod == mod)
6974 map = trace_enum_jmp_to_tail(map);
6975 last = &map->tail.next;
6976 map = map->tail.next;
6981 *last = trace_enum_jmp_to_tail(map)->tail.next;
6984 mutex_unlock(&trace_enum_mutex);
6987 static inline void trace_module_remove_enums(struct module *mod) { }
6988 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6990 static int trace_module_notify(struct notifier_block *self,
6991 unsigned long val, void *data)
6993 struct module *mod = data;
6996 case MODULE_STATE_COMING:
6997 trace_module_add_enums(mod);
6999 case MODULE_STATE_GOING:
7000 trace_module_remove_enums(mod);
7007 static struct notifier_block trace_module_nb = {
7008 .notifier_call = trace_module_notify,
7011 #endif /* CONFIG_MODULES */
7013 static __init int tracer_init_tracefs(void)
7015 struct dentry *d_tracer;
7017 trace_access_lock_init();
7019 d_tracer = tracing_init_dentry();
7020 if (IS_ERR(d_tracer))
7023 init_tracer_tracefs(&global_trace, d_tracer);
7025 trace_create_file("tracing_thresh", 0644, d_tracer,
7026 &global_trace, &tracing_thresh_fops);
7028 trace_create_file("README", 0444, d_tracer,
7029 NULL, &tracing_readme_fops);
7031 trace_create_file("saved_cmdlines", 0444, d_tracer,
7032 NULL, &tracing_saved_cmdlines_fops);
7034 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7035 NULL, &tracing_saved_cmdlines_size_fops);
7039 trace_create_enum_file(d_tracer);
7041 #ifdef CONFIG_MODULES
7042 register_module_notifier(&trace_module_nb);
7045 #ifdef CONFIG_DYNAMIC_FTRACE
7046 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7047 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7050 create_trace_instances(d_tracer);
7052 update_tracer_options(&global_trace);
7057 static int trace_panic_handler(struct notifier_block *this,
7058 unsigned long event, void *unused)
7060 if (ftrace_dump_on_oops)
7061 ftrace_dump(ftrace_dump_on_oops);
7065 static struct notifier_block trace_panic_notifier = {
7066 .notifier_call = trace_panic_handler,
7068 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7071 static int trace_die_handler(struct notifier_block *self,
7077 if (ftrace_dump_on_oops)
7078 ftrace_dump(ftrace_dump_on_oops);
7086 static struct notifier_block trace_die_notifier = {
7087 .notifier_call = trace_die_handler,
7092 * printk is set to max of 1024, we really don't need it that big.
7093 * Nothing should be printing 1000 characters anyway.
7095 #define TRACE_MAX_PRINT 1000
7098 * Define here KERN_TRACE so that we have one place to modify
7099 * it if we decide to change what log level the ftrace dump
7102 #define KERN_TRACE KERN_EMERG
7105 trace_printk_seq(struct trace_seq *s)
7107 /* Probably should print a warning here. */
7108 if (s->seq.len >= TRACE_MAX_PRINT)
7109 s->seq.len = TRACE_MAX_PRINT;
7112 * More paranoid code. Although the buffer size is set to
7113 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7114 * an extra layer of protection.
7116 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7117 s->seq.len = s->seq.size - 1;
7119 /* should be NUL-terminated, but we are paranoid */
7120 s->buffer[s->seq.len] = 0;
7122 printk(KERN_TRACE "%s", s->buffer);
7127 void trace_init_global_iter(struct trace_iterator *iter)
7129 iter->tr = &global_trace;
7130 iter->trace = iter->tr->current_trace;
7131 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7132 iter->trace_buffer = &global_trace.trace_buffer;
7134 if (iter->trace && iter->trace->open)
7135 iter->trace->open(iter);
7137 /* Annotate start of buffers if we had overruns */
7138 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7139 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7141 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7142 if (trace_clocks[iter->tr->clock_id].in_ns)
7143 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
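/*
 * ftrace_dump() is the oops/panic path: it may run from NMI context,
 * so it disables per-cpu recording by hand and prints via printk()
 * with KERN_TRACE rather than going through the normal locked readers.
 * Only one dumper may run at a time.
 */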
7146 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7148 /* use static because iter can be a bit big for the stack */
7149 static struct trace_iterator iter;
7150 static atomic_t dump_running;
7151 struct trace_array *tr = &global_trace;
7152 unsigned int old_userobj;
7153 unsigned long flags;
7156 /* Only allow one dump user at a time. */
7157 if (atomic_inc_return(&dump_running) != 1) {
7158 atomic_dec(&dump_running);
7163 * Always turn off tracing when we dump.
7164 * We don't need to show trace output of what happens
7165 * between multiple crashes.
7167 * If the user does a sysrq-z, then they can re-enable
7168 * tracing with echo 1 > tracing_on.
7172 local_irq_save(flags);
7174 /* Simulate the iterator */
7175 trace_init_global_iter(&iter);
7177 for_each_tracing_cpu(cpu) {
7178 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7181 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7183 /* don't look at user memory in panic mode */
7184 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7186 switch (oops_dump_mode) {
7188 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7191 iter.cpu_file = raw_smp_processor_id();
7196 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7197 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7200 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7202 /* Did function tracer already get disabled? */
7203 if (ftrace_is_dead()) {
7204 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7205 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7209 * We need to stop all tracing on all CPUs to read
7210 * the next buffer. This is a bit expensive, but is
7211 * not done often. We fill all that we can read,
7212 * and then release the locks again.
7215 while (!trace_empty(&iter)) {
7218 printk(KERN_TRACE "---------------------------------\n");
7222 /* reset all but tr, trace, and overruns */
7223 memset(&iter.seq, 0,
7224 sizeof(struct trace_iterator) -
7225 offsetof(struct trace_iterator, seq));
7226 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7229 if (trace_find_next_entry_inc(&iter) != NULL) {
7232 ret = print_trace_line(&iter);
7233 if (ret != TRACE_TYPE_NO_CONSUME)
7234 trace_consume(&iter);
7236 touch_nmi_watchdog();
7238 trace_printk_seq(&iter.seq);
7242 printk(KERN_TRACE " (ftrace buffer empty)\n");
7244 printk(KERN_TRACE "---------------------------------\n");
7247 tr->trace_flags |= old_userobj;
7249 for_each_tracing_cpu(cpu) {
7250 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7252 atomic_dec(&dump_running);
7253 local_irq_restore(flags);
7255 EXPORT_SYMBOL_GPL(ftrace_dump);
7257 __init static int tracer_alloc_buffers(void)
7263 * Make sure we don't accidentally add more trace options
7264 * than we have bits for.
7266 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7268 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7271 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7272 goto out_free_buffer_mask;
7274 /* Only allocate trace_printk buffers if a trace_printk exists */
7275 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7276 /* Must be called before global_trace.buffer is allocated */
7277 trace_printk_init_buffers();
7279 /* To save memory, keep the ring buffer size to its minimum */
7280 if (ring_buffer_expanded)
7281 ring_buf_size = trace_buf_size;
7285 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7286 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7288 raw_spin_lock_init(&global_trace.start_lock);
7290 /* Used for event triggers */
7291 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7293 goto out_free_cpumask;
7295 if (trace_create_savedcmd() < 0)
7296 goto out_free_temp_buffer;
7298 /* TODO: make the number of buffers hot pluggable with CPUS */
7299 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7300 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7302 goto out_free_savedcmd;
7305 if (global_trace.buffer_disabled)
7308 if (trace_boot_clock) {
7309 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7311 pr_warn("Trace clock %s not defined, going back to default\n",
7316 * register_tracer() might reference current_trace, so it
7317 * needs to be set before we register anything. This is
7318 * just a bootstrap of current_trace anyway.
7320 global_trace.current_trace = &nop_trace;
7322 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7324 ftrace_init_global_array_ops(&global_trace);
7326 init_trace_flags_index(&global_trace);
7328 register_tracer(&nop_trace);
7330 /* All seems OK, enable tracing */
7331 tracing_disabled = 0;
7333 atomic_notifier_chain_register(&panic_notifier_list,
7334 &trace_panic_notifier);
7336 register_die_notifier(&trace_die_notifier);
7338 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7340 INIT_LIST_HEAD(&global_trace.systems);
7341 INIT_LIST_HEAD(&global_trace.events);
7342 list_add(&global_trace.list, &ftrace_trace_arrays);
7344 apply_trace_boot_options();
7346 register_snapshot_cmd();
	return 0;

out_free_savedcmd:
7351 	free_saved_cmdlines_buffer(savedcmd);
7352 out_free_temp_buffer:
7353 ring_buffer_free(temp_buffer);
out_free_cpumask:
7355 	free_cpumask_var(global_trace.tracing_cpumask);
7356 out_free_buffer_mask:
7357 free_cpumask_var(tracing_buffer_mask);
7362 void __init trace_init(void)
7364 if (tracepoint_printk) {
7365 tracepoint_print_iter =
7366 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7367 if (WARN_ON(!tracepoint_print_iter))
7368 tracepoint_printk = 0;
7370 tracer_alloc_buffers();
7374 __init static int clear_boot_tracer(void)
7377 * The name of the default bootup tracer is stored in an init section.
7378 * This function runs as a late initcall. If we did not
7379 * find the boot tracer, then clear it out, to prevent
7380 * later registration from accessing the buffer that is
7381 * about to be freed.
7383 if (!default_bootup_tracer)
7386 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7387 default_bootup_tracer);
7388 default_bootup_tracer = NULL;
7393 fs_initcall(tracer_init_tracefs);
7394 late_initcall(clear_boot_tracer);