tracing: Put back comma for empty fields in boot string parsing
[cascardo/linux.git] / kernel / trace / trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
42 #include <linux/fs.h>
43 #include <linux/sched/rt.h>
44
45 #include "trace.h"
46 #include "trace_output.h"
47
48 /*
49  * On boot up, the ring buffer is set to the minimum size, so that
50  * we do not waste memory on systems that are not using tracing.
51  */
52 bool ring_buffer_expanded;
53
54 /*
55  * We need to change this state when a selftest is running.
56  * A selftest will look into the ring buffer to count the
57  * entries inserted during the selftest, although some concurrent
58  * insertions into the ring buffer, such as trace_printk(), could occur
59  * at the same time, giving false positive or negative results.
60  */
61 static bool __read_mostly tracing_selftest_running;
62
63 /*
64  * If a tracer is running, we do not want to run SELFTEST.
65  */
66 bool __read_mostly tracing_selftest_disabled;
67
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
71
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
74         { }
75 };
76
77 static struct tracer_flags dummy_tracer_flags = {
78         .val = 0,
79         .opts = dummy_tracer_opt
80 };
81
82 static int
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
84 {
85         return 0;
86 }
87
88 /*
89  * To prevent the comm cache from being overwritten when no
90  * tracing is active, only save the comm when a trace event
91  * occurred.
92  */
93 static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
95 /*
96  * Kill all tracing for good (never come back).
97  * It is initialized to 1 but will turn to zero if the initialization
98  * of the tracer is successful. But that is the only place that sets
99  * this back to zero.
100  */
101 static int tracing_disabled = 1;
102
103 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
104
105 cpumask_var_t __read_mostly     tracing_buffer_mask;
106
107 /*
108  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
109  *
110  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
111  * is set, then ftrace_dump is called. This will output the contents
112  * of the ftrace buffers to the console.  This is very useful for
113  * capturing traces that lead to crashes and outputting them to a
114  * serial console.
115  *
116  * It is off by default, but you can enable it either by specifying
117  * "ftrace_dump_on_oops" on the kernel command line or by setting
118  * /proc/sys/kernel/ftrace_dump_on_oops.
119  * Set it to 1 to dump the buffers of all CPUs.
120  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
121  */
122
123 enum ftrace_dump_mode ftrace_dump_on_oops;
124
125 /* When set, tracing will stop when a WARN*() is hit */
126 int __disable_trace_on_warning;
127
128 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
129 /* Map of enums to their values, for "enum_map" file */
130 struct trace_enum_map_head {
131         struct module                   *mod;
132         unsigned long                   length;
133 };
134
135 union trace_enum_map_item;
136
137 struct trace_enum_map_tail {
138         /*
139          * "end" is first and points to NULL as it must be different
140          * than "mod" or "enum_string"
141          */
142         union trace_enum_map_item       *next;
143         const char                      *end;   /* points to NULL */
144 };
145
146 static DEFINE_MUTEX(trace_enum_mutex);
147
148 /*
149  * The trace_enum_maps are saved in an array with two extra elements,
150  * one at the beginning, and one at the end. The beginning item contains
151  * the count of the saved maps (head.length), and the module they
152  * belong to if not built in (head.mod). The ending item contains a
153  * pointer to the next array of saved enum_map items.
154  */
155 union trace_enum_map_item {
156         struct trace_enum_map           map;
157         struct trace_enum_map_head      head;
158         struct trace_enum_map_tail      tail;
159 };
160
161 static union trace_enum_map_item *trace_enum_maps;
162 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163
164 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
165
166 #define MAX_TRACER_SIZE         100
167 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
168 static char *default_bootup_tracer;
169
170 static bool allocate_snapshot;
171
172 static int __init set_cmdline_ftrace(char *str)
173 {
174         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
175         default_bootup_tracer = bootup_tracer_buf;
176         /* We are using ftrace early, expand it */
177         ring_buffer_expanded = true;
178         return 1;
179 }
180 __setup("ftrace=", set_cmdline_ftrace);
181
182 static int __init set_ftrace_dump_on_oops(char *str)
183 {
184         if (*str++ != '=' || !*str) {
185                 ftrace_dump_on_oops = DUMP_ALL;
186                 return 1;
187         }
188
189         if (!strcmp("orig_cpu", str)) {
190                 ftrace_dump_on_oops = DUMP_ORIG;
191                 return 1;
192         }
193
194         return 0;
195 }
196 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
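/*
 * Usage note (illustrative): passing "ftrace_dump_on_oops" on the kernel
 * command line selects DUMP_ALL, while "ftrace_dump_on_oops=orig_cpu"
 * dumps only the buffer of the CPU that triggered the oops (DUMP_ORIG).
 */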
197
198 static int __init stop_trace_on_warning(char *str)
199 {
200         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201                 __disable_trace_on_warning = 1;
202         return 1;
203 }
204 __setup("traceoff_on_warning", stop_trace_on_warning);
205
206 static int __init boot_alloc_snapshot(char *str)
207 {
208         allocate_snapshot = true;
209         /* We also need the main ring buffer expanded */
210         ring_buffer_expanded = true;
211         return 1;
212 }
213 __setup("alloc_snapshot", boot_alloc_snapshot);
214
215
216 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217
218 static int __init set_trace_boot_options(char *str)
219 {
220         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
221         return 0;
222 }
223 __setup("trace_options=", set_trace_boot_options);
224
225 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
226 static char *trace_boot_clock __initdata;
227
228 static int __init set_trace_boot_clock(char *str)
229 {
230         strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
231         trace_boot_clock = trace_boot_clock_buf;
232         return 0;
233 }
234 __setup("trace_clock=", set_trace_boot_clock);
235
236 static int __init set_tracepoint_printk(char *str)
237 {
238         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
239                 tracepoint_printk = 1;
240         return 1;
241 }
242 __setup("tp_printk", set_tracepoint_printk);
243
244 unsigned long long ns2usecs(cycle_t nsec)
245 {
246         nsec += 500;
247         do_div(nsec, 1000);
248         return nsec;
249 }
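/*
 * For example, ns2usecs(1499) == 1 and ns2usecs(1500) == 2; the +500
 * above rounds to the nearest microsecond rather than truncating.
 */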
250
251 /* trace_flags holds trace_options default values */
252 #define TRACE_DEFAULT_FLAGS                                             \
253         (FUNCTION_DEFAULT_FLAGS |                                       \
254          TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
255          TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
256          TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
257          TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
258
259 /* trace_options that are only supported by global_trace */
260 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
261                TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
262
263
264 /*
265  * The global_trace is the descriptor that holds the tracing
266  * buffers for the live tracing. For each CPU, it contains
267  * a link list of pages that will store trace entries. The
268  * page descriptor of the pages in the memory is used to hold
269  * the link list by linking the lru item in the page descriptor
270  * to each of the pages in the buffer per CPU.
271  *
272  * For each active CPU there is a data field that holds the
273  * pages for the buffer for that CPU. Each CPU has the same number
274  * of pages allocated for its buffer.
275  */
276 static struct trace_array global_trace = {
277         .trace_flags = TRACE_DEFAULT_FLAGS,
278 };
279
280 LIST_HEAD(ftrace_trace_arrays);
281
282 int trace_array_get(struct trace_array *this_tr)
283 {
284         struct trace_array *tr;
285         int ret = -ENODEV;
286
287         mutex_lock(&trace_types_lock);
288         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
289                 if (tr == this_tr) {
290                         tr->ref++;
291                         ret = 0;
292                         break;
293                 }
294         }
295         mutex_unlock(&trace_types_lock);
296
297         return ret;
298 }
299
300 static void __trace_array_put(struct trace_array *this_tr)
301 {
302         WARN_ON(!this_tr->ref);
303         this_tr->ref--;
304 }
305
306 void trace_array_put(struct trace_array *this_tr)
307 {
308         mutex_lock(&trace_types_lock);
309         __trace_array_put(this_tr);
310         mutex_unlock(&trace_types_lock);
311 }
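/*
 * Illustrative sketch of the intended get/put pattern.  The helper name
 * below is hypothetical; "tr" is assumed to come from elsewhere (e.g. a
 * file's private data).
 */
static int example_use_trace_array(struct trace_array *tr)
{
        int ret;

        ret = trace_array_get(tr);      /* -ENODEV if tr is no longer listed */
        if (ret < 0)
                return ret;

        /* ... safely use tr while the reference is held ... */

        trace_array_put(tr);
        return 0;
}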
312
313 int filter_check_discard(struct trace_event_file *file, void *rec,
314                          struct ring_buffer *buffer,
315                          struct ring_buffer_event *event)
316 {
317         if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
318             !filter_match_preds(file->filter, rec)) {
319                 ring_buffer_discard_commit(buffer, event);
320                 return 1;
321         }
322
323         return 0;
324 }
325 EXPORT_SYMBOL_GPL(filter_check_discard);
326
327 int call_filter_check_discard(struct trace_event_call *call, void *rec,
328                               struct ring_buffer *buffer,
329                               struct ring_buffer_event *event)
330 {
331         if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
332             !filter_match_preds(call->filter, rec)) {
333                 ring_buffer_discard_commit(buffer, event);
334                 return 1;
335         }
336
337         return 0;
338 }
339 EXPORT_SYMBOL_GPL(call_filter_check_discard);
340
341 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
342 {
343         u64 ts;
344
345         /* Early boot up does not have a buffer yet */
346         if (!buf->buffer)
347                 return trace_clock_local();
348
349         ts = ring_buffer_time_stamp(buf->buffer, cpu);
350         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
351
352         return ts;
353 }
354
355 cycle_t ftrace_now(int cpu)
356 {
357         return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
358 }
359
360 /**
361  * tracing_is_enabled - Show if global_trace has been disabled
362  *
363  * Shows if the global trace has been enabled or not. It uses the
364  * mirror flag "buffer_disabled" to be used in fast paths such as for
365  * the irqsoff tracer. But it may be inaccurate due to races. If you
366  * need to know the accurate state, use tracing_is_on() which is a little
367  * slower, but accurate.
368  */
369 int tracing_is_enabled(void)
370 {
371         /*
372          * For quick access (irqsoff uses this in fast path), just
373          * return the mirror variable of the state of the ring buffer.
374          * It's a little racy, but we don't really care.
375          */
376         smp_rmb();
377         return !global_trace.buffer_disabled;
378 }
379
380 /*
381  * trace_buf_size is the size in bytes that is allocated
382  * for a buffer. Note, the number of bytes is always rounded
383  * to page size.
384  *
385  * This number is purposely set to a low number of 16384.
386  * If the dump on oops happens, it will be much appreciated
387  * to not have to wait for all that output. Anyway this can be
388  * boot time and run time configurable.
389  */
390 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
391
392 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
393
394 /* trace_types holds a link list of available tracers. */
395 static struct tracer            *trace_types __read_mostly;
396
397 /*
398  * trace_types_lock is used to protect the trace_types list.
399  */
400 DEFINE_MUTEX(trace_types_lock);
401
402 /*
403  * serialize the access to the ring buffer
404  *
405  * The ring buffer serializes readers, but that is only low level protection.
406  * The validity of the events (which are returned by ring_buffer_peek(), etc.)
407  * is not protected by the ring buffer.
408  *
409  * The content of events may become garbage if we allow another process to
410  * consume these events concurrently:
411  *   A) the page of the consumed events may become a normal page
412  *      (not a reader page) in the ring buffer, and this page will be rewritten
413  *      by the events producer.
414  *   B) The page of the consumed events may become a page for splice_read,
415  *      and this page will be returned to the system.
416  *
417  * These primitives allow multiple processes to access different cpu ring
418  * buffers concurrently.
419  *
420  * These primitives don't distinguish read-only and read-consume access.
421  * Multiple read-only accesses are also serialized.
422  */
423
424 #ifdef CONFIG_SMP
425 static DECLARE_RWSEM(all_cpu_access_lock);
426 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
427
428 static inline void trace_access_lock(int cpu)
429 {
430         if (cpu == RING_BUFFER_ALL_CPUS) {
431                 /* gain it for accessing the whole ring buffer. */
432                 down_write(&all_cpu_access_lock);
433         } else {
434                 /* gain it for accessing a cpu ring buffer. */
435
436                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
437                 down_read(&all_cpu_access_lock);
438
439                 /* Secondly block other access to this @cpu ring buffer. */
440                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
441         }
442 }
443
444 static inline void trace_access_unlock(int cpu)
445 {
446         if (cpu == RING_BUFFER_ALL_CPUS) {
447                 up_write(&all_cpu_access_lock);
448         } else {
449                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
450                 up_read(&all_cpu_access_lock);
451         }
452 }
453
454 static inline void trace_access_lock_init(void)
455 {
456         int cpu;
457
458         for_each_possible_cpu(cpu)
459                 mutex_init(&per_cpu(cpu_access_lock, cpu));
460 }
461
462 #else
463
464 static DEFINE_MUTEX(access_lock);
465
466 static inline void trace_access_lock(int cpu)
467 {
468         (void)cpu;
469         mutex_lock(&access_lock);
470 }
471
472 static inline void trace_access_unlock(int cpu)
473 {
474         (void)cpu;
475         mutex_unlock(&access_lock);
476 }
477
478 static inline void trace_access_lock_init(void)
479 {
480 }
481
482 #endif
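/*
 * Illustrative sketch of how the primitives above are meant to be used
 * by a per-cpu consumer; the helper name is hypothetical.  Passing
 * RING_BUFFER_ALL_CPUS instead takes exclusive access to every buffer.
 */
static void example_consume_cpu_buffer(int cpu)
{
        trace_access_lock(cpu);
        /* ... read or consume events from this cpu's buffer ... */
        trace_access_unlock(cpu);
}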
483
484 #ifdef CONFIG_STACKTRACE
485 static void __ftrace_trace_stack(struct ring_buffer *buffer,
486                                  unsigned long flags,
487                                  int skip, int pc, struct pt_regs *regs);
488 static inline void ftrace_trace_stack(struct trace_array *tr,
489                                       struct ring_buffer *buffer,
490                                       unsigned long flags,
491                                       int skip, int pc, struct pt_regs *regs);
492
493 #else
494 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
495                                         unsigned long flags,
496                                         int skip, int pc, struct pt_regs *regs)
497 {
498 }
499 static inline void ftrace_trace_stack(struct trace_array *tr,
500                                       struct ring_buffer *buffer,
501                                       unsigned long flags,
502                                       int skip, int pc, struct pt_regs *regs)
503 {
504 }
505
506 #endif
507
508 static void tracer_tracing_on(struct trace_array *tr)
509 {
510         if (tr->trace_buffer.buffer)
511                 ring_buffer_record_on(tr->trace_buffer.buffer);
512         /*
513          * This flag is looked at when buffers haven't been allocated
514          * yet, or by some tracers (like irqsoff), that just want to
515          * know if the ring buffer has been disabled, but it can handle
516          * races where it gets disabled but we still do a record.
517          * As the check is in the fast path of the tracers, it is more
518          * important to be fast than accurate.
519          */
520         tr->buffer_disabled = 0;
521         /* Make the flag seen by readers */
522         smp_wmb();
523 }
524
525 /**
526  * tracing_on - enable tracing buffers
527  *
528  * This function enables tracing buffers that may have been
529  * disabled with tracing_off.
530  */
531 void tracing_on(void)
532 {
533         tracer_tracing_on(&global_trace);
534 }
535 EXPORT_SYMBOL_GPL(tracing_on);
536
537 /**
538  * __trace_puts - write a constant string into the trace buffer.
539  * @ip:    The address of the caller
540  * @str:   The constant string to write
541  * @size:  The size of the string.
542  */
543 int __trace_puts(unsigned long ip, const char *str, int size)
544 {
545         struct ring_buffer_event *event;
546         struct ring_buffer *buffer;
547         struct print_entry *entry;
548         unsigned long irq_flags;
549         int alloc;
550         int pc;
551
552         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
553                 return 0;
554
555         pc = preempt_count();
556
557         if (unlikely(tracing_selftest_running || tracing_disabled))
558                 return 0;
559
560         alloc = sizeof(*entry) + size + 2; /* possible \n added */
561
562         local_save_flags(irq_flags);
563         buffer = global_trace.trace_buffer.buffer;
564         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
565                                           irq_flags, pc);
566         if (!event)
567                 return 0;
568
569         entry = ring_buffer_event_data(event);
570         entry->ip = ip;
571
572         memcpy(&entry->buf, str, size);
573
574         /* Add a newline if necessary */
575         if (entry->buf[size - 1] != '\n') {
576                 entry->buf[size] = '\n';
577                 entry->buf[size + 1] = '\0';
578         } else
579                 entry->buf[size] = '\0';
580
581         __buffer_unlock_commit(buffer, event);
582         ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
583
584         return size;
585 }
586 EXPORT_SYMBOL_GPL(__trace_puts);
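/*
 * Illustrative sketch: callers normally go through the trace_puts()
 * macro (see <linux/kernel.h>) rather than calling __trace_puts()
 * directly; the macro picks __trace_bputs() for string literals.  The
 * helper name below is hypothetical.
 */
static void example_trace_puts(void)
{
        trace_puts("example: reached the interesting spot\n");
}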
587
588 /**
589  * __trace_bputs - write the pointer to a constant string into trace buffer
590  * @ip:    The address of the caller
591  * @str:   The constant string to write into the buffer
592  */
593 int __trace_bputs(unsigned long ip, const char *str)
594 {
595         struct ring_buffer_event *event;
596         struct ring_buffer *buffer;
597         struct bputs_entry *entry;
598         unsigned long irq_flags;
599         int size = sizeof(struct bputs_entry);
600         int pc;
601
602         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
603                 return 0;
604
605         pc = preempt_count();
606
607         if (unlikely(tracing_selftest_running || tracing_disabled))
608                 return 0;
609
610         local_save_flags(irq_flags);
611         buffer = global_trace.trace_buffer.buffer;
612         event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
613                                           irq_flags, pc);
614         if (!event)
615                 return 0;
616
617         entry = ring_buffer_event_data(event);
618         entry->ip                       = ip;
619         entry->str                      = str;
620
621         __buffer_unlock_commit(buffer, event);
622         ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
623
624         return 1;
625 }
626 EXPORT_SYMBOL_GPL(__trace_bputs);
627
628 #ifdef CONFIG_TRACER_SNAPSHOT
629 /**
630  * tracing_snapshot - take a snapshot of the current buffer.
631  *
632  * This causes a swap between the snapshot buffer and the current live
633  * tracing buffer. You can use this to take snapshots of the live
634  * trace when some condition is triggered, but continue to trace.
635  *
636  * Note, make sure to allocate the snapshot with either
637  * a tracing_snapshot_alloc(), or by doing it manually
638  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
639  *
640  * If the snapshot buffer is not allocated, it will stop tracing.
641  * Basically making a permanent snapshot.
642  */
643 void tracing_snapshot(void)
644 {
645         struct trace_array *tr = &global_trace;
646         struct tracer *tracer = tr->current_trace;
647         unsigned long flags;
648
649         if (in_nmi()) {
650                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
651                 internal_trace_puts("*** snapshot is being ignored        ***\n");
652                 return;
653         }
654
655         if (!tr->allocated_snapshot) {
656                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
657                 internal_trace_puts("*** stopping trace here!   ***\n");
658                 tracing_off();
659                 return;
660         }
661
662         /* Note, snapshot can not be used when the tracer uses it */
663         if (tracer->use_max_tr) {
664                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
665                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
666                 return;
667         }
668
669         local_irq_save(flags);
670         update_max_tr(tr, current, smp_processor_id());
671         local_irq_restore(flags);
672 }
673 EXPORT_SYMBOL_GPL(tracing_snapshot);
674
675 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
676                                         struct trace_buffer *size_buf, int cpu_id);
677 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
678
679 static int alloc_snapshot(struct trace_array *tr)
680 {
681         int ret;
682
683         if (!tr->allocated_snapshot) {
684
685                 /* allocate spare buffer */
686                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
687                                    &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
688                 if (ret < 0)
689                         return ret;
690
691                 tr->allocated_snapshot = true;
692         }
693
694         return 0;
695 }
696
697 static void free_snapshot(struct trace_array *tr)
698 {
699         /*
700          * We don't free the ring buffer; instead, we resize it because
701          * the max_tr ring buffer has some state (e.g. ring->clock) and
702          * we want to preserve it.
703          */
704         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
705         set_buffer_entries(&tr->max_buffer, 1);
706         tracing_reset_online_cpus(&tr->max_buffer);
707         tr->allocated_snapshot = false;
708 }
709
710 /**
711  * tracing_alloc_snapshot - allocate snapshot buffer.
712  *
713  * This only allocates the snapshot buffer if it isn't already
714  * allocated - it doesn't also take a snapshot.
715  *
716  * This is meant to be used in cases where the snapshot buffer needs
717  * to be set up for events that can't sleep but need to be able to
718  * trigger a snapshot.
719  */
720 int tracing_alloc_snapshot(void)
721 {
722         struct trace_array *tr = &global_trace;
723         int ret;
724
725         ret = alloc_snapshot(tr);
726         WARN_ON(ret < 0);
727
728         return ret;
729 }
730 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
731
732 /**
733  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
734  *
735  * This is similar to tracing_snapshot(), but it will allocate the
736  * snapshot buffer if it isn't already allocated. Use this only
737  * where it is safe to sleep, as the allocation may sleep.
738  *
739  * This causes a swap between the snapshot buffer and the current live
740  * tracing buffer. You can use this to take snapshots of the live
741  * trace when some condition is triggered, but continue to trace.
742  */
743 void tracing_snapshot_alloc(void)
744 {
745         int ret;
746
747         ret = tracing_alloc_snapshot();
748         if (ret < 0)
749                 return;
750
751         tracing_snapshot();
752 }
753 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
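/*
 * Illustrative sketch: allocate the snapshot buffer once from sleepable
 * context, then trigger snapshots from wherever the condition of
 * interest is detected (atomic context is fine, NMI context is not).
 * The helper name and condition are hypothetical.
 */
static void example_snapshot_on_condition(bool hit_condition)
{
        if (hit_condition)
                tracing_snapshot();     /* assumes tracing_alloc_snapshot()
                                         * or tracing_snapshot_alloc() ran
                                         * earlier */
}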
754 #else
755 void tracing_snapshot(void)
756 {
757         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
758 }
759 EXPORT_SYMBOL_GPL(tracing_snapshot);
760 int tracing_alloc_snapshot(void)
761 {
762         WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
763         return -ENODEV;
764 }
765 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
766 void tracing_snapshot_alloc(void)
767 {
768         /* Give warning */
769         tracing_snapshot();
770 }
771 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
772 #endif /* CONFIG_TRACER_SNAPSHOT */
773
774 static void tracer_tracing_off(struct trace_array *tr)
775 {
776         if (tr->trace_buffer.buffer)
777                 ring_buffer_record_off(tr->trace_buffer.buffer);
778         /*
779          * This flag is looked at when buffers haven't been allocated
780          * yet, or by some tracers (like irqsoff), that just want to
781          * know if the ring buffer has been disabled, but it can handle
782          * races where it gets disabled but we still do a record.
783          * As the check is in the fast path of the tracers, it is more
784          * important to be fast than accurate.
785          */
786         tr->buffer_disabled = 1;
787         /* Make the flag seen by readers */
788         smp_wmb();
789 }
790
791 /**
792  * tracing_off - turn off tracing buffers
793  *
794  * This function stops the tracing buffers from recording data.
795  * It does not disable any overhead the tracers themselves may
796  * be causing. This function simply causes all recording to
797  * the ring buffers to fail.
798  */
799 void tracing_off(void)
800 {
801         tracer_tracing_off(&global_trace);
802 }
803 EXPORT_SYMBOL_GPL(tracing_off);
804
805 void disable_trace_on_warning(void)
806 {
807         if (__disable_trace_on_warning)
808                 tracing_off();
809 }
810
811 /**
812  * tracer_tracing_is_on - show real state of ring buffer enabled
813  * @tr : the trace array to know if ring buffer is enabled
814  *
815  * Shows real state of the ring buffer if it is enabled or not.
816  */
817 static int tracer_tracing_is_on(struct trace_array *tr)
818 {
819         if (tr->trace_buffer.buffer)
820                 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
821         return !tr->buffer_disabled;
822 }
823
824 /**
825  * tracing_is_on - show state of ring buffers enabled
826  */
827 int tracing_is_on(void)
828 {
829         return tracer_tracing_is_on(&global_trace);
830 }
831 EXPORT_SYMBOL_GPL(tracing_is_on);
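/*
 * Illustrative sketch: tracing_off() is typically used to freeze the
 * ring buffer the moment a problem is detected, preserving the events
 * that led up to it.  The helper name is hypothetical.
 */
static void example_freeze_trace_on_bug(bool bug_detected)
{
        if (bug_detected && tracing_is_on())
                tracing_off();
}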
832
833 static int __init set_buf_size(char *str)
834 {
835         unsigned long buf_size;
836
837         if (!str)
838                 return 0;
839         buf_size = memparse(str, &str);
840         /* nr_entries can not be zero */
841         if (buf_size == 0)
842                 return 0;
843         trace_buf_size = buf_size;
844         return 1;
845 }
846 __setup("trace_buf_size=", set_buf_size);
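/*
 * Usage note (illustrative): memparse() understands the usual K/M/G
 * suffixes, so booting with e.g. "trace_buf_size=4M" requests a 4MB
 * per-cpu buffer (rounded to page size by the ring buffer code).
 */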
847
848 static int __init set_tracing_thresh(char *str)
849 {
850         unsigned long threshold;
851         int ret;
852
853         if (!str)
854                 return 0;
855         ret = kstrtoul(str, 0, &threshold);
856         if (ret < 0)
857                 return 0;
858         tracing_thresh = threshold * 1000;
859         return 1;
860 }
861 __setup("tracing_thresh=", set_tracing_thresh);
862
863 unsigned long nsecs_to_usecs(unsigned long nsecs)
864 {
865         return nsecs / 1000;
866 }
867
868 /*
869  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
870  * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
871  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
872  * of strings in the order that the enums were defined.
873  */
874 #undef C
875 #define C(a, b) b
876
877 /* These must match the bit positions in trace_iterator_flags */
878 static const char *trace_options[] = {
879         TRACE_FLAGS
880         NULL
881 };
882
883 static struct {
884         u64 (*func)(void);
885         const char *name;
886         int in_ns;              /* is this clock in nanoseconds? */
887 } trace_clocks[] = {
888         { trace_clock_local,            "local",        1 },
889         { trace_clock_global,           "global",       1 },
890         { trace_clock_counter,          "counter",      0 },
891         { trace_clock_jiffies,          "uptime",       0 },
892         { trace_clock,                  "perf",         1 },
893         { ktime_get_mono_fast_ns,       "mono",         1 },
894         { ktime_get_raw_fast_ns,        "mono_raw",     1 },
895         ARCH_TRACE_CLOCKS
896 };
897
898 /*
899  * trace_parser_get_init - gets the buffer for trace parser
900  */
901 int trace_parser_get_init(struct trace_parser *parser, int size)
902 {
903         memset(parser, 0, sizeof(*parser));
904
905         parser->buffer = kmalloc(size, GFP_KERNEL);
906         if (!parser->buffer)
907                 return 1;
908
909         parser->size = size;
910         return 0;
911 }
912
913 /*
914  * trace_parser_put - frees the buffer for trace parser
915  */
916 void trace_parser_put(struct trace_parser *parser)
917 {
918         kfree(parser->buffer);
919 }
920
921 /*
922  * trace_get_user - reads the user input string separated by space
923  * (matched by isspace(ch))
924  *
925  * For each string found the 'struct trace_parser' is updated,
926  * and the function returns.
927  *
928  * Returns number of bytes read.
929  *
930  * See kernel/trace/trace.h for 'struct trace_parser' details.
931  */
932 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
933         size_t cnt, loff_t *ppos)
934 {
935         char ch;
936         size_t read = 0;
937         ssize_t ret;
938
939         if (!*ppos)
940                 trace_parser_clear(parser);
941
942         ret = get_user(ch, ubuf++);
943         if (ret)
944                 goto out;
945
946         read++;
947         cnt--;
948
949         /*
950          * The parser is not finished with the last write,
951          * continue reading the user input without skipping spaces.
952          */
953         if (!parser->cont) {
954                 /* skip white space */
955                 while (cnt && isspace(ch)) {
956                         ret = get_user(ch, ubuf++);
957                         if (ret)
958                                 goto out;
959                         read++;
960                         cnt--;
961                 }
962
963                 /* only spaces were written */
964                 if (isspace(ch)) {
965                         *ppos += read;
966                         ret = read;
967                         goto out;
968                 }
969
970                 parser->idx = 0;
971         }
972
973         /* read the non-space input */
974         while (cnt && !isspace(ch)) {
975                 if (parser->idx < parser->size - 1)
976                         parser->buffer[parser->idx++] = ch;
977                 else {
978                         ret = -EINVAL;
979                         goto out;
980                 }
981                 ret = get_user(ch, ubuf++);
982                 if (ret)
983                         goto out;
984                 read++;
985                 cnt--;
986         }
987
988         /* We either got finished input or we have to wait for another call. */
989         if (isspace(ch)) {
990                 parser->buffer[parser->idx] = 0;
991                 parser->cont = false;
992         } else if (parser->idx < parser->size - 1) {
993                 parser->cont = true;
994                 parser->buffer[parser->idx++] = ch;
995         } else {
996                 ret = -EINVAL;
997                 goto out;
998         }
999
1000         *ppos += read;
1001         ret = read;
1002
1003 out:
1004         return ret;
1005 }
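/*
 * Illustrative sketch of how a ->write() handler typically drives the
 * parser, one whitespace-separated token per call.  The function name
 * and the 64-byte token limit are hypothetical; error handling is
 * trimmed.
 */
static ssize_t example_token_write(const char __user *ubuf, size_t cnt,
                                   loff_t *ppos)
{
        struct trace_parser parser;
        ssize_t ret;

        if (trace_parser_get_init(&parser, 64))
                return -ENOMEM;

        ret = trace_get_user(&parser, ubuf, cnt, ppos);
        if (ret >= 0 && trace_parser_loaded(&parser)) {
                /* parser.buffer now holds one NUL-terminated token */
        }

        trace_parser_put(&parser);
        return ret;
}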
1006
1007 /* TODO add a seq_buf_to_buffer() */
1008 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1009 {
1010         int len;
1011
1012         if (trace_seq_used(s) <= s->seq.readpos)
1013                 return -EBUSY;
1014
1015         len = trace_seq_used(s) - s->seq.readpos;
1016         if (cnt > len)
1017                 cnt = len;
1018         memcpy(buf, s->buffer + s->seq.readpos, cnt);
1019
1020         s->seq.readpos += cnt;
1021         return cnt;
1022 }
1023
1024 unsigned long __read_mostly     tracing_thresh;
1025
1026 #ifdef CONFIG_TRACER_MAX_TRACE
1027 /*
1028  * Copy the new maximum trace into the separate maximum-trace
1029  * structure. (this way the maximum trace is permanently saved,
1030  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1031  */
1032 static void
1033 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1034 {
1035         struct trace_buffer *trace_buf = &tr->trace_buffer;
1036         struct trace_buffer *max_buf = &tr->max_buffer;
1037         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1038         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1039
1040         max_buf->cpu = cpu;
1041         max_buf->time_start = data->preempt_timestamp;
1042
1043         max_data->saved_latency = tr->max_latency;
1044         max_data->critical_start = data->critical_start;
1045         max_data->critical_end = data->critical_end;
1046
1047         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1048         max_data->pid = tsk->pid;
1049         /*
1050          * If tsk == current, then use current_uid(), as that does not use
1051          * RCU. The irq tracer can be called out of RCU scope.
1052          */
1053         if (tsk == current)
1054                 max_data->uid = current_uid();
1055         else
1056                 max_data->uid = task_uid(tsk);
1057
1058         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1059         max_data->policy = tsk->policy;
1060         max_data->rt_priority = tsk->rt_priority;
1061
1062         /* record this task's comm */
1063         tracing_record_cmdline(tsk);
1064 }
1065
1066 /**
1067  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1068  * @tr: tracer
1069  * @tsk: the task with the latency
1070  * @cpu: The cpu that initiated the trace.
1071  *
1072  * Flip the buffers between the @tr and the max_tr and record information
1073  * about which task was the cause of this latency.
1074  */
1075 void
1076 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1077 {
1078         struct ring_buffer *buf;
1079
1080         if (tr->stop_count)
1081                 return;
1082
1083         WARN_ON_ONCE(!irqs_disabled());
1084
1085         if (!tr->allocated_snapshot) {
1086                 /* Only the nop tracer should hit this when disabling */
1087                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1088                 return;
1089         }
1090
1091         arch_spin_lock(&tr->max_lock);
1092
1093         buf = tr->trace_buffer.buffer;
1094         tr->trace_buffer.buffer = tr->max_buffer.buffer;
1095         tr->max_buffer.buffer = buf;
1096
1097         __update_max_tr(tr, tsk, cpu);
1098         arch_spin_unlock(&tr->max_lock);
1099 }
1100
1101 /**
1102  * update_max_tr_single - only copy one trace over, and reset the rest
1103  * @tr: tracer
1104  * @tsk: task with the latency
1105  * @cpu: the cpu of the buffer to copy.
1106  *
1107  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1108  */
1109 void
1110 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1111 {
1112         int ret;
1113
1114         if (tr->stop_count)
1115                 return;
1116
1117         WARN_ON_ONCE(!irqs_disabled());
1118         if (!tr->allocated_snapshot) {
1119                 /* Only the nop tracer should hit this when disabling */
1120                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1121                 return;
1122         }
1123
1124         arch_spin_lock(&tr->max_lock);
1125
1126         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1127
1128         if (ret == -EBUSY) {
1129                 /*
1130                  * We failed to swap the buffer due to a commit taking
1131                  * place on this CPU. We fail to record, but we reset
1132                  * the max trace buffer (no one writes directly to it)
1133                  * and flag that it failed.
1134                  */
1135                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1136                         "Failed to swap buffers due to commit in progress\n");
1137         }
1138
1139         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1140
1141         __update_max_tr(tr, tsk, cpu);
1142         arch_spin_unlock(&tr->max_lock);
1143 }
1144 #endif /* CONFIG_TRACER_MAX_TRACE */
1145
1146 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1147 {
1148         /* Iterators are static, they should be filled or empty */
1149         if (trace_buffer_iter(iter, iter->cpu_file))
1150                 return 0;
1151
1152         return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1153                                 full);
1154 }
1155
1156 #ifdef CONFIG_FTRACE_STARTUP_TEST
1157 static int run_tracer_selftest(struct tracer *type)
1158 {
1159         struct trace_array *tr = &global_trace;
1160         struct tracer *saved_tracer = tr->current_trace;
1161         int ret;
1162
1163         if (!type->selftest || tracing_selftest_disabled)
1164                 return 0;
1165
1166         /*
1167          * Run a selftest on this tracer.
1168          * Here we reset the trace buffer, and set the current
1169          * tracer to be this tracer. The tracer can then run some
1170          * internal tracing to verify that everything is in order.
1171          * If we fail, we do not register this tracer.
1172          */
1173         tracing_reset_online_cpus(&tr->trace_buffer);
1174
1175         tr->current_trace = type;
1176
1177 #ifdef CONFIG_TRACER_MAX_TRACE
1178         if (type->use_max_tr) {
1179                 /* If we expanded the buffers, make sure the max is expanded too */
1180                 if (ring_buffer_expanded)
1181                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1182                                            RING_BUFFER_ALL_CPUS);
1183                 tr->allocated_snapshot = true;
1184         }
1185 #endif
1186
1187         /* the test is responsible for initializing and enabling */
1188         pr_info("Testing tracer %s: ", type->name);
1189         ret = type->selftest(type, tr);
1190         /* the test is responsible for resetting too */
1191         tr->current_trace = saved_tracer;
1192         if (ret) {
1193                 printk(KERN_CONT "FAILED!\n");
1194                 /* Add the warning after printing 'FAILED' */
1195                 WARN_ON(1);
1196                 return -1;
1197         }
1198         /* Only reset on passing, to avoid touching corrupted buffers */
1199         tracing_reset_online_cpus(&tr->trace_buffer);
1200
1201 #ifdef CONFIG_TRACER_MAX_TRACE
1202         if (type->use_max_tr) {
1203                 tr->allocated_snapshot = false;
1204
1205                 /* Shrink the max buffer again */
1206                 if (ring_buffer_expanded)
1207                         ring_buffer_resize(tr->max_buffer.buffer, 1,
1208                                            RING_BUFFER_ALL_CPUS);
1209         }
1210 #endif
1211
1212         printk(KERN_CONT "PASSED\n");
1213         return 0;
1214 }
1215 #else
1216 static inline int run_tracer_selftest(struct tracer *type)
1217 {
1218         return 0;
1219 }
1220 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1221
1222 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1223
1224 static void __init apply_trace_boot_options(void);
1225
1226 /**
1227  * register_tracer - register a tracer with the ftrace system.
1228  * @type: the plugin for the tracer
1229  *
1230  * Register a new plugin tracer.
1231  */
1232 int __init register_tracer(struct tracer *type)
1233 {
1234         struct tracer *t;
1235         int ret = 0;
1236
1237         if (!type->name) {
1238                 pr_info("Tracer must have a name\n");
1239                 return -1;
1240         }
1241
1242         if (strlen(type->name) >= MAX_TRACER_SIZE) {
1243                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1244                 return -1;
1245         }
1246
1247         mutex_lock(&trace_types_lock);
1248
1249         tracing_selftest_running = true;
1250
1251         for (t = trace_types; t; t = t->next) {
1252                 if (strcmp(type->name, t->name) == 0) {
1253                         /* already found */
1254                         pr_info("Tracer %s already registered\n",
1255                                 type->name);
1256                         ret = -1;
1257                         goto out;
1258                 }
1259         }
1260
1261         if (!type->set_flag)
1262                 type->set_flag = &dummy_set_flag;
1263         if (!type->flags)
1264                 type->flags = &dummy_tracer_flags;
1265         else
1266                 if (!type->flags->opts)
1267                         type->flags->opts = dummy_tracer_opt;
1268
1269         ret = run_tracer_selftest(type);
1270         if (ret < 0)
1271                 goto out;
1272
1273         type->next = trace_types;
1274         trace_types = type;
1275         add_tracer_options(&global_trace, type);
1276
1277  out:
1278         tracing_selftest_running = false;
1279         mutex_unlock(&trace_types_lock);
1280
1281         if (ret || !default_bootup_tracer)
1282                 goto out_unlock;
1283
1284         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1285                 goto out_unlock;
1286
1287         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1288         /* Do we want this tracer to start on bootup? */
1289         tracing_set_tracer(&global_trace, type->name);
1290         default_bootup_tracer = NULL;
1291
1292         apply_trace_boot_options();
1293
1294         /* disable other selftests, since this will break it. */
1295         tracing_selftest_disabled = true;
1296 #ifdef CONFIG_FTRACE_STARTUP_TEST
1297         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1298                type->name);
1299 #endif
1300
1301  out_unlock:
1302         return ret;
1303 }
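/*
 * Illustrative sketch of a minimal tracer plugin.  Real tracers (see
 * e.g. trace_nop.c) fill in more callbacks; the names below are
 * hypothetical and only meant to show the registration pattern.
 */
static int example_tracer_init(struct trace_array *tr)
{
        return 0;
}

static struct tracer example_tracer __read_mostly = {
        .name   = "example",
        .init   = example_tracer_init,
};

static int __init register_example_tracer(void)
{
        return register_tracer(&example_tracer);
}
core_initcall(register_example_tracer);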
1304
1305 void tracing_reset(struct trace_buffer *buf, int cpu)
1306 {
1307         struct ring_buffer *buffer = buf->buffer;
1308
1309         if (!buffer)
1310                 return;
1311
1312         ring_buffer_record_disable(buffer);
1313
1314         /* Make sure all commits have finished */
1315         synchronize_sched();
1316         ring_buffer_reset_cpu(buffer, cpu);
1317
1318         ring_buffer_record_enable(buffer);
1319 }
1320
1321 void tracing_reset_online_cpus(struct trace_buffer *buf)
1322 {
1323         struct ring_buffer *buffer = buf->buffer;
1324         int cpu;
1325
1326         if (!buffer)
1327                 return;
1328
1329         ring_buffer_record_disable(buffer);
1330
1331         /* Make sure all commits have finished */
1332         synchronize_sched();
1333
1334         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1335
1336         for_each_online_cpu(cpu)
1337                 ring_buffer_reset_cpu(buffer, cpu);
1338
1339         ring_buffer_record_enable(buffer);
1340 }
1341
1342 /* Must have trace_types_lock held */
1343 void tracing_reset_all_online_cpus(void)
1344 {
1345         struct trace_array *tr;
1346
1347         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1348                 tracing_reset_online_cpus(&tr->trace_buffer);
1349 #ifdef CONFIG_TRACER_MAX_TRACE
1350                 tracing_reset_online_cpus(&tr->max_buffer);
1351 #endif
1352         }
1353 }
1354
1355 #define SAVED_CMDLINES_DEFAULT 128
1356 #define NO_CMDLINE_MAP UINT_MAX
1357 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1358 struct saved_cmdlines_buffer {
1359         unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1360         unsigned *map_cmdline_to_pid;
1361         unsigned cmdline_num;
1362         int cmdline_idx;
1363         char *saved_cmdlines;
1364 };
1365 static struct saved_cmdlines_buffer *savedcmd;
1366
1367 /* temporarily disable recording */
1368 static atomic_t trace_record_cmdline_disabled __read_mostly;
1369
1370 static inline char *get_saved_cmdlines(int idx)
1371 {
1372         return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1373 }
1374
1375 static inline void set_cmdline(int idx, const char *cmdline)
1376 {
1377         memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1378 }
1379
1380 static int allocate_cmdlines_buffer(unsigned int val,
1381                                     struct saved_cmdlines_buffer *s)
1382 {
1383         s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1384                                         GFP_KERNEL);
1385         if (!s->map_cmdline_to_pid)
1386                 return -ENOMEM;
1387
1388         s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1389         if (!s->saved_cmdlines) {
1390                 kfree(s->map_cmdline_to_pid);
1391                 return -ENOMEM;
1392         }
1393
1394         s->cmdline_idx = 0;
1395         s->cmdline_num = val;
1396         memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1397                sizeof(s->map_pid_to_cmdline));
1398         memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1399                val * sizeof(*s->map_cmdline_to_pid));
1400
1401         return 0;
1402 }
1403
1404 static int trace_create_savedcmd(void)
1405 {
1406         int ret;
1407
1408         savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1409         if (!savedcmd)
1410                 return -ENOMEM;
1411
1412         ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1413         if (ret < 0) {
1414                 kfree(savedcmd);
1415                 savedcmd = NULL;
1416                 return -ENOMEM;
1417         }
1418
1419         return 0;
1420 }
1421
1422 int is_tracing_stopped(void)
1423 {
1424         return global_trace.stop_count;
1425 }
1426
1427 /**
1428  * tracing_start - quick start of the tracer
1429  *
1430  * If tracing is enabled but was stopped by tracing_stop,
1431  * this will start the tracer back up.
1432  */
1433 void tracing_start(void)
1434 {
1435         struct ring_buffer *buffer;
1436         unsigned long flags;
1437
1438         if (tracing_disabled)
1439                 return;
1440
1441         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1442         if (--global_trace.stop_count) {
1443                 if (global_trace.stop_count < 0) {
1444                         /* Someone screwed up their debugging */
1445                         WARN_ON_ONCE(1);
1446                         global_trace.stop_count = 0;
1447                 }
1448                 goto out;
1449         }
1450
1451         /* Prevent the buffers from switching */
1452         arch_spin_lock(&global_trace.max_lock);
1453
1454         buffer = global_trace.trace_buffer.buffer;
1455         if (buffer)
1456                 ring_buffer_record_enable(buffer);
1457
1458 #ifdef CONFIG_TRACER_MAX_TRACE
1459         buffer = global_trace.max_buffer.buffer;
1460         if (buffer)
1461                 ring_buffer_record_enable(buffer);
1462 #endif
1463
1464         arch_spin_unlock(&global_trace.max_lock);
1465
1466  out:
1467         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1468 }
1469
1470 static void tracing_start_tr(struct trace_array *tr)
1471 {
1472         struct ring_buffer *buffer;
1473         unsigned long flags;
1474
1475         if (tracing_disabled)
1476                 return;
1477
1478         /* If global, we need to also start the max tracer */
1479         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480                 return tracing_start();
1481
1482         raw_spin_lock_irqsave(&tr->start_lock, flags);
1483
1484         if (--tr->stop_count) {
1485                 if (tr->stop_count < 0) {
1486                         /* Someone screwed up their debugging */
1487                         WARN_ON_ONCE(1);
1488                         tr->stop_count = 0;
1489                 }
1490                 goto out;
1491         }
1492
1493         buffer = tr->trace_buffer.buffer;
1494         if (buffer)
1495                 ring_buffer_record_enable(buffer);
1496
1497  out:
1498         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1499 }
1500
1501 /**
1502  * tracing_stop - quick stop of the tracer
1503  *
1504  * Light weight way to stop tracing. Use in conjunction with
1505  * tracing_start.
1506  */
1507 void tracing_stop(void)
1508 {
1509         struct ring_buffer *buffer;
1510         unsigned long flags;
1511
1512         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1513         if (global_trace.stop_count++)
1514                 goto out;
1515
1516         /* Prevent the buffers from switching */
1517         arch_spin_lock(&global_trace.max_lock);
1518
1519         buffer = global_trace.trace_buffer.buffer;
1520         if (buffer)
1521                 ring_buffer_record_disable(buffer);
1522
1523 #ifdef CONFIG_TRACER_MAX_TRACE
1524         buffer = global_trace.max_buffer.buffer;
1525         if (buffer)
1526                 ring_buffer_record_disable(buffer);
1527 #endif
1528
1529         arch_spin_unlock(&global_trace.max_lock);
1530
1531  out:
1532         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1533 }
1534
1535 static void tracing_stop_tr(struct trace_array *tr)
1536 {
1537         struct ring_buffer *buffer;
1538         unsigned long flags;
1539
1540         /* If global, we need to also stop the max tracer */
1541         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1542                 return tracing_stop();
1543
1544         raw_spin_lock_irqsave(&tr->start_lock, flags);
1545         if (tr->stop_count++)
1546                 goto out;
1547
1548         buffer = tr->trace_buffer.buffer;
1549         if (buffer)
1550                 ring_buffer_record_disable(buffer);
1551
1552  out:
1553         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1554 }
1555
1556 void trace_stop_cmdline_recording(void);
1557
1558 static int trace_save_cmdline(struct task_struct *tsk)
1559 {
1560         unsigned pid, idx;
1561
1562         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1563                 return 0;
1564
1565         /*
1566          * It's not the end of the world if we don't get
1567          * the lock, but we also don't want to spin
1568          * nor do we want to disable interrupts,
1569          * so if we miss here, then better luck next time.
1570          */
1571         if (!arch_spin_trylock(&trace_cmdline_lock))
1572                 return 0;
1573
1574         idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1575         if (idx == NO_CMDLINE_MAP) {
1576                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1577
1578                 /*
1579                  * Check whether the cmdline buffer at idx has a pid
1580                  * mapped. We are going to overwrite that entry so we
1581                  * need to clear the map_pid_to_cmdline. Otherwise we
1582                  * would read the new comm for the old pid.
1583                  */
1584                 pid = savedcmd->map_cmdline_to_pid[idx];
1585                 if (pid != NO_CMDLINE_MAP)
1586                         savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1587
1588                 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1589                 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1590
1591                 savedcmd->cmdline_idx = idx;
1592         }
1593
1594         set_cmdline(idx, tsk->comm);
1595
1596         arch_spin_unlock(&trace_cmdline_lock);
1597
1598         return 1;
1599 }
1600
1601 static void __trace_find_cmdline(int pid, char comm[])
1602 {
1603         unsigned map;
1604
1605         if (!pid) {
1606                 strcpy(comm, "<idle>");
1607                 return;
1608         }
1609
1610         if (WARN_ON_ONCE(pid < 0)) {
1611                 strcpy(comm, "<XXX>");
1612                 return;
1613         }
1614
1615         if (pid > PID_MAX_DEFAULT) {
1616                 strcpy(comm, "<...>");
1617                 return;
1618         }
1619
1620         map = savedcmd->map_pid_to_cmdline[pid];
1621         if (map != NO_CMDLINE_MAP)
1622                 strcpy(comm, get_saved_cmdlines(map));
1623         else
1624                 strcpy(comm, "<...>");
1625 }
1626
1627 void trace_find_cmdline(int pid, char comm[])
1628 {
1629         preempt_disable();
1630         arch_spin_lock(&trace_cmdline_lock);
1631
1632         __trace_find_cmdline(pid, comm);
1633
1634         arch_spin_unlock(&trace_cmdline_lock);
1635         preempt_enable();
1636 }
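/*
 * Illustrative sketch: output code resolves a recorded pid back to a
 * task name like this (the real users live in trace_output.c).  The
 * helper name is hypothetical.
 */
static void example_print_comm(struct trace_seq *s, int pid)
{
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(pid, comm);
        trace_seq_printf(s, "%s-%d", comm, pid);
}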
1637
1638 void tracing_record_cmdline(struct task_struct *tsk)
1639 {
1640         if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1641                 return;
1642
1643         if (!__this_cpu_read(trace_cmdline_save))
1644                 return;
1645
1646         if (trace_save_cmdline(tsk))
1647                 __this_cpu_write(trace_cmdline_save, false);
1648 }
1649
1650 void
1651 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1652                              int pc)
1653 {
1654         struct task_struct *tsk = current;
1655
1656         entry->preempt_count            = pc & 0xff;
1657         entry->pid                      = (tsk) ? tsk->pid : 0;
1658         entry->flags =
1659 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1660                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1661 #else
1662                 TRACE_FLAG_IRQS_NOSUPPORT |
1663 #endif
1664                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1665                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1666                 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1667                 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1668 }
1669 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1670
1671 struct ring_buffer_event *
1672 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1673                           int type,
1674                           unsigned long len,
1675                           unsigned long flags, int pc)
1676 {
1677         struct ring_buffer_event *event;
1678
1679         event = ring_buffer_lock_reserve(buffer, len);
1680         if (event != NULL) {
1681                 struct trace_entry *ent = ring_buffer_event_data(event);
1682
1683                 tracing_generic_entry_update(ent, flags, pc);
1684                 ent->type = type;
1685         }
1686
1687         return event;
1688 }
1689
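/*
 * Commit a previously reserved event and note that a trace event was
 * recorded on this CPU, so that tracing_record_cmdline() will save the
 * current comm the next time it is called.
 */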
1690 void
1691 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1692 {
1693         __this_cpu_write(trace_cmdline_save, true);
1694         ring_buffer_unlock_commit(buffer, event);
1695 }
1696
1697 void trace_buffer_unlock_commit(struct trace_array *tr,
1698                                 struct ring_buffer *buffer,
1699                                 struct ring_buffer_event *event,
1700                                 unsigned long flags, int pc)
1701 {
1702         __buffer_unlock_commit(buffer, event);
1703
1704         ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1705         ftrace_trace_userstack(buffer, flags, pc);
1706 }
1707 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1708
1709 static struct ring_buffer *temp_buffer;
1710
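/*
 * Reserve an event on the buffer of the trace_array that @trace_file
 * belongs to.  If the reservation fails but the file has conditional
 * triggers enabled, the event is staged in temp_buffer instead so the
 * trigger can still inspect its data; such an entry is never recorded.
 */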
1711 struct ring_buffer_event *
1712 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1713                           struct trace_event_file *trace_file,
1714                           int type, unsigned long len,
1715                           unsigned long flags, int pc)
1716 {
1717         struct ring_buffer_event *entry;
1718
1719         *current_rb = trace_file->tr->trace_buffer.buffer;
1720         entry = trace_buffer_lock_reserve(*current_rb,
1721                                          type, len, flags, pc);
1722         /*
1723          * If tracing is off, but we have triggers enabled
1724          * we still need to look at the event data. Use the temp_buffer
1725          * to store the trace event for the trigger to use. It's recursion
1726          * safe and will not be recorded anywhere.
1727          */
1728         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1729                 *current_rb = temp_buffer;
1730                 entry = trace_buffer_lock_reserve(*current_rb,
1731                                                   type, len, flags, pc);
1732         }
1733         return entry;
1734 }
1735 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1736
1737 struct ring_buffer_event *
1738 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1739                                   int type, unsigned long len,
1740                                   unsigned long flags, int pc)
1741 {
1742         *current_rb = global_trace.trace_buffer.buffer;
1743         return trace_buffer_lock_reserve(*current_rb,
1744                                          type, len, flags, pc);
1745 }
1746 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1747
1748 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1749                                      struct ring_buffer *buffer,
1750                                      struct ring_buffer_event *event,
1751                                      unsigned long flags, int pc,
1752                                      struct pt_regs *regs)
1753 {
1754         __buffer_unlock_commit(buffer, event);
1755
1756         ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
1757         ftrace_trace_userstack(buffer, flags, pc);
1758 }
1759 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1760
1761 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1762                                          struct ring_buffer_event *event)
1763 {
1764         ring_buffer_discard_commit(buffer, event);
1765 }
1766 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1767
1768 void
1769 trace_function(struct trace_array *tr,
1770                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1771                int pc)
1772 {
1773         struct trace_event_call *call = &event_function;
1774         struct ring_buffer *buffer = tr->trace_buffer.buffer;
1775         struct ring_buffer_event *event;
1776         struct ftrace_entry *entry;
1777
1778         /* If we are reading the ring buffer, don't trace */
1779         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1780                 return;
1781
1782         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1783                                           flags, pc);
1784         if (!event)
1785                 return;
1786         entry   = ring_buffer_event_data(event);
1787         entry->ip                       = ip;
1788         entry->parent_ip                = parent_ip;
1789
1790         if (!call_filter_check_discard(call, entry, buffer, event))
1791                 __buffer_unlock_commit(buffer, event);
1792 }
1793
1794 #ifdef CONFIG_STACKTRACE
1795
1796 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797 struct ftrace_stack {
1798         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1799 };
1800
1801 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1802 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
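/*
 * Record a kernel stack trace into @buffer.  When it can be reserved, the
 * per cpu ftrace_stack is used as scratch space so that traces deeper than
 * the FTRACE_STACK_ENTRIES that fit directly in an event are not truncated.
 */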
1804 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1805                                  unsigned long flags,
1806                                  int skip, int pc, struct pt_regs *regs)
1807 {
1808         struct trace_event_call *call = &event_kernel_stack;
1809         struct ring_buffer_event *event;
1810         struct stack_entry *entry;
1811         struct stack_trace trace;
1812         int use_stack;
1813         int size = FTRACE_STACK_ENTRIES;
1814
1815         trace.nr_entries        = 0;
1816         trace.skip              = skip;
1817
1818         /*
1819          * Since events can happen in NMIs there's no safe way to
1820          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821          * or NMI comes in, it will just have to use the default
1822          * FTRACE_STACK_ENTRIES sized buffer in the event itself.
1823          */
1824         preempt_disable_notrace();
1825
1826         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1827         /*
1828          * We don't need any atomic variables, just a barrier.
1829          * If an interrupt comes in, we don't care, because it would
1830          * have exited and put the counter back to what we want.
1831          * We just need a barrier to keep gcc from moving things
1832          * around.
1833          */
1834         barrier();
1835         if (use_stack == 1) {
1836                 trace.entries           = this_cpu_ptr(ftrace_stack.calls);
1837                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1838
1839                 if (regs)
1840                         save_stack_trace_regs(regs, &trace);
1841                 else
1842                         save_stack_trace(&trace);
1843
1844                 if (trace.nr_entries > size)
1845                         size = trace.nr_entries;
1846         } else
1847                 /* From now on, use_stack is a boolean */
1848                 use_stack = 0;
1849
1850         size *= sizeof(unsigned long);
1851
1852         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1853                                           sizeof(*entry) + size, flags, pc);
1854         if (!event)
1855                 goto out;
1856         entry = ring_buffer_event_data(event);
1857
1858         memset(&entry->caller, 0, size);
1859
1860         if (use_stack)
1861                 memcpy(&entry->caller, trace.entries,
1862                        trace.nr_entries * sizeof(unsigned long));
1863         else {
1864                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1865                 trace.entries           = entry->caller;
1866                 if (regs)
1867                         save_stack_trace_regs(regs, &trace);
1868                 else
1869                         save_stack_trace(&trace);
1870         }
1871
1872         entry->size = trace.nr_entries;
1873
1874         if (!call_filter_check_discard(call, entry, buffer, event))
1875                 __buffer_unlock_commit(buffer, event);
1876
1877  out:
1878         /* Again, don't let gcc optimize things here */
1879         barrier();
1880         __this_cpu_dec(ftrace_stack_reserve);
1881         preempt_enable_notrace();
1882
1883 }
1884
1885 static inline void ftrace_trace_stack(struct trace_array *tr,
1886                                       struct ring_buffer *buffer,
1887                                       unsigned long flags,
1888                                       int skip, int pc, struct pt_regs *regs)
1889 {
1890         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1891                 return;
1892
1893         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1894 }
1895
1896 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1897                    int pc)
1898 {
1899         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1900 }
1901
1902 /**
1903  * trace_dump_stack - record a stack back trace in the trace buffer
1904  * @skip: Number of functions to skip (helper handlers)
1905  */
1906 void trace_dump_stack(int skip)
1907 {
1908         unsigned long flags;
1909
1910         if (tracing_disabled || tracing_selftest_running)
1911                 return;
1912
1913         local_save_flags(flags);
1914
1915         /*
1916          * Skip 3 more, which seems to get us to the caller of
1917          * this function.
1918          */
1919         skip += 3;
1920         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1921                              flags, skip, preempt_count(), NULL);
1922 }
1923
1924 static DEFINE_PER_CPU(int, user_stack_count);
1925
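/*
 * Record a user space stack trace for the current task, if the
 * userstacktrace option is set.  Not usable from NMI context (page faults
 * cannot be handled there) and guarded against recursion by the per cpu
 * user_stack_count.
 */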
1926 void
1927 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1928 {
1929         struct trace_event_call *call = &event_user_stack;
1930         struct ring_buffer_event *event;
1931         struct userstack_entry *entry;
1932         struct stack_trace trace;
1933
1934         if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1935                 return;
1936
1937         /*
1938          * NMIs can not handle page faults, even with fixups.
1939          * Saving the user stack can (and often does) fault.
1940          */
1941         if (unlikely(in_nmi()))
1942                 return;
1943
1944         /*
1945          * prevent recursion, since the user stack tracing may
1946          * trigger other kernel events.
1947          */
1948         preempt_disable();
1949         if (__this_cpu_read(user_stack_count))
1950                 goto out;
1951
1952         __this_cpu_inc(user_stack_count);
1953
1954         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1955                                           sizeof(*entry), flags, pc);
1956         if (!event)
1957                 goto out_drop_count;
1958         entry   = ring_buffer_event_data(event);
1959
1960         entry->tgid             = current->tgid;
1961         memset(&entry->caller, 0, sizeof(entry->caller));
1962
1963         trace.nr_entries        = 0;
1964         trace.max_entries       = FTRACE_STACK_ENTRIES;
1965         trace.skip              = 0;
1966         trace.entries           = entry->caller;
1967
1968         save_stack_trace_user(&trace);
1969         if (!call_filter_check_discard(call, entry, buffer, event))
1970                 __buffer_unlock_commit(buffer, event);
1971
1972  out_drop_count:
1973         __this_cpu_dec(user_stack_count);
1974  out:
1975         preempt_enable();
1976 }
1977
1978 #ifdef UNUSED
1979 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1980 {
1981         ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
1982 }
1983 #endif /* UNUSED */
1984
1985 #endif /* CONFIG_STACKTRACE */
1986
1987 /* created for use with alloc_percpu */
1988 struct trace_buffer_struct {
1989         char buffer[TRACE_BUF_SIZE];
1990 };
1991
1992 static struct trace_buffer_struct *trace_percpu_buffer;
1993 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1994 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1995 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
1997 /*
1998  * The buffer used is dependent on the context. There is a per cpu
1999  * buffer for normal context, softirq context, hard irq context and
2000  * for NMI context. This allows for lockless recording.
2001  *
2002  * Note: if the buffers failed to be allocated, then this returns NULL.
2003  */
2004 static char *get_trace_buf(void)
2005 {
2006         struct trace_buffer_struct *percpu_buffer;
2007
2008         /*
2009          * If we have allocated per cpu buffers, then we do not
2010          * need to do any locking.
2011          */
2012         if (in_nmi())
2013                 percpu_buffer = trace_percpu_nmi_buffer;
2014         else if (in_irq())
2015                 percpu_buffer = trace_percpu_irq_buffer;
2016         else if (in_softirq())
2017                 percpu_buffer = trace_percpu_sirq_buffer;
2018         else
2019                 percpu_buffer = trace_percpu_buffer;
2020
2021         if (!percpu_buffer)
2022                 return NULL;
2023
2024         return this_cpu_ptr(&percpu_buffer->buffer[0]);
2025 }
2026
2027 static int alloc_percpu_trace_buffer(void)
2028 {
2029         struct trace_buffer_struct *buffers;
2030         struct trace_buffer_struct *sirq_buffers;
2031         struct trace_buffer_struct *irq_buffers;
2032         struct trace_buffer_struct *nmi_buffers;
2033
2034         buffers = alloc_percpu(struct trace_buffer_struct);
2035         if (!buffers)
2036                 goto err_warn;
2037
2038         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2039         if (!sirq_buffers)
2040                 goto err_sirq;
2041
2042         irq_buffers = alloc_percpu(struct trace_buffer_struct);
2043         if (!irq_buffers)
2044                 goto err_irq;
2045
2046         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2047         if (!nmi_buffers)
2048                 goto err_nmi;
2049
2050         trace_percpu_buffer = buffers;
2051         trace_percpu_sirq_buffer = sirq_buffers;
2052         trace_percpu_irq_buffer = irq_buffers;
2053         trace_percpu_nmi_buffer = nmi_buffers;
2054
2055         return 0;
2056
2057  err_nmi:
2058         free_percpu(irq_buffers);
2059  err_irq:
2060         free_percpu(sirq_buffers);
2061  err_sirq:
2062         free_percpu(buffers);
2063  err_warn:
2064         WARN(1, "Could not allocate percpu trace_printk buffer");
2065         return -ENOMEM;
2066 }
2067
2068 static int buffers_allocated;
2069
2070 void trace_printk_init_buffers(void)
2071 {
2072         if (buffers_allocated)
2073                 return;
2074
2075         if (alloc_percpu_trace_buffer())
2076                 return;
2077
2078         /* trace_printk() is for debug use only. Don't use it in production. */
2079
2080         pr_warning("\n");
2081         pr_warning("**********************************************************\n");
2082         pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2083         pr_warning("**                                                      **\n");
2084         pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
2085         pr_warning("**                                                      **\n");
2086         pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
2087         pr_warning("** unsafe for production use.                           **\n");
2088         pr_warning("**                                                      **\n");
2089         pr_warning("** If you see this message and you are not debugging    **\n");
2090         pr_warning("** the kernel, report this immediately to your vendor!  **\n");
2091         pr_warning("**                                                      **\n");
2092         pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2093         pr_warning("**********************************************************\n");
2094
2095         /* Expand the buffers to set size */
2096         tracing_update_buffers();
2097
2098         buffers_allocated = 1;
2099
2100         /*
2101          * trace_printk_init_buffers() can be called by modules.
2102          * If that happens, then we need to start cmdline recording
2103          * directly here. If global_trace.trace_buffer.buffer is already
2104          * allocated here, then this was called by module code.
2105          */
2106         if (global_trace.trace_buffer.buffer)
2107                 tracing_start_cmdline_record();
2108 }
2109
2110 void trace_printk_start_comm(void)
2111 {
2112         /* Start tracing comms if trace printk is set */
2113         if (!buffers_allocated)
2114                 return;
2115         tracing_start_cmdline_record();
2116 }
2117
2118 static void trace_printk_start_stop_comm(int enabled)
2119 {
2120         if (!buffers_allocated)
2121                 return;
2122
2123         if (enabled)
2124                 tracing_start_cmdline_record();
2125         else
2126                 tracing_stop_cmdline_record();
2127 }
2128
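/*
 * Illustrative only: a call such as
 *
 *	trace_printk("read %d bytes from %s\n", ret, name);
 *
 * with a constant format string ends up (roughly) here, with @fmt pointing
 * at that format and the arguments packed in binary form into the trace
 * buffer by vbin_printf().  The text is only formatted later, at read time.
 */
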
2129 /**
2130  * trace_vbprintk - write binary msg to tracing buffer
 * @ip: instruction pointer of the caller
 * @fmt: printf-style format string; only the pointer is recorded, so
 *       @fmt must remain valid for as long as the buffer may be read
 * @args: arguments for @fmt, packed in binary form by vbin_printf()
2132  */
2133 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2134 {
2135         struct trace_event_call *call = &event_bprint;
2136         struct ring_buffer_event *event;
2137         struct ring_buffer *buffer;
2138         struct trace_array *tr = &global_trace;
2139         struct bprint_entry *entry;
2140         unsigned long flags;
2141         char *tbuffer;
2142         int len = 0, size, pc;
2143
2144         if (unlikely(tracing_selftest_running || tracing_disabled))
2145                 return 0;
2146
2147         /* Don't pollute graph traces with trace_vprintk internals */
2148         pause_graph_tracing();
2149
2150         pc = preempt_count();
2151         preempt_disable_notrace();
2152
2153         tbuffer = get_trace_buf();
2154         if (!tbuffer) {
2155                 len = 0;
2156                 goto out;
2157         }
2158
2159         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2160
2161         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2162                 goto out;
2163
2164         local_save_flags(flags);
2165         size = sizeof(*entry) + sizeof(u32) * len;
2166         buffer = tr->trace_buffer.buffer;
2167         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2168                                           flags, pc);
2169         if (!event)
2170                 goto out;
2171         entry = ring_buffer_event_data(event);
2172         entry->ip                       = ip;
2173         entry->fmt                      = fmt;
2174
2175         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2176         if (!call_filter_check_discard(call, entry, buffer, event)) {
2177                 __buffer_unlock_commit(buffer, event);
2178                 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2179         }
2180
2181 out:
2182         preempt_enable_notrace();
2183         unpause_graph_tracing();
2184
2185         return len;
2186 }
2187 EXPORT_SYMBOL_GPL(trace_vbprintk);
2188
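/*
 * Common worker for trace_array_vprintk() and trace_array_printk_buf():
 * format the message into a per cpu scratch buffer with vscnprintf() and
 * copy the resulting text into a TRACE_PRINT event on @buffer.
 */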
2189 static int
2190 __trace_array_vprintk(struct ring_buffer *buffer,
2191                       unsigned long ip, const char *fmt, va_list args)
2192 {
2193         struct trace_event_call *call = &event_print;
2194         struct ring_buffer_event *event;
2195         int len = 0, size, pc;
2196         struct print_entry *entry;
2197         unsigned long flags;
2198         char *tbuffer;
2199
2200         if (tracing_disabled || tracing_selftest_running)
2201                 return 0;
2202
2203         /* Don't pollute graph traces with trace_vprintk internals */
2204         pause_graph_tracing();
2205
2206         pc = preempt_count();
2207         preempt_disable_notrace();
2208
2210         tbuffer = get_trace_buf();
2211         if (!tbuffer) {
2212                 len = 0;
2213                 goto out;
2214         }
2215
2216         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2217
2218         local_save_flags(flags);
2219         size = sizeof(*entry) + len + 1;
2220         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2221                                           flags, pc);
2222         if (!event)
2223                 goto out;
2224         entry = ring_buffer_event_data(event);
2225         entry->ip = ip;
2226
2227         memcpy(&entry->buf, tbuffer, len + 1);
2228         if (!call_filter_check_discard(call, entry, buffer, event)) {
2229                 __buffer_unlock_commit(buffer, event);
2230                 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2231         }
2232  out:
2233         preempt_enable_notrace();
2234         unpause_graph_tracing();
2235
2236         return len;
2237 }
2238
2239 int trace_array_vprintk(struct trace_array *tr,
2240                         unsigned long ip, const char *fmt, va_list args)
2241 {
2242         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243 }
2244
2245 int trace_array_printk(struct trace_array *tr,
2246                        unsigned long ip, const char *fmt, ...)
2247 {
2248         int ret;
2249         va_list ap;
2250
2251         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2252                 return 0;
2253
2254         va_start(ap, fmt);
2255         ret = trace_array_vprintk(tr, ip, fmt, ap);
2256         va_end(ap);
2257         return ret;
2258 }
2259
2260 int trace_array_printk_buf(struct ring_buffer *buffer,
2261                            unsigned long ip, const char *fmt, ...)
2262 {
2263         int ret;
2264         va_list ap;
2265
2266         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2267                 return 0;
2268
2269         va_start(ap, fmt);
2270         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271         va_end(ap);
2272         return ret;
2273 }
2274
2275 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2276 {
2277         return trace_array_vprintk(&global_trace, ip, fmt, args);
2278 }
2279 EXPORT_SYMBOL_GPL(trace_vprintk);
2280
2281 static void trace_iterator_increment(struct trace_iterator *iter)
2282 {
2283         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284
2285         iter->idx++;
2286         if (buf_iter)
2287                 ring_buffer_read(buf_iter, NULL);
2288 }
2289
2290 static struct trace_entry *
2291 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292                 unsigned long *lost_events)
2293 {
2294         struct ring_buffer_event *event;
2295         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2296
2297         if (buf_iter)
2298                 event = ring_buffer_iter_peek(buf_iter, ts);
2299         else
2300                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2301                                          lost_events);
2302
2303         if (event) {
2304                 iter->ent_size = ring_buffer_event_length(event);
2305                 return ring_buffer_event_data(event);
2306         }
2307         iter->ent_size = 0;
2308         return NULL;
2309 }
2310
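/*
 * Find the oldest pending entry across the cpu buffers (or in the single
 * cpu buffer when iterating a per_cpu trace file): the entry with the
 * smallest timestamp wins, and its cpu, timestamp and lost-event count are
 * reported through the optional out parameters.
 */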
2311 static struct trace_entry *
2312 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2313                   unsigned long *missing_events, u64 *ent_ts)
2314 {
2315         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2316         struct trace_entry *ent, *next = NULL;
2317         unsigned long lost_events = 0, next_lost = 0;
2318         int cpu_file = iter->cpu_file;
2319         u64 next_ts = 0, ts;
2320         int next_cpu = -1;
2321         int next_size = 0;
2322         int cpu;
2323
2324         /*
2325          * If we are in a per_cpu trace file, don't bother iterating over
2326          * all cpus; peek directly at the requested cpu.
2327          */
2328         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2329                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2330                         return NULL;
2331                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2332                 if (ent_cpu)
2333                         *ent_cpu = cpu_file;
2334
2335                 return ent;
2336         }
2337
2338         for_each_tracing_cpu(cpu) {
2339
2340                 if (ring_buffer_empty_cpu(buffer, cpu))
2341                         continue;
2342
2343                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2344
2345                 /*
2346                  * Pick the entry with the smallest timestamp:
2347                  */
2348                 if (ent && (!next || ts < next_ts)) {
2349                         next = ent;
2350                         next_cpu = cpu;
2351                         next_ts = ts;
2352                         next_lost = lost_events;
2353                         next_size = iter->ent_size;
2354                 }
2355         }
2356
2357         iter->ent_size = next_size;
2358
2359         if (ent_cpu)
2360                 *ent_cpu = next_cpu;
2361
2362         if (ent_ts)
2363                 *ent_ts = next_ts;
2364
2365         if (missing_events)
2366                 *missing_events = next_lost;
2367
2368         return next;
2369 }
2370
2371 /* Find the next real entry, without updating the iterator itself */
2372 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2373                                           int *ent_cpu, u64 *ent_ts)
2374 {
2375         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2376 }
2377
2378 /* Find the next real entry, and increment the iterator to the next entry */
2379 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2380 {
2381         iter->ent = __find_next_entry(iter, &iter->cpu,
2382                                       &iter->lost_events, &iter->ts);
2383
2384         if (iter->ent)
2385                 trace_iterator_increment(iter);
2386
2387         return iter->ent ? iter : NULL;
2388 }
2389
2390 static void trace_consume(struct trace_iterator *iter)
2391 {
2392         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2393                             &iter->lost_events);
2394 }
2395
2396 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2397 {
2398         struct trace_iterator *iter = m->private;
2399         int i = (int)*pos;
2400         void *ent;
2401
2402         WARN_ON_ONCE(iter->leftover);
2403
2404         (*pos)++;
2405
2406         /* can't go backwards */
2407         if (iter->idx > i)
2408                 return NULL;
2409
2410         if (iter->idx < 0)
2411                 ent = trace_find_next_entry_inc(iter);
2412         else
2413                 ent = iter;
2414
2415         while (ent && iter->idx < i)
2416                 ent = trace_find_next_entry_inc(iter);
2417
2418         iter->pos = *pos;
2419
2420         return ent;
2421 }
2422
2423 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2424 {
2425         struct ring_buffer_event *event;
2426         struct ring_buffer_iter *buf_iter;
2427         unsigned long entries = 0;
2428         u64 ts;
2429
2430         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2431
2432         buf_iter = trace_buffer_iter(iter, cpu);
2433         if (!buf_iter)
2434                 return;
2435
2436         ring_buffer_iter_reset(buf_iter);
2437
2438         /*
2439          * With the max latency tracers we could have the case
2440          * that a reset never took place on a cpu. This is evidenced
2441          * by the timestamp being before the start of the buffer.
2442          */
2443         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2444                 if (ts >= iter->trace_buffer->time_start)
2445                         break;
2446                 entries++;
2447                 ring_buffer_read(buf_iter, NULL);
2448         }
2449
2450         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2451 }
2452
2453 /*
2454  * The current tracer is copied to avoid taking a global lock
2455  * all around.
2456  */
2457 static void *s_start(struct seq_file *m, loff_t *pos)
2458 {
2459         struct trace_iterator *iter = m->private;
2460         struct trace_array *tr = iter->tr;
2461         int cpu_file = iter->cpu_file;
2462         void *p = NULL;
2463         loff_t l = 0;
2464         int cpu;
2465
2466         /*
2467          * copy the tracer to avoid using a global lock all around.
2468          * iter->trace is a copy of current_trace, the pointer to the
2469          * name may be used instead of a strcmp(), as iter->trace->name
2470          * will point to the same string as current_trace->name.
2471          */
2472         mutex_lock(&trace_types_lock);
2473         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2474                 *iter->trace = *tr->current_trace;
2475         mutex_unlock(&trace_types_lock);
2476
2477 #ifdef CONFIG_TRACER_MAX_TRACE
2478         if (iter->snapshot && iter->trace->use_max_tr)
2479                 return ERR_PTR(-EBUSY);
2480 #endif
2481
2482         if (!iter->snapshot)
2483                 atomic_inc(&trace_record_cmdline_disabled);
2484
2485         if (*pos != iter->pos) {
2486                 iter->ent = NULL;
2487                 iter->cpu = 0;
2488                 iter->idx = -1;
2489
2490                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2491                         for_each_tracing_cpu(cpu)
2492                                 tracing_iter_reset(iter, cpu);
2493                 } else
2494                         tracing_iter_reset(iter, cpu_file);
2495
2496                 iter->leftover = 0;
2497                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2498                         ;
2499
2500         } else {
2501                 /*
2502                  * If we overflowed the seq_file before, then we want
2503                  * to just reuse the trace_seq buffer again.
2504                  */
2505                 if (iter->leftover)
2506                         p = iter;
2507                 else {
2508                         l = *pos - 1;
2509                         p = s_next(m, p, &l);
2510                 }
2511         }
2512
2513         trace_event_read_lock();
2514         trace_access_lock(cpu_file);
2515         return p;
2516 }
2517
2518 static void s_stop(struct seq_file *m, void *p)
2519 {
2520         struct trace_iterator *iter = m->private;
2521
2522 #ifdef CONFIG_TRACER_MAX_TRACE
2523         if (iter->snapshot && iter->trace->use_max_tr)
2524                 return;
2525 #endif
2526
2527         if (!iter->snapshot)
2528                 atomic_dec(&trace_record_cmdline_disabled);
2529
2530         trace_access_unlock(iter->cpu_file);
2531         trace_event_read_unlock();
2532 }
2533
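/*
 * Sum up, over all tracing cpus, the number of entries currently readable
 * (*entries) and the total including those lost to buffer overruns (*total).
 */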
2534 static void
2535 get_total_entries(struct trace_buffer *buf,
2536                   unsigned long *total, unsigned long *entries)
2537 {
2538         unsigned long count;
2539         int cpu;
2540
2541         *total = 0;
2542         *entries = 0;
2543
2544         for_each_tracing_cpu(cpu) {
2545                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2546                 /*
2547                  * If this buffer has skipped entries, then we hold all
2548                  * entries for the trace and we need to ignore the
2549                  * ones before the time stamp.
2550                  */
2551                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2553                         /* total is the same as the entries */
2554                         *total += count;
2555                 } else
2556                         *total += count +
2557                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2558                 *entries += count;
2559         }
2560 }
2561
2562 static void print_lat_help_header(struct seq_file *m)
2563 {
2564         seq_puts(m, "#                  _------=> CPU#            \n"
2565                     "#                 / _-----=> irqs-off        \n"
2566                     "#                | / _----=> need-resched    \n"
2567                     "#                || / _---=> hardirq/softirq \n"
2568                     "#                ||| / _--=> preempt-depth   \n"
2569                     "#                |||| /     delay            \n"
2570                     "#  cmd     pid   ||||| time  |   caller      \n"
2571                     "#     \\   /      |||||  \\    |   /         \n");
2572 }
2573
2574 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2575 {
2576         unsigned long total;
2577         unsigned long entries;
2578
2579         get_total_entries(buf, &total, &entries);
2580         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2581                    entries, total, num_online_cpus());
2582         seq_puts(m, "#\n");
2583 }
2584
2585 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2586 {
2587         print_event_info(buf, m);
2588         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2589                     "#              | |       |          |         |\n");
2590 }
2591
2592 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2593 {
2594         print_event_info(buf, m);
2595         seq_puts(m, "#                              _-----=> irqs-off\n"
2596                     "#                             / _----=> need-resched\n"
2597                     "#                            | / _---=> hardirq/softirq\n"
2598                     "#                            || / _--=> preempt-depth\n"
2599                     "#                            ||| /     delay\n"
2600                     "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2601                     "#              | |       |   ||||       |         |\n");
2602 }
2603
2604 void
2605 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606 {
2607         unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2608         struct trace_buffer *buf = iter->trace_buffer;
2609         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2610         struct tracer *type = iter->trace;
2611         unsigned long entries;
2612         unsigned long total;
2613         const char *name = type->name;
2614
2617         get_total_entries(buf, &total, &entries);
2618
2619         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2620                    name, UTS_RELEASE);
2621         seq_puts(m, "# -----------------------------------"
2622                  "---------------------------------\n");
2623         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2624                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2625                    nsecs_to_usecs(data->saved_latency),
2626                    entries,
2627                    total,
2628                    buf->cpu,
2629 #if defined(CONFIG_PREEMPT_NONE)
2630                    "server",
2631 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632                    "desktop",
2633 #elif defined(CONFIG_PREEMPT)
2634                    "preempt",
2635 #else
2636                    "unknown",
2637 #endif
2638                    /* These are reserved for later use */
2639                    0, 0, 0, 0);
2640 #ifdef CONFIG_SMP
2641         seq_printf(m, " #P:%d)\n", num_online_cpus());
2642 #else
2643         seq_puts(m, ")\n");
2644 #endif
2645         seq_puts(m, "#    -----------------\n");
2646         seq_printf(m, "#    | task: %.16s-%d "
2647                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2648                    data->comm, data->pid,
2649                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2650                    data->policy, data->rt_priority);
2651         seq_puts(m, "#    -----------------\n");
2652
2653         if (data->critical_start) {
2654                 seq_puts(m, "#  => started at: ");
2655                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656                 trace_print_seq(m, &iter->seq);
2657                 seq_puts(m, "\n#  => ended at:   ");
2658                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659                 trace_print_seq(m, &iter->seq);
2660                 seq_puts(m, "\n#\n");
2661         }
2662
2663         seq_puts(m, "#\n");
2664 }
2665
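/*
 * When the annotate option is set and a buffer has overrun, print a
 * "CPU n buffer started" marker the first time output switches to a given
 * cpu, so the reader can tell where that cpu's remaining data begins.
 */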
2666 static void test_cpu_buff_start(struct trace_iterator *iter)
2667 {
2668         struct trace_seq *s = &iter->seq;
2669         struct trace_array *tr = iter->tr;
2670
2671         if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2672                 return;
2673
2674         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2675                 return;
2676
2677         if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2678                 return;
2679
2680         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2681                 return;
2682
2683         if (iter->started)
2684                 cpumask_set_cpu(iter->cpu, iter->started);
2685
2686         /* Don't print started cpu buffer for the first entry of the trace */
2687         if (iter->idx > 1)
2688                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2689                                 iter->cpu);
2690 }
2691
2692 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2693 {
2694         struct trace_array *tr = iter->tr;
2695         struct trace_seq *s = &iter->seq;
2696         unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2697         struct trace_entry *entry;
2698         struct trace_event *event;
2699
2700         entry = iter->ent;
2701
2702         test_cpu_buff_start(iter);
2703
2704         event = ftrace_find_event(entry->type);
2705
2706         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2707                 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2708                         trace_print_lat_context(iter);
2709                 else
2710                         trace_print_context(iter);
2711         }
2712
2713         if (trace_seq_has_overflowed(s))
2714                 return TRACE_TYPE_PARTIAL_LINE;
2715
2716         if (event)
2717                 return event->funcs->trace(iter, sym_flags, event);
2718
2719         trace_seq_printf(s, "Unknown type %d\n", entry->type);
2720
2721         return trace_handle_return(s);
2722 }
2723
2724 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2725 {
2726         struct trace_array *tr = iter->tr;
2727         struct trace_seq *s = &iter->seq;
2728         struct trace_entry *entry;
2729         struct trace_event *event;
2730
2731         entry = iter->ent;
2732
2733         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2734                 trace_seq_printf(s, "%d %d %llu ",
2735                                  entry->pid, iter->cpu, iter->ts);
2736
2737         if (trace_seq_has_overflowed(s))
2738                 return TRACE_TYPE_PARTIAL_LINE;
2739
2740         event = ftrace_find_event(entry->type);
2741         if (event)
2742                 return event->funcs->raw(iter, 0, event);
2743
2744         trace_seq_printf(s, "%d ?\n", entry->type);
2745
2746         return trace_handle_return(s);
2747 }
2748
2749 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2750 {
2751         struct trace_array *tr = iter->tr;
2752         struct trace_seq *s = &iter->seq;
2753         unsigned char newline = '\n';
2754         struct trace_entry *entry;
2755         struct trace_event *event;
2756
2757         entry = iter->ent;
2758
2759         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2760                 SEQ_PUT_HEX_FIELD(s, entry->pid);
2761                 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2762                 SEQ_PUT_HEX_FIELD(s, iter->ts);
2763                 if (trace_seq_has_overflowed(s))
2764                         return TRACE_TYPE_PARTIAL_LINE;
2765         }
2766
2767         event = ftrace_find_event(entry->type);
2768         if (event) {
2769                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2770                 if (ret != TRACE_TYPE_HANDLED)
2771                         return ret;
2772         }
2773
2774         SEQ_PUT_FIELD(s, newline);
2775
2776         return trace_handle_return(s);
2777 }
2778
2779 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2780 {
2781         struct trace_array *tr = iter->tr;
2782         struct trace_seq *s = &iter->seq;
2783         struct trace_entry *entry;
2784         struct trace_event *event;
2785
2786         entry = iter->ent;
2787
2788         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2789                 SEQ_PUT_FIELD(s, entry->pid);
2790                 SEQ_PUT_FIELD(s, iter->cpu);
2791                 SEQ_PUT_FIELD(s, iter->ts);
2792                 if (trace_seq_has_overflowed(s))
2793                         return TRACE_TYPE_PARTIAL_LINE;
2794         }
2795
2796         event = ftrace_find_event(entry->type);
2797         return event ? event->funcs->binary(iter, 0, event) :
2798                 TRACE_TYPE_HANDLED;
2799 }
2800
2801 int trace_empty(struct trace_iterator *iter)
2802 {
2803         struct ring_buffer_iter *buf_iter;
2804         int cpu;
2805
2806         /* If we are looking at one CPU buffer, only check that one */
2807         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2808                 cpu = iter->cpu_file;
2809                 buf_iter = trace_buffer_iter(iter, cpu);
2810                 if (buf_iter) {
2811                         if (!ring_buffer_iter_empty(buf_iter))
2812                                 return 0;
2813                 } else {
2814                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2815                                 return 0;
2816                 }
2817                 return 1;
2818         }
2819
2820         for_each_tracing_cpu(cpu) {
2821                 buf_iter = trace_buffer_iter(iter, cpu);
2822                 if (buf_iter) {
2823                         if (!ring_buffer_iter_empty(buf_iter))
2824                                 return 0;
2825                 } else {
2826                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2827                                 return 0;
2828                 }
2829         }
2830
2831         return 1;
2832 }
2833
2834 /*  Called with trace_event_read_lock() held. */
2835 enum print_line_t print_trace_line(struct trace_iterator *iter)
2836 {
2837         struct trace_array *tr = iter->tr;
2838         unsigned long trace_flags = tr->trace_flags;
2839         enum print_line_t ret;
2840
2841         if (iter->lost_events) {
2842                 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2843                                  iter->cpu, iter->lost_events);
2844                 if (trace_seq_has_overflowed(&iter->seq))
2845                         return TRACE_TYPE_PARTIAL_LINE;
2846         }
2847
2848         if (iter->trace && iter->trace->print_line) {
2849                 ret = iter->trace->print_line(iter);
2850                 if (ret != TRACE_TYPE_UNHANDLED)
2851                         return ret;
2852         }
2853
2854         if (iter->ent->type == TRACE_BPUTS &&
2855                         trace_flags & TRACE_ITER_PRINTK &&
2856                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2857                 return trace_print_bputs_msg_only(iter);
2858
2859         if (iter->ent->type == TRACE_BPRINT &&
2860                         trace_flags & TRACE_ITER_PRINTK &&
2861                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2862                 return trace_print_bprintk_msg_only(iter);
2863
2864         if (iter->ent->type == TRACE_PRINT &&
2865                         trace_flags & TRACE_ITER_PRINTK &&
2866                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2867                 return trace_print_printk_msg_only(iter);
2868
2869         if (trace_flags & TRACE_ITER_BIN)
2870                 return print_bin_fmt(iter);
2871
2872         if (trace_flags & TRACE_ITER_HEX)
2873                 return print_hex_fmt(iter);
2874
2875         if (trace_flags & TRACE_ITER_RAW)
2876                 return print_raw_fmt(iter);
2877
2878         return print_trace_fmt(iter);
2879 }
2880
2881 void trace_latency_header(struct seq_file *m)
2882 {
2883         struct trace_iterator *iter = m->private;
2884         struct trace_array *tr = iter->tr;
2885
2886         /* print nothing if the buffers are empty */
2887         if (trace_empty(iter))
2888                 return;
2889
2890         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891                 print_trace_header(m, iter);
2892
2893         if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2894                 print_lat_help_header(m);
2895 }
2896
2897 void trace_default_header(struct seq_file *m)
2898 {
2899         struct trace_iterator *iter = m->private;
2900         struct trace_array *tr = iter->tr;
2901         unsigned long trace_flags = tr->trace_flags;
2902
2903         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2904                 return;
2905
2906         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2907                 /* print nothing if the buffers are empty */
2908                 if (trace_empty(iter))
2909                         return;
2910                 print_trace_header(m, iter);
2911                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2912                         print_lat_help_header(m);
2913         } else {
2914                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2915                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2916                                 print_func_help_header_irq(iter->trace_buffer, m);
2917                         else
2918                                 print_func_help_header(iter->trace_buffer, m);
2919                 }
2920         }
2921 }
2922
2923 static void test_ftrace_alive(struct seq_file *m)
2924 {
2925         if (!ftrace_is_dead())
2926                 return;
2927         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2928                     "#          MAY BE MISSING FUNCTION EVENTS\n");
2929 }
2930
2931 #ifdef CONFIG_TRACER_MAX_TRACE
2932 static void show_snapshot_main_help(struct seq_file *m)
2933 {
2934         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2935                     "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2936                     "#                      Takes a snapshot of the main buffer.\n"
2937                     "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2938                     "#                      (Doesn't have to be '2'; works with any number that\n"
2939                     "#                       is not a '0' or '1')\n");
2940 }
2941
2942 static void show_snapshot_percpu_help(struct seq_file *m)
2943 {
2944         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2945 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2946         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2947                     "#                      Takes a snapshot of the main buffer for this cpu.\n");
2948 #else
2949         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2950                     "#                     Must use main snapshot file to allocate.\n");
2951 #endif
2952         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2953                     "#                      (Doesn't have to be '2'; works with any number that\n"
2954                     "#                       is not a '0' or '1')\n");
2955 }
2956
2957 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2958 {
2959         if (iter->tr->allocated_snapshot)
2960                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2961         else
2962                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2963
2964         seq_puts(m, "# Snapshot commands:\n");
2965         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2966                 show_snapshot_main_help(m);
2967         else
2968                 show_snapshot_percpu_help(m);
2969 }
2970 #else
2971 /* Should never be called */
2972 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2973 #endif
2974
2975 static int s_show(struct seq_file *m, void *v)
2976 {
2977         struct trace_iterator *iter = v;
2978         int ret;
2979
2980         if (iter->ent == NULL) {
2981                 if (iter->tr) {
2982                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2983                         seq_puts(m, "#\n");
2984                         test_ftrace_alive(m);
2985                 }
2986                 if (iter->snapshot && trace_empty(iter))
2987                         print_snapshot_help(m, iter);
2988                 else if (iter->trace && iter->trace->print_header)
2989                         iter->trace->print_header(m);
2990                 else
2991                         trace_default_header(m);
2992
2993         } else if (iter->leftover) {
2994                 /*
2995                  * If we filled the seq_file buffer earlier, we
2996                  * want to just show it now.
2997                  */
2998                 ret = trace_print_seq(m, &iter->seq);
2999
3000                 /* ret should this time be zero, but you never know */
3001                 iter->leftover = ret;
3002
3003         } else {
3004                 print_trace_line(iter);
3005                 ret = trace_print_seq(m, &iter->seq);
3006                 /*
3007                  * If we overflow the seq_file buffer, then it will
3008                  * ask us for this data again at start up.
3009                  * Use that instead.
3010                  *  ret is 0 if seq_file write succeeded.
3011                  *        -1 otherwise.
3012                  */
3013                 iter->leftover = ret;
3014         }
3015
3016         return 0;
3017 }
3018
3019 /*
3020  * Should be used after trace_array_get(), trace_types_lock
3021  * ensures that i_cdev was already initialized.
3022  */
3023 static inline int tracing_get_cpu(struct inode *inode)
3024 {
3025         if (inode->i_cdev) /* See trace_create_cpu_file() */
3026                 return (long)inode->i_cdev - 1;
3027         return RING_BUFFER_ALL_CPUS;
3028 }
3029
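/*
 * seq_file operations driving reads of the "trace" file.  As an
 * illustration only, a user running
 *
 *	cat /sys/kernel/debug/tracing/trace
 *
 * ends up in s_start()/s_next()/s_show()/s_stop() above, producing roughly
 * one line of output per s_show() call.
 */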
3030 static const struct seq_operations tracer_seq_ops = {
3031         .start          = s_start,
3032         .next           = s_next,
3033         .stop           = s_stop,
3034         .show           = s_show,
3035 };
3036
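/*
 * Set up a trace_iterator for reading the trace: copy the current tracer,
 * allocate a ring buffer iterator per cpu and, unless the snapshot file is
 * being opened, stop tracing while the buffers are being read.
 */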
3037 static struct trace_iterator *
3038 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3039 {
3040         struct trace_array *tr = inode->i_private;
3041         struct trace_iterator *iter;
3042         int cpu;
3043
3044         if (tracing_disabled)
3045                 return ERR_PTR(-ENODEV);
3046
3047         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3048         if (!iter)
3049                 return ERR_PTR(-ENOMEM);
3050
3051         iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3052                                     GFP_KERNEL);
3053         if (!iter->buffer_iter)
3054                 goto release;
3055
3056         /*
3057          * We make a copy of the current tracer to avoid concurrent
3058          * changes on it while we are reading.
3059          */
3060         mutex_lock(&trace_types_lock);
3061         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3062         if (!iter->trace)
3063                 goto fail;
3064
3065         *iter->trace = *tr->current_trace;
3066
3067         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3068                 goto fail;
3069
3070         iter->tr = tr;
3071
3072 #ifdef CONFIG_TRACER_MAX_TRACE
3073         /* Currently only the top directory has a snapshot */
3074         if (tr->current_trace->print_max || snapshot)
3075                 iter->trace_buffer = &tr->max_buffer;
3076         else
3077 #endif
3078                 iter->trace_buffer = &tr->trace_buffer;
3079         iter->snapshot = snapshot;
3080         iter->pos = -1;
3081         iter->cpu_file = tracing_get_cpu(inode);
3082         mutex_init(&iter->mutex);
3083
3084         /* Notify the tracer early, before we stop tracing. */
3085         if (iter->trace && iter->trace->open)
3086                 iter->trace->open(iter);
3087
3088         /* Annotate start of buffers if we had overruns */
3089         if (ring_buffer_overruns(iter->trace_buffer->buffer))
3090                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3091
3092         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3093         if (trace_clocks[tr->clock_id].in_ns)
3094                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3095
3096         /* stop the trace while dumping if we are not opening "snapshot" */
3097         if (!iter->snapshot)
3098                 tracing_stop_tr(tr);
3099
3100         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3101                 for_each_tracing_cpu(cpu) {
3102                         iter->buffer_iter[cpu] =
3103                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3104                 }
3105                 ring_buffer_read_prepare_sync();
3106                 for_each_tracing_cpu(cpu) {
3107                         ring_buffer_read_start(iter->buffer_iter[cpu]);
3108                         tracing_iter_reset(iter, cpu);
3109                 }
3110         } else {
3111                 cpu = iter->cpu_file;
3112                 iter->buffer_iter[cpu] =
3113                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3114                 ring_buffer_read_prepare_sync();
3115                 ring_buffer_read_start(iter->buffer_iter[cpu]);
3116                 tracing_iter_reset(iter, cpu);
3117         }
3118
3119         mutex_unlock(&trace_types_lock);
3120
3121         return iter;
3122
3123  fail:
3124         mutex_unlock(&trace_types_lock);
3125         kfree(iter->trace);
3126         kfree(iter->buffer_iter);
3127 release:
3128         seq_release_private(inode, file);
3129         return ERR_PTR(-ENOMEM);
3130 }
3131
3132 int tracing_open_generic(struct inode *inode, struct file *filp)
3133 {
3134         if (tracing_disabled)
3135                 return -ENODEV;
3136
3137         filp->private_data = inode->i_private;
3138         return 0;
3139 }
3140
3141 bool tracing_is_disabled(void)
3142 {
3143         return (tracing_disabled) ? true : false;
3144 }
3145
3146 /*
3147  * Open and update trace_array ref count.
3148  * Must have the current trace_array passed to it.
3149  */
3150 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3151 {
3152         struct trace_array *tr = inode->i_private;
3153
3154         if (tracing_disabled)
3155                 return -ENODEV;
3156
3157         if (trace_array_get(tr) < 0)
3158                 return -ENODEV;
3159
3160         filp->private_data = inode->i_private;
3161
3162         return 0;
3163 }
3164
3165 static int tracing_release(struct inode *inode, struct file *file)
3166 {
3167         struct trace_array *tr = inode->i_private;
3168         struct seq_file *m = file->private_data;
3169         struct trace_iterator *iter;
3170         int cpu;
3171
3172         if (!(file->f_mode & FMODE_READ)) {
3173                 trace_array_put(tr);
3174                 return 0;
3175         }
3176
3177         /* Writes do not use seq_file */
3178         iter = m->private;
3179         mutex_lock(&trace_types_lock);
3180
3181         for_each_tracing_cpu(cpu) {
3182                 if (iter->buffer_iter[cpu])
3183                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3184         }
3185
3186         if (iter->trace && iter->trace->close)
3187                 iter->trace->close(iter);
3188
3189         if (!iter->snapshot)
3190                 /* reenable tracing if it was previously enabled */
3191                 tracing_start_tr(tr);
3192
3193         __trace_array_put(tr);
3194
3195         mutex_unlock(&trace_types_lock);
3196
3197         mutex_destroy(&iter->mutex);
3198         free_cpumask_var(iter->started);
3199         kfree(iter->trace);
3200         kfree(iter->buffer_iter);
3201         seq_release_private(inode, file);
3202
3203         return 0;
3204 }
3205
3206 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207 {
3208         struct trace_array *tr = inode->i_private;
3209
3210         trace_array_put(tr);
3211         return 0;
3212 }
3213
3214 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215 {
3216         struct trace_array *tr = inode->i_private;
3217
3218         trace_array_put(tr);
3219
3220         return single_release(inode, file);
3221 }
3222
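     /*
      * Open callback for the main trace output file. Opening for write with
      * O_TRUNC clears the selected CPU buffer (this is what "echo > trace"
      * does); opening for read builds a full iterator via __tracing_open().
      */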
3223 static int tracing_open(struct inode *inode, struct file *file)
3224 {
3225         struct trace_array *tr = inode->i_private;
3226         struct trace_iterator *iter;
3227         int ret = 0;
3228
3229         if (trace_array_get(tr) < 0)
3230                 return -ENODEV;
3231
3232         /* If this file was open for write, then erase contents */
3233         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3234                 int cpu = tracing_get_cpu(inode);
3235
3236                 if (cpu == RING_BUFFER_ALL_CPUS)
3237                         tracing_reset_online_cpus(&tr->trace_buffer);
3238                 else
3239                         tracing_reset(&tr->trace_buffer, cpu);
3240         }
3241
3242         if (file->f_mode & FMODE_READ) {
3243                 iter = __tracing_open(inode, file, false);
3244                 if (IS_ERR(iter))
3245                         ret = PTR_ERR(iter);
3246                 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3247                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
3248         }
3249
3250         if (ret < 0)
3251                 trace_array_put(tr);
3252
3253         return ret;
3254 }
3255
3256 /*
3257  * Some tracers are not suitable for instance buffers.
3258  * A tracer is always available for the global array (toplevel)
3259  * or if it explicitly states that it is.
3260  */
3261 static bool
3262 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3263 {
3264         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3265 }
3266
3267 /* Find the next tracer that this trace array may use */
3268 static struct tracer *
3269 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3270 {
3271         while (t && !trace_ok_for_array(t, tr))
3272                 t = t->next;
3273
3274         return t;
3275 }
3276
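     /*
      * seq_file iteration over the registered tracers, skipping any tracer
      * that is not allowed for this trace_array. Used by show_traces_open()
      * below for the available_tracers listing.
      */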
3277 static void *
3278 t_next(struct seq_file *m, void *v, loff_t *pos)
3279 {
3280         struct trace_array *tr = m->private;
3281         struct tracer *t = v;
3282
3283         (*pos)++;
3284
3285         if (t)
3286                 t = get_tracer_for_array(tr, t->next);
3287
3288         return t;
3289 }
3290
3291 static void *t_start(struct seq_file *m, loff_t *pos)
3292 {
3293         struct trace_array *tr = m->private;
3294         struct tracer *t;
3295         loff_t l = 0;
3296
3297         mutex_lock(&trace_types_lock);
3298
3299         t = get_tracer_for_array(tr, trace_types);
3300         for (; t && l < *pos; t = t_next(m, t, &l))
3301                         ;
3302
3303         return t;
3304 }
3305
3306 static void t_stop(struct seq_file *m, void *p)
3307 {
3308         mutex_unlock(&trace_types_lock);
3309 }
3310
3311 static int t_show(struct seq_file *m, void *v)
3312 {
3313         struct tracer *t = v;
3314
3315         if (!t)
3316                 return 0;
3317
3318         seq_puts(m, t->name);
3319         if (t->next)
3320                 seq_putc(m, ' ');
3321         else
3322                 seq_putc(m, '\n');
3323
3324         return 0;
3325 }
3326
3327 static const struct seq_operations show_traces_seq_ops = {
3328         .start          = t_start,
3329         .next           = t_next,
3330         .stop           = t_stop,
3331         .show           = t_show,
3332 };
3333
3334 static int show_traces_open(struct inode *inode, struct file *file)
3335 {
3336         struct trace_array *tr = inode->i_private;
3337         struct seq_file *m;
3338         int ret;
3339
3340         if (tracing_disabled)
3341                 return -ENODEV;
3342
3343         ret = seq_open(file, &show_traces_seq_ops);
3344         if (ret)
3345                 return ret;
3346
3347         m = file->private_data;
3348         m->private = tr;
3349
3350         return 0;
3351 }
3352
3353 static ssize_t
3354 tracing_write_stub(struct file *filp, const char __user *ubuf,
3355                    size_t count, loff_t *ppos)
3356 {
3357         return count;
3358 }
3359
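     /*
      * Shared llseek for trace files: readers go through seq_lseek(), while
      * write-only opens (used to clear buffers) keep the file position at 0.
      */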
3360 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3361 {
3362         int ret;
3363
3364         if (file->f_mode & FMODE_READ)
3365                 ret = seq_lseek(file, offset, whence);
3366         else
3367                 file->f_pos = ret = 0;
3368
3369         return ret;
3370 }
3371
3372 static const struct file_operations tracing_fops = {
3373         .open           = tracing_open,
3374         .read           = seq_read,
3375         .write          = tracing_write_stub,
3376         .llseek         = tracing_lseek,
3377         .release        = tracing_release,
3378 };
3379
3380 static const struct file_operations show_traces_fops = {
3381         .open           = show_traces_open,
3382         .read           = seq_read,
3383         .release        = seq_release,
3384         .llseek         = seq_lseek,
3385 };
3386
3387 /*
3388  * The tracer itself will not take this lock, but still we want
3389  * to provide a consistent cpumask to user-space:
3390  */
3391 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3392
3393 /*
3394  * Temporary storage for the character representation of the
3395  * CPU bitmask (and one more byte for the newline):
3396  */
3397 static char mask_str[NR_CPUS + 1];
3398
3399 static ssize_t
3400 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3401                      size_t count, loff_t *ppos)
3402 {
3403         struct trace_array *tr = file_inode(filp)->i_private;
3404         int len;
3405
3406         mutex_lock(&tracing_cpumask_update_lock);
3407
3408         len = snprintf(mask_str, count, "%*pb\n",
3409                        cpumask_pr_args(tr->tracing_cpumask));
3410         if (len >= count) {
3411                 count = -EINVAL;
3412                 goto out_err;
3413         }
3414         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3415
3416 out_err:
3417         mutex_unlock(&tracing_cpumask_update_lock);
3418
3419         return count;
3420 }
3421
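     /*
      * Write handler for tracing_cpumask. The user buffer is parsed as a hex
      * cpumask (e.g. "echo 3 > tracing_cpumask" limits tracing to CPUs 0 and
      * 1), and recording is enabled or disabled on each CPU whose bit flips,
      * under tr->max_lock with interrupts disabled.
      */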
3422 static ssize_t
3423 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3424                       size_t count, loff_t *ppos)
3425 {
3426         struct trace_array *tr = file_inode(filp)->i_private;
3427         cpumask_var_t tracing_cpumask_new;
3428         int err, cpu;
3429
3430         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3431                 return -ENOMEM;
3432
3433         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3434         if (err)
3435                 goto err_unlock;
3436
3437         mutex_lock(&tracing_cpumask_update_lock);
3438
3439         local_irq_disable();
3440         arch_spin_lock(&tr->max_lock);
3441         for_each_tracing_cpu(cpu) {
3442                 /*
3443                  * Increase/decrease the disabled counter if we are
3444                  * about to flip a bit in the cpumask:
3445                  */
3446                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3447                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3448                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3449                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3450                 }
3451                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3452                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3453                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3454                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3455                 }
3456         }
3457         arch_spin_unlock(&tr->max_lock);
3458         local_irq_enable();
3459
3460         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3461
3462         mutex_unlock(&tracing_cpumask_update_lock);
3463         free_cpumask_var(tracing_cpumask_new);
3464
3465         return count;
3466
3467 err_unlock:
3468         free_cpumask_var(tracing_cpumask_new);
3469
3470         return err;
3471 }
3472
3473 static const struct file_operations tracing_cpumask_fops = {
3474         .open           = tracing_open_generic_tr,
3475         .read           = tracing_cpumask_read,
3476         .write          = tracing_cpumask_write,
3477         .release        = tracing_release_generic_tr,
3478         .llseek         = generic_file_llseek,
3479 };
3480
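     /*
      * seq_file show for the trace options file: list every core trace flag,
      * then the current tracer's private options, printing a "no" prefix for
      * the ones that are currently disabled.
      */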
3481 static int tracing_trace_options_show(struct seq_file *m, void *v)
3482 {
3483         struct tracer_opt *trace_opts;
3484         struct trace_array *tr = m->private;
3485         u32 tracer_flags;
3486         int i;
3487
3488         mutex_lock(&trace_types_lock);
3489         tracer_flags = tr->current_trace->flags->val;
3490         trace_opts = tr->current_trace->flags->opts;
3491
3492         for (i = 0; trace_options[i]; i++) {
3493                 if (tr->trace_flags & (1 << i))
3494                         seq_printf(m, "%s\n", trace_options[i]);
3495                 else
3496                         seq_printf(m, "no%s\n", trace_options[i]);
3497         }
3498
3499         for (i = 0; trace_opts[i].name; i++) {
3500                 if (tracer_flags & trace_opts[i].bit)
3501                         seq_printf(m, "%s\n", trace_opts[i].name);
3502                 else
3503                         seq_printf(m, "no%s\n", trace_opts[i].name);
3504         }
3505         mutex_unlock(&trace_types_lock);
3506
3507         return 0;
3508 }
3509
3510 static int __set_tracer_option(struct trace_array *tr,
3511                                struct tracer_flags *tracer_flags,
3512                                struct tracer_opt *opts, int neg)
3513 {
3514         struct tracer *trace = tr->current_trace;
3515         int ret;
3516
3517         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3518         if (ret)
3519                 return ret;
3520
3521         if (neg)
3522                 tracer_flags->val &= ~opts->bit;
3523         else
3524                 tracer_flags->val |= opts->bit;
3525         return 0;
3526 }
3527
3528 /* Try to assign a tracer specific option */
3529 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3530 {
3531         struct tracer *trace = tr->current_trace;
3532         struct tracer_flags *tracer_flags = trace->flags;
3533         struct tracer_opt *opts = NULL;
3534         int i;
3535
3536         for (i = 0; tracer_flags->opts[i].name; i++) {
3537                 opts = &tracer_flags->opts[i];
3538
3539                 if (strcmp(cmp, opts->name) == 0)
3540                         return __set_tracer_option(tr, trace->flags, opts, neg);
3541         }
3542
3543         return -EINVAL;
3544 }
3545
3546 /* Some tracers require overwrite to stay enabled */
3547 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3548 {
3549         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3550                 return -1;
3551
3552         return 0;
3553 }
3554
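     /*
      * Set or clear one TRACE_ITER_* flag on a trace_array. The current
      * tracer may veto the change via ->flag_changed(); a few flags also
      * have side effects handled here (cmdline recording, ring buffer
      * overwrite mode and the trace_printk controls).
      */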
3555 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3556 {
3557         /* do nothing if flag is already set */
3558         if (!!(tr->trace_flags & mask) == !!enabled)
3559                 return 0;
3560
3561         /* Give the tracer a chance to approve the change */
3562         if (tr->current_trace->flag_changed)
3563                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3564                         return -EINVAL;
3565
3566         if (enabled)
3567                 tr->trace_flags |= mask;
3568         else
3569                 tr->trace_flags &= ~mask;
3570
3571         if (mask == TRACE_ITER_RECORD_CMD)
3572                 trace_event_enable_cmd_record(enabled);
3573
3574         if (mask == TRACE_ITER_OVERWRITE) {
3575                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3576 #ifdef CONFIG_TRACER_MAX_TRACE
3577                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3578 #endif
3579         }
3580
3581         if (mask == TRACE_ITER_PRINTK) {
3582                 trace_printk_start_stop_comm(enabled);
3583                 trace_printk_control(enabled);
3584         }
3585
3586         return 0;
3587 }
3588
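     /*
      * Parse a single option token of the form "opt" or "noopt". The core
      * trace flags are tried first, then the current tracer's private
      * options. The '\0' that strstrip() wrote over the first trailing
      * whitespace character is turned back into a space so callers that
      * reuse the buffer (the boot option parsing below) are not left with a
      * shortened string.
      */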
3589 static int trace_set_options(struct trace_array *tr, char *option)
3590 {
3591         char *cmp;
3592         int neg = 0;
3593         int ret = -ENODEV;
3594         int i;
3595         size_t orig_len = strlen(option);
3596
3597         cmp = strstrip(option);
3598
3599         if (strncmp(cmp, "no", 2) == 0) {
3600                 neg = 1;
3601                 cmp += 2;
3602         }
3603
3604         mutex_lock(&trace_types_lock);
3605
3606         for (i = 0; trace_options[i]; i++) {
3607                 if (strcmp(cmp, trace_options[i]) == 0) {
3608                         ret = set_tracer_flag(tr, 1 << i, !neg);
3609                         break;
3610                 }
3611         }
3612
3613         /* If no option could be set, test the specific tracer options */
3614         if (!trace_options[i])
3615                 ret = set_tracer_option(tr, cmp, neg);
3616
3617         mutex_unlock(&trace_types_lock);
3618
3619         /*
3620          * If the first trailing whitespace is replaced with '\0' by strstrip,
3621          * turn it back into a space.
3622          */
3623         if (orig_len > strlen(option))
3624                 option[strlen(option)] = ' ';
3625
3626         return ret;
3627 }
3628
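     /*
      * Apply the options collected in trace_boot_options_buf, typically
      * filled from the trace_options= kernel command line (for example
      * "trace_options=sym-addr,stacktrace"). The buffer is split on commas,
      * empty fields are skipped, and each comma is put back afterwards so
      * the same string can be parsed again later.
      */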
3629 static void __init apply_trace_boot_options(void)
3630 {
3631         char *buf = trace_boot_options_buf;
3632         char *option;
3633
3634         while (true) {
3635                 option = strsep(&buf, ",");
3636
3637                 if (!option)
3638                         break;
3639
3640                 if (*option)
3641                         trace_set_options(&global_trace, option);
3642
3643                 /* Put back the comma to allow this to be called again */
3644                 if (buf)
3645                         *(buf - 1) = ',';
3646         }
3647 }
3648
3649 static ssize_t
3650 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3651                         size_t cnt, loff_t *ppos)
3652 {
3653         struct seq_file *m = filp->private_data;
3654         struct trace_array *tr = m->private;
3655         char buf[64];
3656         int ret;
3657
3658         if (cnt >= sizeof(buf))
3659                 return -EINVAL;
3660
3661         if (copy_from_user(&buf, ubuf, cnt))
3662                 return -EFAULT;
3663
3664         buf[cnt] = 0;
3665
3666         ret = trace_set_options(tr, buf);
3667         if (ret < 0)
3668                 return ret;
3669
3670         *ppos += cnt;
3671
3672         return cnt;
3673 }
3674
3675 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3676 {
3677         struct trace_array *tr = inode->i_private;
3678         int ret;
3679
3680         if (tracing_disabled)
3681                 return -ENODEV;
3682
3683         if (trace_array_get(tr) < 0)
3684                 return -ENODEV;
3685
3686         ret = single_open(file, tracing_trace_options_show, inode->i_private);
3687         if (ret < 0)
3688                 trace_array_put(tr);
3689
3690         return ret;
3691 }
3692
3693 static const struct file_operations tracing_iter_fops = {
3694         .open           = tracing_trace_options_open,
3695         .read           = seq_read,
3696         .llseek         = seq_lseek,
3697         .release        = tracing_single_release_tr,
3698         .write          = tracing_trace_options_write,
3699 };
3700
3701 static const char readme_msg[] =
3702         "tracing mini-HOWTO:\n\n"
3703         "# echo 0 > tracing_on : quick way to disable tracing\n"
3704         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3705         " Important files:\n"
3706         "  trace\t\t\t- The static contents of the buffer\n"
3707         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3708         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3709         "  current_tracer\t- function and latency tracers\n"
3710         "  available_tracers\t- list of configured tracers for current_tracer\n"
3711         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3712         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3713         "  trace_clock\t\t- change the clock used to order events\n"
3714         "       local:   Per cpu clock but may not be synced across CPUs\n"
3715         "      global:   Synced across CPUs but slows tracing down.\n"
3716         "     counter:   Not a clock, but just an increment\n"
3717         "      uptime:   Jiffy counter from time of boot\n"
3718         "        perf:   Same clock that perf events use\n"
3719 #ifdef CONFIG_X86_64
3720         "     x86-tsc:   TSC cycle counter\n"
3721 #endif
3722         "\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
3723         "  tracing_cpumask\t- Limit which CPUs to trace\n"
3724         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3725         "\t\t\t  Remove sub-buffer with rmdir\n"
3726         "  trace_options\t\t- Set format or modify how tracing happens\n"
3727         "\t\t\t  Disable an option by prefixing the\n"
3728         "\t\t\t  option name with 'no'\n"
3729         "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3730 #ifdef CONFIG_DYNAMIC_FTRACE
3731         "\n  available_filter_functions - list of functions that can be filtered on\n"
3732         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
3733         "\t\t\t  functions\n"
3734         "\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3735         "\t     modules: Can select a group via module\n"
3736         "\t      Format: :mod:<module-name>\n"
3737         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3738         "\t    triggers: a command to perform when function is hit\n"
3739         "\t      Format: <function>:<trigger>[:count]\n"
3740         "\t     trigger: traceon, traceoff\n"
3741         "\t\t      enable_event:<system>:<event>\n"
3742         "\t\t      disable_event:<system>:<event>\n"
3743 #ifdef CONFIG_STACKTRACE
3744         "\t\t      stacktrace\n"
3745 #endif
3746 #ifdef CONFIG_TRACER_SNAPSHOT
3747         "\t\t      snapshot\n"
3748 #endif
3749         "\t\t      dump\n"
3750         "\t\t      cpudump\n"
3751         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3752         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3753         "\t     The first one will disable tracing every time do_fault is hit\n"
3754         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3755         "\t       The first time do_trap is hit and it disables tracing, the\n"
3756         "\t       counter will decrement to 2. If tracing is already disabled,\n"
3757         "\t       the counter will not decrement. It only decrements when the\n"
3758         "\t       trigger did work\n"
3759         "\t     To remove trigger without count:\n"
3760         "\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3761         "\t     To remove trigger with a count:\n"
3762         "\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3763         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3764         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3765         "\t    modules: Can select a group via module command :mod:\n"
3766         "\t    Does not accept triggers\n"
3767 #endif /* CONFIG_DYNAMIC_FTRACE */
3768 #ifdef CONFIG_FUNCTION_TRACER
3769         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3770         "\t\t    (function)\n"
3771 #endif
3772 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3773         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3774         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3775         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3776 #endif
3777 #ifdef CONFIG_TRACER_SNAPSHOT
3778         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3779         "\t\t\t  snapshot buffer. Read the contents for more\n"
3780         "\t\t\t  information\n"
3781 #endif
3782 #ifdef CONFIG_STACK_TRACER
3783         "  stack_trace\t\t- Shows the max stack trace when active\n"
3784         "  stack_max_size\t- Shows current max stack size that was traced\n"
3785         "\t\t\t  Write into this file to reset the max size (trigger a\n"
3786         "\t\t\t  new trace)\n"
3787 #ifdef CONFIG_DYNAMIC_FTRACE
3788         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3789         "\t\t\t  traces\n"
3790 #endif
3791 #endif /* CONFIG_STACK_TRACER */
3792         "  events/\t\t- Directory containing all trace event subsystems:\n"
3793         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3794         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
3795         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3796         "\t\t\t  events\n"
3797         "      filter\t\t- If set, only events passing filter are traced\n"
3798         "  events/<system>/<event>/\t- Directory containing control files for\n"
3799         "\t\t\t  <event>:\n"
3800         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3801         "      filter\t\t- If set, only events passing filter are traced\n"
3802         "      trigger\t\t- If set, a command to perform when event is hit\n"
3803         "\t    Format: <trigger>[:count][if <filter>]\n"
3804         "\t   trigger: traceon, traceoff\n"
3805         "\t            enable_event:<system>:<event>\n"
3806         "\t            disable_event:<system>:<event>\n"
3807 #ifdef CONFIG_STACKTRACE
3808         "\t\t    stacktrace\n"
3809 #endif
3810 #ifdef CONFIG_TRACER_SNAPSHOT
3811         "\t\t    snapshot\n"
3812 #endif
3813         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3814         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3815         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3816         "\t                  events/block/block_unplug/trigger\n"
3817         "\t   The first disables tracing every time block_unplug is hit.\n"
3818         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3819         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3820         "\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3821         "\t   Like function triggers, the counter is only decremented if it\n"
3822         "\t    enabled or disabled tracing.\n"
3823         "\t   To remove a trigger without a count:\n"
3824         "\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3825         "\t   To remove a trigger with a count:\n"
3826         "\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3827         "\t   Filters can be ignored when removing a trigger.\n"
3828 ;
3829
3830 static ssize_t
3831 tracing_readme_read(struct file *filp, char __user *ubuf,
3832                        size_t cnt, loff_t *ppos)
3833 {
3834         return simple_read_from_buffer(ubuf, cnt, ppos,
3835                                         readme_msg, strlen(readme_msg));
3836 }
3837
3838 static const struct file_operations tracing_readme_fops = {
3839         .open           = tracing_open_generic,
3840         .read           = tracing_readme_read,
3841         .llseek         = generic_file_llseek,
3842 };
3843
3844 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3845 {
3846         unsigned int *ptr = v;
3847
3848         if (*pos || m->count)
3849                 ptr++;
3850
3851         (*pos)++;
3852
3853         for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3854              ptr++) {
3855                 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3856                         continue;
3857
3858                 return ptr;
3859         }
3860
3861         return NULL;
3862 }
3863
3864 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3865 {
3866         void *v;
3867         loff_t l = 0;
3868
3869         preempt_disable();
3870         arch_spin_lock(&trace_cmdline_lock);
3871
3872         v = &savedcmd->map_cmdline_to_pid[0];
3873         while (l <= *pos) {
3874                 v = saved_cmdlines_next(m, v, &l);
3875                 if (!v)
3876                         return NULL;
3877         }
3878
3879         return v;
3880 }
3881
3882 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3883 {
3884         arch_spin_unlock(&trace_cmdline_lock);
3885         preempt_enable();
3886 }
3887
3888 static int saved_cmdlines_show(struct seq_file *m, void *v)
3889 {
3890         char buf[TASK_COMM_LEN];
3891         unsigned int *pid = v;
3892
3893         __trace_find_cmdline(*pid, buf);
3894         seq_printf(m, "%d %s\n", *pid, buf);
3895         return 0;
3896 }
3897
3898 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3899         .start          = saved_cmdlines_start,
3900         .next           = saved_cmdlines_next,
3901         .stop           = saved_cmdlines_stop,
3902         .show           = saved_cmdlines_show,
3903 };
3904
3905 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3906 {
3907         if (tracing_disabled)
3908                 return -ENODEV;
3909
3910         return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3911 }
3912
3913 static const struct file_operations tracing_saved_cmdlines_fops = {
3914         .open           = tracing_saved_cmdlines_open,
3915         .read           = seq_read,
3916         .llseek         = seq_lseek,
3917         .release        = seq_release,
3918 };
3919
3920 static ssize_t
3921 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3922                                  size_t cnt, loff_t *ppos)
3923 {
3924         char buf[64];
3925         int r;
3926
3927         arch_spin_lock(&trace_cmdline_lock);
3928         r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3929         arch_spin_unlock(&trace_cmdline_lock);
3930
3931         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3932 }
3933
3934 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3935 {
3936         kfree(s->saved_cmdlines);
3937         kfree(s->map_cmdline_to_pid);
3938         kfree(s);
3939 }
3940
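     /*
      * Replace the saved_cmdlines buffer with one sized for @val entries:
      * the new buffer is swapped in under trace_cmdline_lock and the old
      * one is freed once the lock is dropped.
      */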
3941 static int tracing_resize_saved_cmdlines(unsigned int val)
3942 {
3943         struct saved_cmdlines_buffer *s, *savedcmd_temp;
3944
3945         s = kmalloc(sizeof(*s), GFP_KERNEL);
3946         if (!s)
3947                 return -ENOMEM;
3948
3949         if (allocate_cmdlines_buffer(val, s) < 0) {
3950                 kfree(s);
3951                 return -ENOMEM;
3952         }
3953
3954         arch_spin_lock(&trace_cmdline_lock);
3955         savedcmd_temp = savedcmd;
3956         savedcmd = s;
3957         arch_spin_unlock(&trace_cmdline_lock);
3958         free_saved_cmdlines_buffer(savedcmd_temp);
3959
3960         return 0;
3961 }
3962
3963 static ssize_t
3964 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3965                                   size_t cnt, loff_t *ppos)
3966 {
3967         unsigned long val;
3968         int ret;
3969
3970         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3971         if (ret)
3972                 return ret;
3973
3974         /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3975         if (!val || val > PID_MAX_DEFAULT)
3976                 return -EINVAL;
3977
3978         ret = tracing_resize_saved_cmdlines((unsigned int)val);
3979         if (ret < 0)
3980                 return ret;
3981
3982         *ppos += cnt;
3983
3984         return cnt;
3985 }
3986
3987 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3988         .open           = tracing_open_generic,
3989         .read           = tracing_saved_cmdlines_size_read,
3990         .write          = tracing_saved_cmdlines_size_write,
3991 };
3992
3993 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
3994 static union trace_enum_map_item *
3995 update_enum_map(union trace_enum_map_item *ptr)
3996 {
3997         if (!ptr->map.enum_string) {
3998                 if (ptr->tail.next) {
3999                         ptr = ptr->tail.next;
4000                         /* Set ptr to the next real item (skip head) */
4001                         ptr++;
4002                 } else
4003                         return NULL;
4004         }
4005         return ptr;
4006 }
4007
4008 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4009 {
4010         union trace_enum_map_item *ptr = v;
4011
4012         /*
4013          * Paranoid! If ptr points to end, we don't want to increment past it.
4014          * This really should never happen.
4015          */
4016         ptr = update_enum_map(ptr);
4017         if (WARN_ON_ONCE(!ptr))
4018                 return NULL;
4019
4020         ptr++;
4021
4022         (*pos)++;
4023
4024         ptr = update_enum_map(ptr);
4025
4026         return ptr;
4027 }
4028
4029 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4030 {
4031         union trace_enum_map_item *v;
4032         loff_t l = 0;
4033
4034         mutex_lock(&trace_enum_mutex);
4035
4036         v = trace_enum_maps;
4037         if (v)
4038                 v++;
4039
4040         while (v && l < *pos) {
4041                 v = enum_map_next(m, v, &l);
4042         }
4043
4044         return v;
4045 }
4046
4047 static void enum_map_stop(struct seq_file *m, void *v)
4048 {
4049         mutex_unlock(&trace_enum_mutex);
4050 }
4051
4052 static int enum_map_show(struct seq_file *m, void *v)
4053 {
4054         union trace_enum_map_item *ptr = v;
4055
4056         seq_printf(m, "%s %ld (%s)\n",
4057                    ptr->map.enum_string, ptr->map.enum_value,
4058                    ptr->map.system);
4059
4060         return 0;
4061 }
4062
4063 static const struct seq_operations tracing_enum_map_seq_ops = {
4064         .start          = enum_map_start,
4065         .next           = enum_map_next,
4066         .stop           = enum_map_stop,
4067         .show           = enum_map_show,
4068 };
4069
4070 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4071 {
4072         if (tracing_disabled)
4073                 return -ENODEV;
4074
4075         return seq_open(filp, &tracing_enum_map_seq_ops);
4076 }
4077
4078 static const struct file_operations tracing_enum_map_fops = {
4079         .open           = tracing_enum_map_open,
4080         .read           = seq_read,
4081         .llseek         = seq_lseek,
4082         .release        = seq_release,
4083 };
4084
4085 static inline union trace_enum_map_item *
4086 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4087 {
4088         /* Return tail of array given the head */
4089         return ptr + ptr->head.length + 1;
4090 }
4091
4092 static void
4093 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4094                            int len)
4095 {
4096         struct trace_enum_map **stop;
4097         struct trace_enum_map **map;
4098         union trace_enum_map_item *map_array;
4099         union trace_enum_map_item *ptr;
4100
4101         stop = start + len;
4102
4103         /*
4104          * The trace_enum_maps contains the map plus a head and tail item,
4105          * where the head holds the module and length of array, and the
4106          * tail holds a pointer to the next list.
4107          */
4108         map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4109         if (!map_array) {
4110                 pr_warning("Unable to allocate trace enum mapping\n");
4111                 return;
4112         }
4113
4114         mutex_lock(&trace_enum_mutex);
4115
4116         if (!trace_enum_maps)
4117                 trace_enum_maps = map_array;
4118         else {
4119                 ptr = trace_enum_maps;
4120                 for (;;) {
4121                         ptr = trace_enum_jmp_to_tail(ptr);
4122                         if (!ptr->tail.next)
4123                                 break;
4124                         ptr = ptr->tail.next;
4125
4126                 }
4127                 ptr->tail.next = map_array;
4128         }
4129         map_array->head.mod = mod;
4130         map_array->head.length = len;
4131         map_array++;
4132
4133         for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4134                 map_array->map = **map;
4135                 map_array++;
4136         }
4137         memset(map_array, 0, sizeof(*map_array));
4138
4139         mutex_unlock(&trace_enum_mutex);
4140 }
4141
4142 static void trace_create_enum_file(struct dentry *d_tracer)
4143 {
4144         trace_create_file("enum_map", 0444, d_tracer,
4145                           NULL, &tracing_enum_map_fops);
4146 }
4147
4148 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4149 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4150 static inline void trace_insert_enum_map_file(struct module *mod,
4151                               struct trace_enum_map **start, int len) { }
4152 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4153
4154 static void trace_insert_enum_map(struct module *mod,
4155                                   struct trace_enum_map **start, int len)
4156 {
4157         struct trace_enum_map **map;
4158
4159         if (len <= 0)
4160                 return;
4161
4162         map = start;
4163
4164         trace_event_enum_update(map, len);
4165
4166         trace_insert_enum_map_file(mod, start, len);
4167 }
4168
4169 static ssize_t
4170 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4171                        size_t cnt, loff_t *ppos)
4172 {
4173         struct trace_array *tr = filp->private_data;
4174         char buf[MAX_TRACER_SIZE+2];
4175         int r;
4176
4177         mutex_lock(&trace_types_lock);
4178         r = sprintf(buf, "%s\n", tr->current_trace->name);
4179         mutex_unlock(&trace_types_lock);
4180
4181         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4182 }
4183
4184 int tracer_init(struct tracer *t, struct trace_array *tr)
4185 {
4186         tracing_reset_online_cpus(&tr->trace_buffer);
4187         return t->init(tr);
4188 }
4189
4190 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4191 {
4192         int cpu;
4193
4194         for_each_tracing_cpu(cpu)
4195                 per_cpu_ptr(buf->data, cpu)->entries = val;
4196 }
4197
4198 #ifdef CONFIG_TRACER_MAX_TRACE
4199 /* resize @trace_buf's buffer to the size of @size_buf's entries */
4200 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4201                                         struct trace_buffer *size_buf, int cpu_id)
4202 {
4203         int cpu, ret = 0;
4204
4205         if (cpu_id == RING_BUFFER_ALL_CPUS) {
4206                 for_each_tracing_cpu(cpu) {
4207                         ret = ring_buffer_resize(trace_buf->buffer,
4208                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4209                         if (ret < 0)
4210                                 break;
4211                         per_cpu_ptr(trace_buf->data, cpu)->entries =
4212                                 per_cpu_ptr(size_buf->data, cpu)->entries;
4213                 }
4214         } else {
4215                 ret = ring_buffer_resize(trace_buf->buffer,
4216                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4217                 if (ret == 0)
4218                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4219                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4220         }
4221
4222         return ret;
4223 }
4224 #endif /* CONFIG_TRACER_MAX_TRACE */
4225
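     /*
      * Resize one CPU buffer (or all of them with RING_BUFFER_ALL_CPUS).
      * When the current tracer of the global array uses the max/snapshot
      * buffer, that buffer is resized as well; if that second resize fails,
      * the main buffer is put back to its previous size.
      */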
4226 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4227                                         unsigned long size, int cpu)
4228 {
4229         int ret;
4230
4231         /*
4232          * If kernel or user changes the size of the ring buffer
4233          * we use the size that was given, and we can forget about
4234          * expanding it later.
4235          */
4236         ring_buffer_expanded = true;
4237
4238         /* May be called before buffers are initialized */
4239         if (!tr->trace_buffer.buffer)
4240                 return 0;
4241
4242         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4243         if (ret < 0)
4244                 return ret;
4245
4246 #ifdef CONFIG_TRACER_MAX_TRACE
4247         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4248             !tr->current_trace->use_max_tr)
4249                 goto out;
4250
4251         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4252         if (ret < 0) {
4253                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4254                                                      &tr->trace_buffer, cpu);
4255                 if (r < 0) {
4256                         /*
4257                          * AARGH! We are left with different
4258                          * size max buffer!!!!
4259                          * The max buffer is our "snapshot" buffer.
4260                          * When a tracer needs a snapshot (one of the
4261                          * latency tracers), it swaps the max buffer
4262                          * with the saved snap shot. We succeeded to
4263                          * with the saved snapshot. We succeeded in updating
4264                          * the size of the main buffer, but failed to
4265                          * to reset the main buffer to the original size, we
4266                          * failed there too. This is very unlikely to
4267                          * happen, but if it does, warn and kill all
4268                          * tracing.
4269                          */
4270                         WARN_ON(1);
4271                         tracing_disabled = 1;
4272                 }
4273                 return ret;
4274         }
4275
4276         if (cpu == RING_BUFFER_ALL_CPUS)
4277                 set_buffer_entries(&tr->max_buffer, size);
4278         else
4279                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4280
4281  out:
4282 #endif /* CONFIG_TRACER_MAX_TRACE */
4283
4284         if (cpu == RING_BUFFER_ALL_CPUS)
4285                 set_buffer_entries(&tr->trace_buffer, size);
4286         else
4287                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4288
4289         return ret;
4290 }
4291
4292 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4293                                           unsigned long size, int cpu_id)
4294 {
4295         int ret = size;
4296
4297         mutex_lock(&trace_types_lock);
4298
4299         if (cpu_id != RING_BUFFER_ALL_CPUS) {
4300                 /* make sure, this cpu is enabled in the mask */
4301                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4302                         ret = -EINVAL;
4303                         goto out;
4304                 }
4305         }
4306
4307         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4308         if (ret < 0)
4309                 ret = -ENOMEM;
4310
4311 out:
4312         mutex_unlock(&trace_types_lock);
4313
4314         return ret;
4315 }
4316
4317
4318 /**
4319  * tracing_update_buffers - used by tracing facility to expand ring buffers
4320  *
4321  * To save on memory when the tracing is never used on a system with it
4322  * configured in. The ring buffers are set to a minimum size. But once
4323  * a user starts to use the tracing facility, then they need to grow
4324  * to their default size.
4325  *
4326  * This function is to be called when a tracer is about to be used.
4327  */
4328 int tracing_update_buffers(void)
4329 {
4330         int ret = 0;
4331
4332         mutex_lock(&trace_types_lock);
4333         if (!ring_buffer_expanded)
4334                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4335                                                 RING_BUFFER_ALL_CPUS);
4336         mutex_unlock(&trace_types_lock);
4337
4338         return ret;
4339 }
4340
4341 struct trace_option_dentry;
4342
4343 static void
4344 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4345
4346 /*
4347  * Used to clear out the tracer before deletion of an instance.
4348  * Must have trace_types_lock held.
4349  */
4350 static void tracing_set_nop(struct trace_array *tr)
4351 {
4352         if (tr->current_trace == &nop_trace)
4353                 return;
4354
4355         tr->current_trace->enabled--;
4356
4357         if (tr->current_trace->reset)
4358                 tr->current_trace->reset(tr);
4359
4360         tr->current_trace = &nop_trace;
4361 }
4362
4363 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4364 {
4365         /* Only enable if the directory has been created already. */
4366         if (!tr->dir)
4367                 return;
4368
4369         create_trace_option_files(tr, t);
4370 }
4371
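     /*
      * Switch @tr to the tracer named in @buf (e.g. "echo function >
      * current_tracer"). The ring buffer is expanded to its default size
      * first, the switch is refused with -EBUSY while trace_pipe readers
      * hold a reference, and the snapshot buffer is freed or allocated
      * depending on whether the new tracer needs it.
      */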
4372 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4373 {
4374         struct tracer *t;
4375 #ifdef CONFIG_TRACER_MAX_TRACE
4376         bool had_max_tr;
4377 #endif
4378         int ret = 0;
4379
4380         mutex_lock(&trace_types_lock);
4381
4382         if (!ring_buffer_expanded) {
4383                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4384                                                 RING_BUFFER_ALL_CPUS);
4385                 if (ret < 0)
4386                         goto out;
4387                 ret = 0;
4388         }
4389
4390         for (t = trace_types; t; t = t->next) {
4391                 if (strcmp(t->name, buf) == 0)
4392                         break;
4393         }
4394         if (!t) {
4395                 ret = -EINVAL;
4396                 goto out;
4397         }
4398         if (t == tr->current_trace)
4399                 goto out;
4400
4401         /* Some tracers are only allowed for the top level buffer */
4402         if (!trace_ok_for_array(t, tr)) {
4403                 ret = -EINVAL;
4404                 goto out;
4405         }
4406
4407         /* If trace pipe files are being read, we can't change the tracer */
4408         if (tr->current_trace->ref) {
4409                 ret = -EBUSY;
4410                 goto out;
4411         }
4412
4413         trace_branch_disable();
4414
4415         tr->current_trace->enabled--;
4416
4417         if (tr->current_trace->reset)
4418                 tr->current_trace->reset(tr);
4419
4420         /* Current trace needs to be nop_trace before synchronize_sched */
4421         tr->current_trace = &nop_trace;
4422
4423 #ifdef CONFIG_TRACER_MAX_TRACE
4424         had_max_tr = tr->allocated_snapshot;
4425
4426         if (had_max_tr && !t->use_max_tr) {
4427                 /*
4428                  * We need to make sure that the update_max_tr sees that
4429                  * current_trace changed to nop_trace to keep it from
4430                  * swapping the buffers after we resize it.
4431                  * The update_max_tr is called from interrupts disabled
4432                  * so a synchronize_sched() is sufficient.
4433                  */
4434                 synchronize_sched();
4435                 free_snapshot(tr);
4436         }
4437 #endif
4438
4439 #ifdef CONFIG_TRACER_MAX_TRACE
4440         if (t->use_max_tr && !had_max_tr) {
4441                 ret = alloc_snapshot(tr);
4442                 if (ret < 0)
4443                         goto out;
4444         }
4445 #endif
4446
4447         if (t->init) {
4448                 ret = tracer_init(t, tr);
4449                 if (ret)
4450                         goto out;
4451         }
4452
4453         tr->current_trace = t;
4454         tr->current_trace->enabled++;
4455         trace_branch_enable(tr);
4456  out:
4457         mutex_unlock(&trace_types_lock);
4458
4459         return ret;
4460 }
4461
4462 static ssize_t
4463 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4464                         size_t cnt, loff_t *ppos)
4465 {
4466         struct trace_array *tr = filp->private_data;
4467         char buf[MAX_TRACER_SIZE+1];
4468         int i;
4469         size_t ret;
4470         int err;
4471
4472         ret = cnt;
4473
4474         if (cnt > MAX_TRACER_SIZE)
4475                 cnt = MAX_TRACER_SIZE;
4476
4477         if (copy_from_user(&buf, ubuf, cnt))
4478                 return -EFAULT;
4479
4480         buf[cnt] = 0;
4481
4482         /* strip ending whitespace. */
4483         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4484                 buf[i] = 0;
4485
4486         err = tracing_set_tracer(tr, buf);
4487         if (err)
4488                 return err;
4489
4490         *ppos += ret;
4491
4492         return ret;
4493 }
4494
4495 static ssize_t
4496 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4497                    size_t cnt, loff_t *ppos)
4498 {
4499         char buf[64];
4500         int r;
4501
4502         r = snprintf(buf, sizeof(buf), "%ld\n",
4503                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4504         if (r > sizeof(buf))
4505                 r = sizeof(buf);
4506         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4507 }
4508
4509 static ssize_t
4510 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4511                     size_t cnt, loff_t *ppos)
4512 {
4513         unsigned long val;
4514         int ret;
4515
4516         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517         if (ret)
4518                 return ret;
4519
4520         *ptr = val * 1000;
4521
4522         return cnt;
4523 }
4524
4525 static ssize_t
4526 tracing_thresh_read(struct file *filp, char __user *ubuf,
4527                     size_t cnt, loff_t *ppos)
4528 {
4529         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4530 }
4531
4532 static ssize_t
4533 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4534                      size_t cnt, loff_t *ppos)
4535 {
4536         struct trace_array *tr = filp->private_data;
4537         int ret;
4538
4539         mutex_lock(&trace_types_lock);
4540         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4541         if (ret < 0)
4542                 goto out;
4543
4544         if (tr->current_trace->update_thresh) {
4545                 ret = tr->current_trace->update_thresh(tr);
4546                 if (ret < 0)
4547                         goto out;
4548         }
4549
4550         ret = cnt;
4551 out:
4552         mutex_unlock(&trace_types_lock);
4553
4554         return ret;
4555 }
4556
4557 static ssize_t
4558 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4559                      size_t cnt, loff_t *ppos)
4560 {
4561         return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4562 }
4563
4564 static ssize_t
4565 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4566                       size_t cnt, loff_t *ppos)
4567 {
4568         return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4569 }
4570
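     /*
      * Open handler for the consuming trace_pipe read: build a lightweight
      * iterator (no per-cpu buffer iterators, tracing is not stopped) and
      * bump current_trace->ref so the tracer cannot be switched while the
      * pipe is open.
      */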
4571 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4572 {
4573         struct trace_array *tr = inode->i_private;
4574         struct trace_iterator *iter;
4575         int ret = 0;
4576
4577         if (tracing_disabled)
4578                 return -ENODEV;
4579
4580         if (trace_array_get(tr) < 0)
4581                 return -ENODEV;
4582
4583         mutex_lock(&trace_types_lock);
4584
4585         /* create a buffer to store the information to pass to userspace */
4586         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4587         if (!iter) {
4588                 ret = -ENOMEM;
4589                 __trace_array_put(tr);
4590                 goto out;
4591         }
4592
4593         trace_seq_init(&iter->seq);
4594         iter->trace = tr->current_trace;
4595
4596         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4597                 ret = -ENOMEM;
4598                 goto fail;
4599         }
4600
4601         /* trace pipe does not show start of buffer */
4602         cpumask_setall(iter->started);
4603
4604         if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4605                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4606
4607         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4608         if (trace_clocks[tr->clock_id].in_ns)
4609                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4610
4611         iter->tr = tr;
4612         iter->trace_buffer = &tr->trace_buffer;
4613         iter->cpu_file = tracing_get_cpu(inode);
4614         mutex_init(&iter->mutex);
4615         filp->private_data = iter;
4616
4617         if (iter->trace->pipe_open)
4618                 iter->trace->pipe_open(iter);
4619
4620         nonseekable_open(inode, filp);
4621
4622         tr->current_trace->ref++;
4623 out:
4624         mutex_unlock(&trace_types_lock);
4625         return ret;
4626
4627 fail:
4628         kfree(iter->trace);
4629         kfree(iter);
4630         __trace_array_put(tr);
4631         mutex_unlock(&trace_types_lock);
4632         return ret;
4633 }
4634
4635 static int tracing_release_pipe(struct inode *inode, struct file *file)
4636 {
4637         struct trace_iterator *iter = file->private_data;
4638         struct trace_array *tr = inode->i_private;
4639
4640         mutex_lock(&trace_types_lock);
4641
4642         tr->current_trace->ref--;
4643
4644         if (iter->trace->pipe_close)
4645                 iter->trace->pipe_close(iter);
4646
4647         mutex_unlock(&trace_types_lock);
4648
4649         free_cpumask_var(iter->started);
4650         mutex_destroy(&iter->mutex);
4651         kfree(iter);
4652
4653         trace_array_put(tr);
4654
4655         return 0;
4656 }
4657
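     /*
      * Poll helper for the pipe read paths: a static iterator is always
      * readable, TRACE_ITER_BLOCK forces "readable" as well, otherwise
      * wait on the ring buffer itself.
      */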
4658 static unsigned int
4659 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4660 {
4661         struct trace_array *tr = iter->tr;
4662
4663         /* Iterators are static, they should be filled or empty */
4664         if (trace_buffer_iter(iter, iter->cpu_file))
4665                 return POLLIN | POLLRDNORM;
4666
4667         if (tr->trace_flags & TRACE_ITER_BLOCK)
4668                 /*
4669                  * Always select as readable when in blocking mode
4670                  */
4671                 return POLLIN | POLLRDNORM;
4672         else
4673                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4674                                              filp, poll_table);
4675 }
4676
4677 static unsigned int
4678 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4679 {
4680         struct trace_iterator *iter = filp->private_data;
4681
4682         return trace_poll(iter, filp, poll_table);
4683 }
4684
4685 /* Must be called with iter->mutex held. */
4686 static int tracing_wait_pipe(struct file *filp)
4687 {
4688         struct trace_iterator *iter = filp->private_data;
4689         int ret;
4690
4691         while (trace_empty(iter)) {
4692
4693                 if ((filp->f_flags & O_NONBLOCK)) {
4694                         return -EAGAIN;
4695                 }
4696
4697                 /*
4698                  * We block until we read something and tracing is disabled.
4699                  * We still block if tracing is disabled, but we have never
4700                  * read anything. This allows a user to cat this file, and
4701                  * then enable tracing. But after we have read something,
4702                  * we give an EOF when tracing is again disabled.
4703                  *
4704                  * iter->pos will be 0 if we haven't read anything.
4705                  */
4706                 if (!tracing_is_on() && iter->pos)
4707                         break;
4708
4709                 mutex_unlock(&iter->mutex);
4710
4711                 ret = wait_on_pipe(iter, false);
4712
4713                 mutex_lock(&iter->mutex);
4714
4715                 if (ret)
4716                         return ret;
4717         }
4718
4719         return 1;
4720 }
4721
4722 /*
4723  * Consumer reader.
4724  */
4725 static ssize_t
4726 tracing_read_pipe(struct file *filp, char __user *ubuf,
4727                   size_t cnt, loff_t *ppos)
4728 {
4729         struct trace_iterator *iter = filp->private_data;
4730         ssize_t sret;
4731
4732         /* return any leftover data */
4733         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4734         if (sret != -EBUSY)
4735                 return sret;
4736
4737         trace_seq_init(&iter->seq);
4738
4739         /*
4740          * Avoid more than one consumer on a single file descriptor
4741          * This is just a matter of traces coherency, the ring buffer itself
4742          * is protected.
4743          */
4744         mutex_lock(&iter->mutex);
4745         if (iter->trace->read) {
4746                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4747                 if (sret)
4748                         goto out;
4749         }
4750
4751 waitagain:
4752         sret = tracing_wait_pipe(filp);
4753         if (sret <= 0)
4754                 goto out;
4755
4756         /* stop when tracing is finished */
4757         if (trace_empty(iter)) {
4758                 sret = 0;
4759                 goto out;
4760         }
4761
4762         if (cnt >= PAGE_SIZE)
4763                 cnt = PAGE_SIZE - 1;
4764
4765         /* reset all but tr, trace, and overruns */
4766         memset(&iter->seq, 0,
4767                sizeof(struct trace_iterator) -
4768                offsetof(struct trace_iterator, seq));
4769         cpumask_clear(iter->started);
4770         iter->pos = -1;
4771
4772         trace_event_read_lock();
4773         trace_access_lock(iter->cpu_file);
4774         while (trace_find_next_entry_inc(iter) != NULL) {
4775                 enum print_line_t ret;
4776                 int save_len = iter->seq.seq.len;
4777
4778                 ret = print_trace_line(iter);
4779                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4780                         /* don't print partial lines */
4781                         iter->seq.seq.len = save_len;
4782                         break;
4783                 }
4784                 if (ret != TRACE_TYPE_NO_CONSUME)
4785                         trace_consume(iter);
4786
4787                 if (trace_seq_used(&iter->seq) >= cnt)
4788                         break;
4789
4790                 /*
4791                  * Setting the full flag means we reached the trace_seq buffer
4792                  * size and we should leave by partial output condition above.
4793                  * One of the trace_seq_* functions is not used properly.
4794                  */
4795                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4796                           iter->ent->type);
4797         }
4798         trace_access_unlock(iter->cpu_file);
4799         trace_event_read_unlock();
4800
4801         /* Now copy what we have to the user */
4802         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4803         if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4804                 trace_seq_init(&iter->seq);
4805
4806         /*
4807          * If there was nothing to send to user, in spite of consuming trace
4808          * entries, go back to wait for more entries.
4809          */
4810         if (sret == -EBUSY)
4811                 goto waitagain;
4812
4813 out:
4814         mutex_unlock(&iter->mutex);
4815
4816         return sret;
4817 }
4818
4819 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4820                                      unsigned int idx)
4821 {
4822         __free_page(spd->pages[idx]);
4823 }
4824
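/* Pipe buffer operations used by tracing_splice_read_pipe() */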
4825 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4826         .can_merge              = 0,
4827         .confirm                = generic_pipe_buf_confirm,
4828         .release                = generic_pipe_buf_release,
4829         .steal                  = generic_pipe_buf_steal,
4830         .get                    = generic_pipe_buf_get,
4831 };
4832
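/*
 * Fill one splice page worth of output: format entries into iter->seq
 * until the page-sized seq buffer fills up or "rem" bytes have been
 * produced, consuming entries along the way. Returns how many of the
 * requested bytes are still left to produce.
 */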
4833 static size_t
4834 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4835 {
4836         size_t count;
4837         int save_len;
4838         int ret;
4839
4840         /* Seq buffer is page-sized, exactly what we need. */
4841         for (;;) {
4842                 save_len = iter->seq.seq.len;
4843                 ret = print_trace_line(iter);
4844
4845                 if (trace_seq_has_overflowed(&iter->seq)) {
4846                         iter->seq.seq.len = save_len;
4847                         break;
4848                 }
4849
4850                 /*
4851                  * This should not be hit, because it should only
4852                  * be set if the iter->seq overflowed. But check it
4853                  * anyway to be safe.
4854                  */
4855                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4856                         iter->seq.seq.len = save_len;
4857                         break;
4858                 }
4859
4860                 count = trace_seq_used(&iter->seq) - save_len;
4861                 if (rem < count) {
4862                         rem = 0;
4863                         iter->seq.seq.len = save_len;
4864                         break;
4865                 }
4866
4867                 if (ret != TRACE_TYPE_NO_CONSUME)
4868                         trace_consume(iter);
4869                 rem -= count;
4870                 if (!trace_find_next_entry_inc(iter)) {
4871                         rem = 0;
4872                         iter->ent = NULL;
4873                         break;
4874                 }
4875         }
4876
4877         return rem;
4878 }
4879
4880 static ssize_t tracing_splice_read_pipe(struct file *filp,
4881                                         loff_t *ppos,
4882                                         struct pipe_inode_info *pipe,
4883                                         size_t len,
4884                                         unsigned int flags)
4885 {
4886         struct page *pages_def[PIPE_DEF_BUFFERS];
4887         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4888         struct trace_iterator *iter = filp->private_data;
4889         struct splice_pipe_desc spd = {
4890                 .pages          = pages_def,
4891                 .partial        = partial_def,
4892                 .nr_pages       = 0, /* This gets updated below. */
4893                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4894                 .flags          = flags,
4895                 .ops            = &tracing_pipe_buf_ops,
4896                 .spd_release    = tracing_spd_release_pipe,
4897         };
4898         ssize_t ret;
4899         size_t rem;
4900         unsigned int i;
4901
4902         if (splice_grow_spd(pipe, &spd))
4903                 return -ENOMEM;
4904
4905         mutex_lock(&iter->mutex);
4906
4907         if (iter->trace->splice_read) {
4908                 ret = iter->trace->splice_read(iter, filp,
4909                                                ppos, pipe, len, flags);
4910                 if (ret)
4911                         goto out_err;
4912         }
4913
4914         ret = tracing_wait_pipe(filp);
4915         if (ret <= 0)
4916                 goto out_err;
4917
4918         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4919                 ret = -EFAULT;
4920                 goto out_err;
4921         }
4922
4923         trace_event_read_lock();
4924         trace_access_lock(iter->cpu_file);
4925
4926         /* Fill as many pages as possible. */
4927         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4928                 spd.pages[i] = alloc_page(GFP_KERNEL);
4929                 if (!spd.pages[i])
4930                         break;
4931
4932                 rem = tracing_fill_pipe_page(rem, iter);
4933
4934                 /* Copy the data into the page, so we can start over. */
4935                 ret = trace_seq_to_buffer(&iter->seq,
4936                                           page_address(spd.pages[i]),
4937                                           trace_seq_used(&iter->seq));
4938                 if (ret < 0) {
4939                         __free_page(spd.pages[i]);
4940                         break;
4941                 }
4942                 spd.partial[i].offset = 0;
4943                 spd.partial[i].len = trace_seq_used(&iter->seq);
4944
4945                 trace_seq_init(&iter->seq);
4946         }
4947
4948         trace_access_unlock(iter->cpu_file);
4949         trace_event_read_unlock();
4950         mutex_unlock(&iter->mutex);
4951
4952         spd.nr_pages = i;
4953
4954         ret = splice_to_pipe(pipe, &spd);
4955 out:
4956         splice_shrink_spd(&spd);
4957         return ret;
4958
4959 out_err:
4960         mutex_unlock(&iter->mutex);
4961         goto out;
4962 }
4963
4964 static ssize_t
4965 tracing_entries_read(struct file *filp, char __user *ubuf,
4966                      size_t cnt, loff_t *ppos)
4967 {
4968         struct inode *inode = file_inode(filp);
4969         struct trace_array *tr = inode->i_private;
4970         int cpu = tracing_get_cpu(inode);
4971         char buf[64];
4972         int r = 0;
4973         ssize_t ret;
4974
4975         mutex_lock(&trace_types_lock);
4976
4977         if (cpu == RING_BUFFER_ALL_CPUS) {
4978                 int cpu, buf_size_same;
4979                 unsigned long size;
4980
4981                 size = 0;
4982                 buf_size_same = 1;
4983                 /* check if all cpu sizes are same */
4984                 for_each_tracing_cpu(cpu) {
4985                         /* fill in the size from first enabled cpu */
4986                         if (size == 0)
4987                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4988                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4989                                 buf_size_same = 0;
4990                                 break;
4991                         }
4992                 }
4993
4994                 if (buf_size_same) {
4995                         if (!ring_buffer_expanded)
4996                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4997                                             size >> 10,
4998                                             trace_buf_size >> 10);
4999                         else
5000                                 r = sprintf(buf, "%lu\n", size >> 10);
5001                 } else
5002                         r = sprintf(buf, "X\n");
5003         } else
5004                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5005
5006         mutex_unlock(&trace_types_lock);
5007
5008         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5009         return ret;
5010 }
5011
5012 static ssize_t
5013 tracing_entries_write(struct file *filp, const char __user *ubuf,
5014                       size_t cnt, loff_t *ppos)
5015 {
5016         struct inode *inode = file_inode(filp);
5017         struct trace_array *tr = inode->i_private;
5018         unsigned long val;
5019         int ret;
5020
5021         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5022         if (ret)
5023                 return ret;
5024
5025         /* must have at least 1 entry */
5026         if (!val)
5027                 return -EINVAL;
5028
5029         /* value is in KB */
5030         val <<= 10;
5031         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5032         if (ret < 0)
5033                 return ret;
5034
5035         *ppos += cnt;
5036
5037         return cnt;
5038 }
5039
5040 static ssize_t
5041 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5042                                 size_t cnt, loff_t *ppos)
5043 {
5044         struct trace_array *tr = filp->private_data;
5045         char buf[64];
5046         int r, cpu;
5047         unsigned long size = 0, expanded_size = 0;
5048
5049         mutex_lock(&trace_types_lock);
5050         for_each_tracing_cpu(cpu) {
5051                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5052                 if (!ring_buffer_expanded)
5053                         expanded_size += trace_buf_size >> 10;
5054         }
5055         if (ring_buffer_expanded)
5056                 r = sprintf(buf, "%lu\n", size);
5057         else
5058                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5059         mutex_unlock(&trace_types_lock);
5060
5061         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5062 }
5063
5064 static ssize_t
5065 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5066                           size_t cnt, loff_t *ppos)
5067 {
5068         /*
5069          * There is no need to read what the user has written; this function
5070          * only exists so that using "echo" on the file does not return an error.
5071          */
5072
5073         *ppos += cnt;
5074
5075         return cnt;
5076 }
5077
5078 static int
5079 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5080 {
5081         struct trace_array *tr = inode->i_private;
5082
5083         /* disable tracing ? */
5084         if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5085                 tracer_tracing_off(tr);
5086         /* resize the ring buffer to 0 */
5087         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5088
5089         trace_array_put(tr);
5090
5091         return 0;
5092 }
5093
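/*
 * Write handler for the trace_marker interface: copy the user string
 * directly into the ring buffer as a TRACE_PRINT event, appending a
 * newline if one is not already present.
 */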
5094 static ssize_t
5095 tracing_mark_write(struct file *filp, const char __user *ubuf,
5096                                         size_t cnt, loff_t *fpos)
5097 {
5098         unsigned long addr = (unsigned long)ubuf;
5099         struct trace_array *tr = filp->private_data;
5100         struct ring_buffer_event *event;
5101         struct ring_buffer *buffer;
5102         struct print_entry *entry;
5103         unsigned long irq_flags;
5104         struct page *pages[2];
5105         void *map_page[2];
5106         int nr_pages = 1;
5107         ssize_t written;
5108         int offset;
5109         int size;
5110         int len;
5111         int ret;
5112         int i;
5113
5114         if (tracing_disabled)
5115                 return -EINVAL;
5116
5117         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5118                 return -EINVAL;
5119
5120         if (cnt > TRACE_BUF_SIZE)
5121                 cnt = TRACE_BUF_SIZE;
5122
5123         /*
5124          * Userspace is injecting traces into the kernel trace buffer.
5125          * We want to be as non-intrusive as possible.
5126          * To do so, we do not want to allocate any special buffers
5127          * or take any locks, but instead write the userspace data
5128          * straight into the ring buffer.
5129          *
5130          * First we need to pin the userspace buffer into memory.
5131          * It is most likely already resident, because the task just
5132          * referenced it, but there is no guarantee of that. By using
5133          * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we
5134          * can access the pages directly and then write the data
5135          * straight into the ring buffer.
5136          */
5137         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5138
5139         /* check if we cross pages */
5140         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5141                 nr_pages = 2;
5142
5143         offset = addr & (PAGE_SIZE - 1);
5144         addr &= PAGE_MASK;
5145
5146         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5147         if (ret < nr_pages) {
5148                 while (--ret >= 0)
5149                         put_page(pages[ret]);
5150                 written = -EFAULT;
5151                 goto out;
5152         }
5153
5154         for (i = 0; i < nr_pages; i++)
5155                 map_page[i] = kmap_atomic(pages[i]);
5156
5157         local_save_flags(irq_flags);
5158         size = sizeof(*entry) + cnt + 2; /* possible \n added */
5159         buffer = tr->trace_buffer.buffer;
5160         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5161                                           irq_flags, preempt_count());
5162         if (!event) {
5163                 /* Ring buffer disabled, return as if not open for write */
5164                 written = -EBADF;
5165                 goto out_unlock;
5166         }
5167
5168         entry = ring_buffer_event_data(event);
5169         entry->ip = _THIS_IP_;
5170
5171         if (nr_pages == 2) {
5172                 len = PAGE_SIZE - offset;
5173                 memcpy(&entry->buf, map_page[0] + offset, len);
5174                 memcpy(&entry->buf[len], map_page[1], cnt - len);
5175         } else
5176                 memcpy(&entry->buf, map_page[0] + offset, cnt);
5177
5178         if (entry->buf[cnt - 1] != '\n') {
5179                 entry->buf[cnt] = '\n';
5180                 entry->buf[cnt + 1] = '\0';
5181         } else
5182                 entry->buf[cnt] = '\0';
5183
5184         __buffer_unlock_commit(buffer, event);
5185
5186         written = cnt;
5187
5188         *fpos += written;
5189
5190  out_unlock:
5191         for (i = nr_pages - 1; i >= 0; i--) {
5192                 kunmap_atomic(map_page[i]);
5193                 put_page(pages[i]);
5194         }
5195  out:
5196         return written;
5197 }
5198
5199 static int tracing_clock_show(struct seq_file *m, void *v)
5200 {
5201         struct trace_array *tr = m->private;
5202         int i;
5203
5204         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5205                 seq_printf(m,
5206                         "%s%s%s%s", i ? " " : "",
5207                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5208                         i == tr->clock_id ? "]" : "");
5209         seq_putc(m, '\n');
5210
5211         return 0;
5212 }
5213
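/*
 * Switch the trace clock of this trace array to the trace_clocks[]
 * entry named by clockstr (e.g. "local" or "global"). The buffers are
 * reset afterwards because timestamps from different clocks cannot be
 * compared.
 */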
5214 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5215 {
5216         int i;
5217
5218         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5219                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5220                         break;
5221         }
5222         if (i == ARRAY_SIZE(trace_clocks))
5223                 return -EINVAL;
5224
5225         mutex_lock(&trace_types_lock);
5226
5227         tr->clock_id = i;
5228
5229         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5230
5231         /*
5232          * New clock may not be consistent with the previous clock.
5233          * Reset the buffer so that it doesn't have incomparable timestamps.
5234          */
5235         tracing_reset_online_cpus(&tr->trace_buffer);
5236
5237 #ifdef CONFIG_TRACER_MAX_TRACE
5238         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5239                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5240         tracing_reset_online_cpus(&tr->max_buffer);
5241 #endif
5242
5243         mutex_unlock(&trace_types_lock);
5244
5245         return 0;
5246 }
5247
5248 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5249                                    size_t cnt, loff_t *fpos)
5250 {
5251         struct seq_file *m = filp->private_data;
5252         struct trace_array *tr = m->private;
5253         char buf[64];
5254         const char *clockstr;
5255         int ret;
5256
5257         if (cnt >= sizeof(buf))
5258                 return -EINVAL;
5259
5260         if (copy_from_user(&buf, ubuf, cnt))
5261                 return -EFAULT;
5262
5263         buf[cnt] = 0;
5264
5265         clockstr = strstrip(buf);
5266
5267         ret = tracing_set_clock(tr, clockstr);
5268         if (ret)
5269                 return ret;
5270
5271         *fpos += cnt;
5272
5273         return cnt;
5274 }
5275
5276 static int tracing_clock_open(struct inode *inode, struct file *file)
5277 {
5278         struct trace_array *tr = inode->i_private;
5279         int ret;
5280
5281         if (tracing_disabled)
5282                 return -ENODEV;
5283
5284         if (trace_array_get(tr))
5285                 return -ENODEV;
5286
5287         ret = single_open(file, tracing_clock_show, inode->i_private);
5288         if (ret < 0)
5289                 trace_array_put(tr);
5290
5291         return ret;
5292 }
5293
5294 struct ftrace_buffer_info {
5295         struct trace_iterator   iter;
5296         void                    *spare;
5297         unsigned int            read;
5298 };
5299
5300 #ifdef CONFIG_TRACER_SNAPSHOT
5301 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5302 {
5303         struct trace_array *tr = inode->i_private;
5304         struct trace_iterator *iter;
5305         struct seq_file *m;
5306         int ret = 0;
5307
5308         if (trace_array_get(tr) < 0)
5309                 return -ENODEV;
5310
5311         if (file->f_mode & FMODE_READ) {
5312                 iter = __tracing_open(inode, file, true);
5313                 if (IS_ERR(iter))
5314                         ret = PTR_ERR(iter);
5315         } else {
5316                 /* Writes still need the seq_file to hold the private data */
5317                 ret = -ENOMEM;
5318                 m = kzalloc(sizeof(*m), GFP_KERNEL);
5319                 if (!m)
5320                         goto out;
5321                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5322                 if (!iter) {
5323                         kfree(m);
5324                         goto out;
5325                 }
5326                 ret = 0;
5327
5328                 iter->tr = tr;
5329                 iter->trace_buffer = &tr->max_buffer;
5330                 iter->cpu_file = tracing_get_cpu(inode);
5331                 m->private = iter;
5332                 file->private_data = m;
5333         }
5334 out:
5335         if (ret < 0)
5336                 trace_array_put(tr);
5337
5338         return ret;
5339 }
5340
5341 static ssize_t
5342 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5343                        loff_t *ppos)
5344 {
5345         struct seq_file *m = filp->private_data;
5346         struct trace_iterator *iter = m->private;
5347         struct trace_array *tr = iter->tr;
5348         unsigned long val;
5349         int ret;
5350
5351         ret = tracing_update_buffers();
5352         if (ret < 0)
5353                 return ret;
5354
5355         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5356         if (ret)
5357                 return ret;
5358
5359         mutex_lock(&trace_types_lock);
5360
5361         if (tr->current_trace->use_max_tr) {
5362                 ret = -EBUSY;
5363                 goto out;
5364         }
5365
5366         switch (val) {
5367         case 0:
5368                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5369                         ret = -EINVAL;
5370                         break;
5371                 }
5372                 if (tr->allocated_snapshot)
5373                         free_snapshot(tr);
5374                 break;
5375         case 1:
5376 /* Only allow per-cpu swap if the ring buffer supports it */
5377 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5378                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5379                         ret = -EINVAL;
5380                         break;
5381                 }
5382 #endif
5383                 if (!tr->allocated_snapshot) {
5384                         ret = alloc_snapshot(tr);
5385                         if (ret < 0)
5386                                 break;
5387                 }
5388                 local_irq_disable();
5389                 /* Now, we're going to swap */
5390                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5391                         update_max_tr(tr, current, smp_processor_id());
5392                 else
5393                         update_max_tr_single(tr, current, iter->cpu_file);
5394                 local_irq_enable();
5395                 break;
5396         default:
5397                 if (tr->allocated_snapshot) {
5398                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5399                                 tracing_reset_online_cpus(&tr->max_buffer);
5400                         else
5401                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
5402                 }
5403                 break;
5404         }
5405
5406         if (ret >= 0) {
5407                 *ppos += cnt;
5408                 ret = cnt;
5409         }
5410 out:
5411         mutex_unlock(&trace_types_lock);
5412         return ret;
5413 }
5414
5415 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5416 {
5417         struct seq_file *m = file->private_data;
5418         int ret;
5419
5420         ret = tracing_release(inode, file);
5421
5422         if (file->f_mode & FMODE_READ)
5423                 return ret;
5424
5425         /* If write only, the seq_file is just a stub */
5426         if (m)
5427                 kfree(m->private);
5428         kfree(m);
5429
5430         return 0;
5431 }
5432
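/*
 * The per-cpu snapshot_raw files reuse the trace_pipe_raw
 * (tracing_buffers_*) implementation below, redirected at the max
 * (snapshot) buffer; hence these forward declarations.
 */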
5433 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5434 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5435                                     size_t count, loff_t *ppos);
5436 static int tracing_buffers_release(struct inode *inode, struct file *file);
5437 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5438                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5439
5440 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5441 {
5442         struct ftrace_buffer_info *info;
5443         int ret;
5444
5445         ret = tracing_buffers_open(inode, filp);
5446         if (ret < 0)
5447                 return ret;
5448
5449         info = filp->private_data;
5450
5451         if (info->iter.trace->use_max_tr) {
5452                 tracing_buffers_release(inode, filp);
5453                 return -EBUSY;
5454         }
5455
5456         info->iter.snapshot = true;
5457         info->iter.trace_buffer = &info->iter.tr->max_buffer;
5458
5459         return ret;
5460 }
5461
5462 #endif /* CONFIG_TRACER_SNAPSHOT */
5463
5464
5465 static const struct file_operations tracing_thresh_fops = {
5466         .open           = tracing_open_generic,
5467         .read           = tracing_thresh_read,
5468         .write          = tracing_thresh_write,
5469         .llseek         = generic_file_llseek,
5470 };
5471
5472 static const struct file_operations tracing_max_lat_fops = {
5473         .open           = tracing_open_generic,
5474         .read           = tracing_max_lat_read,
5475         .write          = tracing_max_lat_write,
5476         .llseek         = generic_file_llseek,
5477 };
5478
5479 static const struct file_operations set_tracer_fops = {
5480         .open           = tracing_open_generic,
5481         .read           = tracing_set_trace_read,
5482         .write          = tracing_set_trace_write,
5483         .llseek         = generic_file_llseek,
5484 };
5485
5486 static const struct file_operations tracing_pipe_fops = {
5487         .open           = tracing_open_pipe,
5488         .poll           = tracing_poll_pipe,
5489         .read           = tracing_read_pipe,
5490         .splice_read    = tracing_splice_read_pipe,
5491         .release        = tracing_release_pipe,
5492         .llseek         = no_llseek,
5493 };
5494
5495 static const struct file_operations tracing_entries_fops = {
5496         .open           = tracing_open_generic_tr,
5497         .read           = tracing_entries_read,
5498         .write          = tracing_entries_write,
5499         .llseek         = generic_file_llseek,
5500         .release        = tracing_release_generic_tr,
5501 };
5502
5503 static const struct file_operations tracing_total_entries_fops = {
5504         .open           = tracing_open_generic_tr,
5505         .read           = tracing_total_entries_read,
5506         .llseek         = generic_file_llseek,
5507         .release        = tracing_release_generic_tr,
5508 };
5509
5510 static const struct file_operations tracing_free_buffer_fops = {
5511         .open           = tracing_open_generic_tr,
5512         .write          = tracing_free_buffer_write,
5513         .release        = tracing_free_buffer_release,
5514 };
5515
5516 static const struct file_operations tracing_mark_fops = {
5517         .open           = tracing_open_generic_tr,
5518         .write          = tracing_mark_write,
5519         .llseek         = generic_file_llseek,
5520         .release        = tracing_release_generic_tr,
5521 };
5522
5523 static const struct file_operations trace_clock_fops = {
5524         .open           = tracing_clock_open,
5525         .read           = seq_read,
5526         .llseek         = seq_lseek,
5527         .release        = tracing_single_release_tr,
5528         .write          = tracing_clock_write,
5529 };
5530
5531 #ifdef CONFIG_TRACER_SNAPSHOT
5532 static const struct file_operations snapshot_fops = {
5533         .open           = tracing_snapshot_open,
5534         .read           = seq_read,
5535         .write          = tracing_snapshot_write,
5536         .llseek         = tracing_lseek,
5537         .release        = tracing_snapshot_release,
5538 };
5539
5540 static const struct file_operations snapshot_raw_fops = {
5541         .open           = snapshot_raw_open,
5542         .read           = tracing_buffers_read,
5543         .release        = tracing_buffers_release,
5544         .splice_read    = tracing_buffers_splice_read,
5545         .llseek         = no_llseek,
5546 };
5547
5548 #endif /* CONFIG_TRACER_SNAPSHOT */
5549
5550 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5551 {
5552         struct trace_array *tr = inode->i_private;
5553         struct ftrace_buffer_info *info;
5554         int ret;
5555
5556         if (tracing_disabled)
5557                 return -ENODEV;
5558
5559         if (trace_array_get(tr) < 0)
5560                 return -ENODEV;
5561
5562         info = kzalloc(sizeof(*info), GFP_KERNEL);
5563         if (!info) {
5564                 trace_array_put(tr);
5565                 return -ENOMEM;
5566         }
5567
5568         mutex_lock(&trace_types_lock);
5569
5570         info->iter.tr           = tr;
5571         info->iter.cpu_file     = tracing_get_cpu(inode);
5572         info->iter.trace        = tr->current_trace;
5573         info->iter.trace_buffer = &tr->trace_buffer;
5574         info->spare             = NULL;
5575         /* Force reading ring buffer for first read */
5576         info->read              = (unsigned int)-1;
5577
5578         filp->private_data = info;
5579
5580         tr->current_trace->ref++;
5581
5582         mutex_unlock(&trace_types_lock);
5583
5584         ret = nonseekable_open(inode, filp);
5585         if (ret < 0)
5586                 trace_array_put(tr);
5587
5588         return ret;
5589 }
5590
5591 static unsigned int
5592 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5593 {
5594         struct ftrace_buffer_info *info = filp->private_data;
5595         struct trace_iterator *iter = &info->iter;
5596
5597         return trace_poll(iter, filp, poll_table);
5598 }
5599
5600 static ssize_t
5601 tracing_buffers_read(struct file *filp, char __user *ubuf,
5602                      size_t count, loff_t *ppos)
5603 {
5604         struct ftrace_buffer_info *info = filp->private_data;
5605         struct trace_iterator *iter = &info->iter;
5606         ssize_t ret;
5607         ssize_t size;
5608
5609         if (!count)
5610                 return 0;
5611
5612 #ifdef CONFIG_TRACER_MAX_TRACE
5613         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5614                 return -EBUSY;
5615 #endif
5616
5617         if (!info->spare)
5618                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5619                                                           iter->cpu_file);
5620         if (!info->spare)
5621                 return -ENOMEM;
5622
5623         /* Do we have previous read data to read? */
5624         if (info->read < PAGE_SIZE)
5625                 goto read;
5626
5627  again:
5628         trace_access_lock(iter->cpu_file);
5629         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5630                                     &info->spare,
5631                                     count,
5632                                     iter->cpu_file, 0);
5633         trace_access_unlock(iter->cpu_file);
5634
5635         if (ret < 0) {
5636                 if (trace_empty(iter)) {
5637                         if ((filp->f_flags & O_NONBLOCK))
5638                                 return -EAGAIN;
5639
5640                         ret = wait_on_pipe(iter, false);
5641                         if (ret)
5642                                 return ret;
5643
5644                         goto again;
5645                 }
5646                 return 0;
5647         }
5648
5649         info->read = 0;
5650  read:
5651         size = PAGE_SIZE - info->read;
5652         if (size > count)
5653                 size = count;
5654
5655         ret = copy_to_user(ubuf, info->spare + info->read, size);
5656         if (ret == size)
5657                 return -EFAULT;
5658
5659         size -= ret;
5660
5661         *ppos += size;
5662         info->read += size;
5663
5664         return size;
5665 }
5666
5667 static int tracing_buffers_release(struct inode *inode, struct file *file)
5668 {
5669         struct ftrace_buffer_info *info = file->private_data;
5670         struct trace_iterator *iter = &info->iter;
5671
5672         mutex_lock(&trace_types_lock);
5673
5674         iter->tr->current_trace->ref--;
5675
5676         __trace_array_put(iter->tr);
5677
5678         if (info->spare)
5679                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5680         kfree(info);
5681
5682         mutex_unlock(&trace_types_lock);
5683
5684         return 0;
5685 }
5686
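/*
 * Reference-counted handle on a ring buffer read page, used by the
 * pipe buffers set up in tracing_buffers_splice_read(). The page is
 * returned to the ring buffer when the last reference is dropped.
 */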
5687 struct buffer_ref {
5688         struct ring_buffer      *buffer;
5689         void                    *page;
5690         int                     ref;
5691 };
5692
5693 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5694                                     struct pipe_buffer *buf)
5695 {
5696         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5697
5698         if (--ref->ref)
5699                 return;
5700
5701         ring_buffer_free_read_page(ref->buffer, ref->page);
5702         kfree(ref);
5703         buf->private = 0;
5704 }
5705
5706 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5707                                 struct pipe_buffer *buf)
5708 {
5709         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5710
5711         ref->ref++;
5712 }
5713
5714 /* Pipe buffer operations for a buffer. */
5715 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5716         .can_merge              = 0,
5717         .confirm                = generic_pipe_buf_confirm,
5718         .release                = buffer_pipe_buf_release,
5719         .steal                  = generic_pipe_buf_steal,
5720         .get                    = buffer_pipe_buf_get,
5721 };
5722
5723 /*
5724  * Callback from splice_to_pipe(), used to release any pages left over
5725  * at the end of the spd if we errored out while filling the pipe.
5726  */
5727 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5728 {
5729         struct buffer_ref *ref =
5730                 (struct buffer_ref *)spd->partial[i].private;
5731
5732         if (--ref->ref)
5733                 return;
5734
5735         ring_buffer_free_read_page(ref->buffer, ref->page);
5736         kfree(ref);
5737         spd->partial[i].private = 0;
5738 }
5739
5740 static ssize_t
5741 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5742                             struct pipe_inode_info *pipe, size_t len,
5743                             unsigned int flags)
5744 {
5745         struct ftrace_buffer_info *info = file->private_data;
5746         struct trace_iterator *iter = &info->iter;
5747         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5748         struct page *pages_def[PIPE_DEF_BUFFERS];
5749         struct splice_pipe_desc spd = {
5750                 .pages          = pages_def,
5751                 .partial        = partial_def,
5752                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5753                 .flags          = flags,
5754                 .ops            = &buffer_pipe_buf_ops,
5755                 .spd_release    = buffer_spd_release,
5756         };
5757         struct buffer_ref *ref;
5758         int entries, size, i;
5759         ssize_t ret = 0;
5760
5761 #ifdef CONFIG_TRACER_MAX_TRACE
5762         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5763                 return -EBUSY;
5764 #endif
5765
5766         if (*ppos & (PAGE_SIZE - 1))
5767                 return -EINVAL;
5768
5769         if (len & (PAGE_SIZE - 1)) {
5770                 if (len < PAGE_SIZE)
5771                         return -EINVAL;
5772                 len &= PAGE_MASK;
5773         }
5774
5775         if (splice_grow_spd(pipe, &spd))
5776                 return -ENOMEM;
5777
5778  again:
5779         trace_access_lock(iter->cpu_file);
5780         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5781
5782         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5783                 struct page *page;
5784                 int r;
5785
5786                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5787                 if (!ref) {
5788                         ret = -ENOMEM;
5789                         break;
5790                 }
5791
5792                 ref->ref = 1;
5793                 ref->buffer = iter->trace_buffer->buffer;
5794                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5795                 if (!ref->page) {
5796                         ret = -ENOMEM;
5797                         kfree(ref);
5798                         break;
5799                 }
5800
5801                 r = ring_buffer_read_page(ref->buffer, &ref->page,
5802                                           len, iter->cpu_file, 1);
5803                 if (r < 0) {
5804                         ring_buffer_free_read_page(ref->buffer, ref->page);
5805                         kfree(ref);
5806                         break;
5807                 }
5808
5809                 /*
5810                  * Zero out any leftover data; this page is going
5811                  * to user land.
5812                  */
5813                 size = ring_buffer_page_len(ref->page);
5814                 if (size < PAGE_SIZE)
5815                         memset(ref->page + size, 0, PAGE_SIZE - size);
5816
5817                 page = virt_to_page(ref->page);
5818
5819                 spd.pages[i] = page;
5820                 spd.partial[i].len = PAGE_SIZE;
5821                 spd.partial[i].offset = 0;
5822                 spd.partial[i].private = (unsigned long)ref;
5823                 spd.nr_pages++;
5824                 *ppos += PAGE_SIZE;
5825
5826                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5827         }
5828
5829         trace_access_unlock(iter->cpu_file);
5830         spd.nr_pages = i;
5831
5832         /* did we read anything? */
5833         if (!spd.nr_pages) {
5834                 if (ret)
5835                         goto out;
5836
5837                 ret = -EAGAIN;
5838                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5839                         goto out;
5840
5841                 ret = wait_on_pipe(iter, true);
5842                 if (ret)
5843                         goto out;
5844                 goto again;
5845         }
5846
5847         ret = splice_to_pipe(pipe, &spd);
5848  out:
5849         splice_shrink_spd(&spd);
5850         return ret;
5851 }
5852
5853 static const struct file_operations tracing_buffers_fops = {
5854         .open           = tracing_buffers_open,
5855         .read           = tracing_buffers_read,
5856         .poll           = tracing_buffers_poll,
5857         .release        = tracing_buffers_release,
5858         .splice_read    = tracing_buffers_splice_read,
5859         .llseek         = no_llseek,
5860 };
5861
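/*
 * Dump per-cpu ring buffer statistics (entries, overruns, byte counts,
 * timestamps, dropped and read events) for the per_cpu/cpuN/stats files.
 */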
5862 static ssize_t
5863 tracing_stats_read(struct file *filp, char __user *ubuf,
5864                    size_t count, loff_t *ppos)
5865 {
5866         struct inode *inode = file_inode(filp);
5867         struct trace_array *tr = inode->i_private;
5868         struct trace_buffer *trace_buf = &tr->trace_buffer;
5869         int cpu = tracing_get_cpu(inode);
5870         struct trace_seq *s;
5871         unsigned long cnt;
5872         unsigned long long t;
5873         unsigned long usec_rem;
5874
5875         s = kmalloc(sizeof(*s), GFP_KERNEL);
5876         if (!s)
5877                 return -ENOMEM;
5878
5879         trace_seq_init(s);
5880
5881         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5882         trace_seq_printf(s, "entries: %ld\n", cnt);
5883
5884         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5885         trace_seq_printf(s, "overrun: %ld\n", cnt);
5886
5887         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5888         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5889
5890         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5891         trace_seq_printf(s, "bytes: %ld\n", cnt);
5892
5893         if (trace_clocks[tr->clock_id].in_ns) {
5894                 /* local or global for trace_clock */
5895                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5896                 usec_rem = do_div(t, USEC_PER_SEC);
5897                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5898                                                                 t, usec_rem);
5899
5900                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5901                 usec_rem = do_div(t, USEC_PER_SEC);
5902                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5903         } else {
5904                 /* counter or tsc mode for trace_clock */
5905                 trace_seq_printf(s, "oldest event ts: %llu\n",
5906                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5907
5908                 trace_seq_printf(s, "now ts: %llu\n",
5909                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5910         }
5911
5912         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5913         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5914
5915         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5916         trace_seq_printf(s, "read events: %ld\n", cnt);
5917
5918         count = simple_read_from_buffer(ubuf, count, ppos,
5919                                         s->buffer, trace_seq_used(s));
5920
5921         kfree(s);
5922
5923         return count;
5924 }
5925
5926 static const struct file_operations tracing_stats_fops = {
5927         .open           = tracing_open_generic_tr,
5928         .read           = tracing_stats_read,
5929         .llseek         = generic_file_llseek,
5930         .release        = tracing_release_generic_tr,
5931 };
5932
5933 #ifdef CONFIG_DYNAMIC_FTRACE
5934
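/*
 * Arch hook that lets an architecture append extra dynamic ftrace
 * information to the tracing_read_dyn_info() output; the weak default
 * adds nothing.
 */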
5935 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5936 {
5937         return 0;
5938 }
5939
5940 static ssize_t
5941 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5942                   size_t cnt, loff_t *ppos)
5943 {
5944         static char ftrace_dyn_info_buffer[1024];
5945         static DEFINE_MUTEX(dyn_info_mutex);
5946         unsigned long *p = filp->private_data;
5947         char *buf = ftrace_dyn_info_buffer;
5948         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5949         int r;
5950
5951         mutex_lock(&dyn_info_mutex);
5952         r = sprintf(buf, "%ld ", *p);
5953
5954         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5955         buf[r++] = '\n';
5956
5957         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5958
5959         mutex_unlock(&dyn_info_mutex);
5960
5961         return r;
5962 }
5963
5964 static const struct file_operations tracing_dyn_info_fops = {
5965         .open           = tracing_open_generic,
5966         .read           = tracing_read_dyn_info,
5967         .llseek         = generic_file_llseek,
5968 };
5969 #endif /* CONFIG_DYNAMIC_FTRACE */
5970
5971 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5972 static void
5973 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5974 {
5975         tracing_snapshot();
5976 }
5977
5978 static void
5979 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5980 {
5981         unsigned long *count = (long *)data;
5982
5983         if (!*count)
5984                 return;
5985
5986         if (*count != -1)
5987                 (*count)--;
5988
5989         tracing_snapshot();
5990 }
5991
5992 static int
5993 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5994                       struct ftrace_probe_ops *ops, void *data)
5995 {
5996         long count = (long)data;
5997
5998         seq_printf(m, "%ps:", (void *)ip);
5999
6000         seq_puts(m, "snapshot");
6001
6002         if (count == -1)
6003                 seq_puts(m, ":unlimited\n");
6004         else
6005                 seq_printf(m, ":count=%ld\n", count);
6006
6007         return 0;
6008 }
6009
6010 static struct ftrace_probe_ops snapshot_probe_ops = {
6011         .func                   = ftrace_snapshot,
6012         .print                  = ftrace_snapshot_print,
6013 };
6014
6015 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6016         .func                   = ftrace_count_snapshot,
6017         .print                  = ftrace_snapshot_print,
6018 };
6019
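/*
 * Handle the "func:snapshot[:count]" command from set_ftrace_filter:
 * register (or, with a leading '!', unregister) a function probe that
 * takes a snapshot, optionally limited to "count" hits.
 */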
6020 static int
6021 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6022                                char *glob, char *cmd, char *param, int enable)
6023 {
6024         struct ftrace_probe_ops *ops;
6025         void *count = (void *)-1;
6026         char *number;
6027         int ret;
6028
6029         /* hash funcs only work with set_ftrace_filter */
6030         if (!enable)
6031                 return -EINVAL;
6032
6033         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6034
6035         if (glob[0] == '!') {
6036                 unregister_ftrace_function_probe_func(glob+1, ops);
6037                 return 0;
6038         }
6039
6040         if (!param)
6041                 goto out_reg;
6042
6043         number = strsep(&param, ":");
6044
6045         if (!strlen(number))
6046                 goto out_reg;
6047
6048         /*
6049          * We use the callback data field (which is a pointer)
6050          * as our counter.
6051          */
6052         ret = kstrtoul(number, 0, (unsigned long *)&count);
6053         if (ret)
6054                 return ret;
6055
6056  out_reg:
6057         ret = register_ftrace_function_probe(glob, ops, count);
6058
6059         if (ret >= 0)
6060                 alloc_snapshot(&global_trace);
6061
6062         return ret < 0 ? ret : 0;
6063 }
6064
6065 static struct ftrace_func_command ftrace_snapshot_cmd = {
6066         .name                   = "snapshot",
6067         .func                   = ftrace_trace_snapshot_callback,
6068 };
6069
6070 static __init int register_snapshot_cmd(void)
6071 {
6072         return register_ftrace_command(&ftrace_snapshot_cmd);
6073 }
6074 #else
6075 static inline __init int register_snapshot_cmd(void) { return 0; }
6076 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6077
6078 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6079 {
6080         if (WARN_ON(!tr->dir))
6081                 return ERR_PTR(-ENODEV);
6082
6083         /* Top directory uses NULL as the parent */
6084         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6085                 return NULL;
6086
6087         /* All sub buffers have a descriptor */
6088         return tr->dir;
6089 }
6090
6091 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6092 {
6093         struct dentry *d_tracer;
6094
6095         if (tr->percpu_dir)
6096                 return tr->percpu_dir;
6097
6098         d_tracer = tracing_get_dentry(tr);
6099         if (IS_ERR(d_tracer))
6100                 return NULL;
6101
6102         tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6103
6104         WARN_ONCE(!tr->percpu_dir,
6105                   "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6106
6107         return tr->percpu_dir;
6108 }
6109
6110 static struct dentry *
6111 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6112                       void *data, long cpu, const struct file_operations *fops)
6113 {
6114         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6115
6116         if (ret) /* See tracing_get_cpu() */
6117                 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6118         return ret;
6119 }
6120
6121 static void
6122 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6123 {
6124         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6125         struct dentry *d_cpu;
6126         char cpu_dir[30]; /* 30 characters should be more than enough */
6127
6128         if (!d_percpu)
6129                 return;
6130
6131         snprintf(cpu_dir, 30, "cpu%ld", cpu);
6132         d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6133         if (!d_cpu) {
6134                 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6135                 return;
6136         }
6137
6138         /* per cpu trace_pipe */
6139         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6140                                 tr, cpu, &tracing_pipe_fops);
6141
6142         /* per cpu trace */
6143         trace_create_cpu_file("trace", 0644, d_cpu,
6144                                 tr, cpu, &tracing_fops);
6145
6146         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6147                                 tr, cpu, &tracing_buffers_fops);
6148
6149         trace_create_cpu_file("stats", 0444, d_cpu,
6150                                 tr, cpu, &tracing_stats_fops);
6151
6152         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6153                                 tr, cpu, &tracing_entries_fops);
6154
6155 #ifdef CONFIG_TRACER_SNAPSHOT
6156         trace_create_cpu_file("snapshot", 0644, d_cpu,
6157                                 tr, cpu, &snapshot_fops);
6158
6159         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6160                                 tr, cpu, &snapshot_raw_fops);
6161 #endif
6162 }
6163
6164 #ifdef CONFIG_FTRACE_SELFTEST
6165 /* Let selftest have access to static functions in this file */
6166 #include "trace_selftest.c"
6167 #endif
6168
6169 static ssize_t
6170 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6171                         loff_t *ppos)
6172 {
6173         struct trace_option_dentry *topt = filp->private_data;
6174         char *buf;
6175
6176         if (topt->flags->val & topt->opt->bit)
6177                 buf = "1\n";
6178         else
6179                 buf = "0\n";
6180
6181         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6182 }
6183
6184 static ssize_t
6185 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6186                          loff_t *ppos)
6187 {
6188         struct trace_option_dentry *topt = filp->private_data;
6189         unsigned long val;
6190         int ret;
6191
6192         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6193         if (ret)
6194                 return ret;
6195
6196         if (val != 0 && val != 1)
6197                 return -EINVAL;
6198
6199         if (!!(topt->flags->val & topt->opt->bit) != val) {
6200                 mutex_lock(&trace_types_lock);
6201                 ret = __set_tracer_option(topt->tr, topt->flags,
6202                                           topt->opt, !val);
6203                 mutex_unlock(&trace_types_lock);
6204                 if (ret)
6205                         return ret;
6206         }
6207
6208         *ppos += cnt;
6209
6210         return cnt;
6211 }
6212
6213
6214 static const struct file_operations trace_options_fops = {
6215         .open = tracing_open_generic,
6216         .read = trace_options_read,
6217         .write = trace_options_write,
6218         .llseek = generic_file_llseek,
6219 };
6220
6221 /*
6222  * In order to pass in both the trace_array descriptor as well as the index
6223  * to the flag that the trace option file represents, the trace_array
6224  * has a character array of trace_flags_index[], which holds the index
6225  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6226  * The address of this character array is passed to the flag option file
6227  * read/write callbacks.
6228  *
6229  * In order to extract both the index and the trace_array descriptor,
6230  * get_tr_index() uses the following algorithm.
6231  *
6232  *   idx = *ptr;
6233  *
6234  * As the pointer itself contains the address of the index (remember
6235  * index[1] == 1).
6236  *
6237  * Then to get the trace_array descriptor, by subtracting that index
6238  * from the ptr, we get to the start of the index itself.
6239  *
6240  *   ptr - idx == &index[0]
6241  *
6242  * Then a simple container_of() from that pointer gets us to the
6243  * trace_array descriptor.
6244  */
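/*
 * For example, if data points at tr->trace_flags_index[3], then
 * *data == 3 and data - 3 == &tr->trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.
 */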
6245 static void get_tr_index(void *data, struct trace_array **ptr,
6246                          unsigned int *pindex)
6247 {
6248         *pindex = *(unsigned char *)data;
6249
6250         *ptr = container_of(data - *pindex, struct trace_array,
6251                             trace_flags_index);
6252 }
6253
6254 static ssize_t
6255 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6256                         loff_t *ppos)
6257 {
6258         void *tr_index = filp->private_data;
6259         struct trace_array *tr;
6260         unsigned int index;
6261         char *buf;
6262
6263         get_tr_index(tr_index, &tr, &index);
6264
6265         if (tr->trace_flags & (1 << index))
6266                 buf = "1\n";
6267         else
6268                 buf = "0\n";
6269
6270         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6271 }
6272
6273 static ssize_t
6274 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6275                          loff_t *ppos)
6276 {
6277         void *tr_index = filp->private_data;
6278         struct trace_array *tr;
6279         unsigned int index;
6280         unsigned long val;
6281         int ret;
6282
6283         get_tr_index(tr_index, &tr, &index);
6284
6285         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6286         if (ret)
6287                 return ret;
6288
6289         if (val != 0 && val != 1)
6290                 return -EINVAL;
6291
6292         mutex_lock(&trace_types_lock);
6293         ret = set_tracer_flag(tr, 1 << index, val);
6294         mutex_unlock(&trace_types_lock);
6295
6296         if (ret < 0)
6297                 return ret;
6298
6299         *ppos += cnt;
6300
6301         return cnt;
6302 }
6303
6304 static const struct file_operations trace_options_core_fops = {
6305         .open = tracing_open_generic,
6306         .read = trace_options_core_read,
6307         .write = trace_options_core_write,
6308         .llseek = generic_file_llseek,
6309 };
6310
6311 struct dentry *trace_create_file(const char *name,
6312                                  umode_t mode,
6313                                  struct dentry *parent,
6314                                  void *data,
6315                                  const struct file_operations *fops)
6316 {
6317         struct dentry *ret;
6318
6319         ret = tracefs_create_file(name, mode, parent, data, fops);
6320         if (!ret)
6321                 pr_warning("Could not create tracefs '%s' entry\n", name);
6322
6323         return ret;
6324 }
6325
6326
6327 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6328 {
6329         struct dentry *d_tracer;
6330
6331         if (tr->options)
6332                 return tr->options;
6333
6334         d_tracer = tracing_get_dentry(tr);
6335         if (IS_ERR(d_tracer))
6336                 return NULL;
6337
6338         tr->options = tracefs_create_dir("options", d_tracer);
6339         if (!tr->options) {
6340                 pr_warning("Could not create tracefs directory 'options'\n");
6341                 return NULL;
6342         }
6343
6344         return tr->options;
6345 }
6346
6347 static void
6348 create_trace_option_file(struct trace_array *tr,
6349                          struct trace_option_dentry *topt,
6350                          struct tracer_flags *flags,
6351                          struct tracer_opt *opt)
6352 {
6353         struct dentry *t_options;
6354
6355         t_options = trace_options_init_dentry(tr);
6356         if (!t_options)
6357                 return;
6358
6359         topt->flags = flags;
6360         topt->opt = opt;
6361         topt->tr = tr;
6362
6363         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6364                                     &trace_options_fops);
6365
6366 }
6367
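/*
 * Create an options/<flag> file for each of the tracer's private flags,
 * unless the same flag set was already added by another tracer that
 * shares it.
 */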
6368 static void
6369 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6370 {
6371         struct trace_option_dentry *topts;
6372         struct trace_options *tr_topts;
6373         struct tracer_flags *flags;
6374         struct tracer_opt *opts;
6375         int cnt;
6376         int i;
6377
6378         if (!tracer)
6379                 return;
6380
6381         flags = tracer->flags;
6382
6383         if (!flags || !flags->opts)
6384                 return;
6385
6386         /*
6387          * If this is an instance, only create flags for tracers
6388          * the instance may have.
6389          */
6390         if (!trace_ok_for_array(tracer, tr))
6391                 return;
6392
6393         for (i = 0; i < tr->nr_topts; i++) {
6394                 /*
6395                  * Check if these flags have already been added.
6396                  * Some tracers share flags.
6397                  */
6398                 if (tr->topts[i].tracer->flags == tracer->flags)
6399                         return;
6400         }
6401
6402         opts = flags->opts;
6403
6404         for (cnt = 0; opts[cnt].name; cnt++)
6405                 ;
6406
6407         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6408         if (!topts)
6409                 return;
6410
6411         tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6412                             GFP_KERNEL);
6413         if (!tr_topts) {
6414                 kfree(topts);
6415                 return;
6416         }
6417
6418         tr->topts = tr_topts;
6419         tr->topts[tr->nr_topts].tracer = tracer;
6420         tr->topts[tr->nr_topts].topts = topts;
6421         tr->nr_topts++;
6422
6423         for (cnt = 0; opts[cnt].name; cnt++) {
6424                 create_trace_option_file(tr, &topts[cnt], flags,
6425                                          &opts[cnt]);
6426                 WARN_ONCE(topts[cnt].entry == NULL,
6427                           "Failed to create trace option: %s",
6428                           opts[cnt].name);
6429         }
6430 }
6431
6432 static struct dentry *
6433 create_trace_option_core_file(struct trace_array *tr,
6434                               const char *option, long index)
6435 {
6436         struct dentry *t_options;
6437
6438         t_options = trace_options_init_dentry(tr);
6439         if (!t_options)
6440                 return NULL;
6441
6442         return trace_create_file(option, 0644, t_options,
6443                                  (void *)&tr->trace_flags_index[index],
6444                                  &trace_options_core_fops);
6445 }
6446
6447 static void create_trace_options_dir(struct trace_array *tr)
6448 {
6449         struct dentry *t_options;
6450         bool top_level = tr == &global_trace;
6451         int i;
6452
6453         t_options = trace_options_init_dentry(tr);
6454         if (!t_options)
6455                 return;
6456
6457         for (i = 0; trace_options[i]; i++) {
6458                 if (top_level ||
6459                     !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6460                         create_trace_option_core_file(tr, trace_options[i], i);
6461         }
6462 }
6463
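/*
 * These handlers back the per instance "tracing_on" file.  Reading it
 * reports whether the ring buffer is currently recording, and writing
 * '0' or '1' stops or restarts recording, for example (illustrative):
 *
 *   cat /sys/kernel/tracing/tracing_on
 *   echo 0 > /sys/kernel/tracing/tracing_on
 */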
6464 static ssize_t
6465 rb_simple_read(struct file *filp, char __user *ubuf,
6466                size_t cnt, loff_t *ppos)
6467 {
6468         struct trace_array *tr = filp->private_data;
6469         char buf[64];
6470         int r;
6471
6472         r = tracer_tracing_is_on(tr);
6473         r = sprintf(buf, "%d\n", r);
6474
6475         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6476 }
6477
6478 static ssize_t
6479 rb_simple_write(struct file *filp, const char __user *ubuf,
6480                 size_t cnt, loff_t *ppos)
6481 {
6482         struct trace_array *tr = filp->private_data;
6483         struct ring_buffer *buffer = tr->trace_buffer.buffer;
6484         unsigned long val;
6485         int ret;
6486
6487         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6488         if (ret)
6489                 return ret;
6490
6491         if (buffer) {
6492                 mutex_lock(&trace_types_lock);
6493                 if (val) {
6494                         tracer_tracing_on(tr);
6495                         if (tr->current_trace->start)
6496                                 tr->current_trace->start(tr);
6497                 } else {
6498                         tracer_tracing_off(tr);
6499                         if (tr->current_trace->stop)
6500                                 tr->current_trace->stop(tr);
6501                 }
6502                 mutex_unlock(&trace_types_lock);
6503         }
6504
6505         (*ppos)++;
6506
6507         return cnt;
6508 }
6509
6510 static const struct file_operations rb_simple_fops = {
6511         .open           = tracing_open_generic_tr,
6512         .read           = rb_simple_read,
6513         .write          = rb_simple_write,
6514         .release        = tracing_release_generic_tr,
6515         .llseek         = default_llseek,
6516 };
6517
6518 struct dentry *trace_instance_dir;
6519
6520 static void
6521 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6522
6523 static int
6524 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6525 {
6526         enum ring_buffer_flags rb_flags;
6527
6528         rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6529
6530         buf->tr = tr;
6531
6532         buf->buffer = ring_buffer_alloc(size, rb_flags);
6533         if (!buf->buffer)
6534                 return -ENOMEM;
6535
6536         buf->data = alloc_percpu(struct trace_array_cpu);
6537         if (!buf->data) {
6538                 ring_buffer_free(buf->buffer);
6539                 return -ENOMEM;
6540         }
6541
6542         /* Allocate the first page for all buffers */
6543         set_buffer_entries(&tr->trace_buffer,
6544                            ring_buffer_size(tr->trace_buffer.buffer, 0));
6545
6546         return 0;
6547 }
6548
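/*
 * Allocate the buffers for a trace array: the main trace buffer and,
 * when CONFIG_TRACER_MAX_TRACE is set, the max (snapshot) buffer.  The
 * snapshot buffer is kept at its minimum size unless a snapshot was
 * requested on the kernel command line (allocate_snapshot).
 */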
6549 static int allocate_trace_buffers(struct trace_array *tr, int size)
6550 {
6551         int ret;
6552
6553         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6554         if (ret)
6555                 return ret;
6556
6557 #ifdef CONFIG_TRACER_MAX_TRACE
6558         ret = allocate_trace_buffer(tr, &tr->max_buffer,
6559                                     allocate_snapshot ? size : 1);
6560         if (WARN_ON(ret)) {
6561                 ring_buffer_free(tr->trace_buffer.buffer);
6562                 free_percpu(tr->trace_buffer.data);
6563                 return -ENOMEM;
6564         }
6565         tr->allocated_snapshot = allocate_snapshot;
6566
6567         /*
6568          * Only the top level trace array gets its snapshot allocated
6569          * from the kernel command line.
6570          */
6571         allocate_snapshot = false;
6572 #endif
6573         return 0;
6574 }
6575
6576 static void free_trace_buffer(struct trace_buffer *buf)
6577 {
6578         if (buf->buffer) {
6579                 ring_buffer_free(buf->buffer);
6580                 buf->buffer = NULL;
6581                 free_percpu(buf->data);
6582                 buf->data = NULL;
6583         }
6584 }
6585
6586 static void free_trace_buffers(struct trace_array *tr)
6587 {
6588         if (!tr)
6589                 return;
6590
6591         free_trace_buffer(&tr->trace_buffer);
6592
6593 #ifdef CONFIG_TRACER_MAX_TRACE
6594         free_trace_buffer(&tr->max_buffer);
6595 #endif
6596 }
6597
6598 static void init_trace_flags_index(struct trace_array *tr)
6599 {
6600         int i;
6601
6602         /* Used by the trace options files */
6603         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6604                 tr->trace_flags_index[i] = i;
6605 }
6606
6607 static void __update_tracer_options(struct trace_array *tr)
6608 {
6609         struct tracer *t;
6610
6611         for (t = trace_types; t; t = t->next)
6612                 add_tracer_options(tr, t);
6613 }
6614
6615 static void update_tracer_options(struct trace_array *tr)
6616 {
6617         mutex_lock(&trace_types_lock);
6618         __update_tracer_options(tr);
6619         mutex_unlock(&trace_types_lock);
6620 }
6621
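/*
 * Instances are created and removed from user space with mkdir/rmdir
 * in the "instances" directory; tracefs routes those operations to
 * instance_mkdir() and instance_rmdir().  For example (illustrative):
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 */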
6622 static int instance_mkdir(const char *name)
6623 {
6624         struct trace_array *tr;
6625         int ret;
6626
6627         mutex_lock(&trace_types_lock);
6628
6629         ret = -EEXIST;
6630         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6631                 if (tr->name && strcmp(tr->name, name) == 0)
6632                         goto out_unlock;
6633         }
6634
6635         ret = -ENOMEM;
6636         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6637         if (!tr)
6638                 goto out_unlock;
6639
6640         tr->name = kstrdup(name, GFP_KERNEL);
6641         if (!tr->name)
6642                 goto out_free_tr;
6643
6644         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6645                 goto out_free_tr;
6646
6647         tr->trace_flags = global_trace.trace_flags;
6648
6649         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6650
6651         raw_spin_lock_init(&tr->start_lock);
6652
6653         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6654
6655         tr->current_trace = &nop_trace;
6656
6657         INIT_LIST_HEAD(&tr->systems);
6658         INIT_LIST_HEAD(&tr->events);
6659
6660         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6661                 goto out_free_tr;
6662
6663         tr->dir = tracefs_create_dir(name, trace_instance_dir);
6664         if (!tr->dir)
6665                 goto out_free_tr;
6666
6667         ret = event_trace_add_tracer(tr->dir, tr);
6668         if (ret) {
6669                 tracefs_remove_recursive(tr->dir);
6670                 goto out_free_tr;
6671         }
6672
6673         init_tracer_tracefs(tr, tr->dir);
6674         init_trace_flags_index(tr);
6675         __update_tracer_options(tr);
6676
6677         list_add(&tr->list, &ftrace_trace_arrays);
6678
6679         mutex_unlock(&trace_types_lock);
6680
6681         return 0;
6682
6683  out_free_tr:
6684         free_trace_buffers(tr);
6685         free_cpumask_var(tr->tracing_cpumask);
6686         kfree(tr->name);
6687         kfree(tr);
6688
6689  out_unlock:
6690         mutex_unlock(&trace_types_lock);
6691
6692         return ret;
6693
6694 }
6695
6696 static int instance_rmdir(const char *name)
6697 {
6698         struct trace_array *tr;
6699         int found = 0;
6700         int ret;
6701         int i;
6702
6703         mutex_lock(&trace_types_lock);
6704
6705         ret = -ENODEV;
6706         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6707                 if (tr->name && strcmp(tr->name, name) == 0) {
6708                         found = 1;
6709                         break;
6710                 }
6711         }
6712         if (!found)
6713                 goto out_unlock;
6714
6715         ret = -EBUSY;
6716         if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6717                 goto out_unlock;
6718
6719         list_del(&tr->list);
6720
6721         tracing_set_nop(tr);
6722         event_trace_del_tracer(tr);
6723         ftrace_destroy_function_files(tr);
6724         tracefs_remove_recursive(tr->dir);
6725         free_trace_buffers(tr);
6726
6727         for (i = 0; i < tr->nr_topts; i++) {
6728                 kfree(tr->topts[i].topts);
6729         }
6730         kfree(tr->topts);
6731
6732         kfree(tr->name);
6733         kfree(tr);
6734
6735         ret = 0;
6736
6737  out_unlock:
6738         mutex_unlock(&trace_types_lock);
6739
6740         return ret;
6741 }
6742
6743 static __init void create_trace_instances(struct dentry *d_tracer)
6744 {
6745         trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6746                                                          instance_mkdir,
6747                                                          instance_rmdir);
6748         if (WARN_ON(!trace_instance_dir))
6749                 return;
6750 }
6751
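/*
 * Populate a trace array's tracefs directory with the standard control
 * files (current_tracer, trace, trace_pipe, buffer_size_kb, tracing_on,
 * etc.), the options directory and the per_cpu entries.
 */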
6752 static void
6753 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6754 {
6755         int cpu;
6756
6757         trace_create_file("available_tracers", 0444, d_tracer,
6758                         tr, &show_traces_fops);
6759
6760         trace_create_file("current_tracer", 0644, d_tracer,
6761                         tr, &set_tracer_fops);
6762
6763         trace_create_file("tracing_cpumask", 0644, d_tracer,
6764                           tr, &tracing_cpumask_fops);
6765
6766         trace_create_file("trace_options", 0644, d_tracer,
6767                           tr, &tracing_iter_fops);
6768
6769         trace_create_file("trace", 0644, d_tracer,
6770                           tr, &tracing_fops);
6771
6772         trace_create_file("trace_pipe", 0444, d_tracer,
6773                           tr, &tracing_pipe_fops);
6774
6775         trace_create_file("buffer_size_kb", 0644, d_tracer,
6776                           tr, &tracing_entries_fops);
6777
6778         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6779                           tr, &tracing_total_entries_fops);
6780
6781         trace_create_file("free_buffer", 0200, d_tracer,
6782                           tr, &tracing_free_buffer_fops);
6783
6784         trace_create_file("trace_marker", 0220, d_tracer,
6785                           tr, &tracing_mark_fops);
6786
6787         trace_create_file("trace_clock", 0644, d_tracer, tr,
6788                           &trace_clock_fops);
6789
6790         trace_create_file("tracing_on", 0644, d_tracer,
6791                           tr, &rb_simple_fops);
6792
6793         create_trace_options_dir(tr);
6794
6795 #ifdef CONFIG_TRACER_MAX_TRACE
6796         trace_create_file("tracing_max_latency", 0644, d_tracer,
6797                         &tr->max_latency, &tracing_max_lat_fops);
6798 #endif
6799
6800         if (ftrace_create_function_files(tr, d_tracer))
6801                 WARN(1, "Could not allocate function filter files");
6802
6803 #ifdef CONFIG_TRACER_SNAPSHOT
6804         trace_create_file("snapshot", 0644, d_tracer,
6805                           tr, &snapshot_fops);
6806 #endif
6807
6808         for_each_tracing_cpu(cpu)
6809                 tracing_init_tracefs_percpu(tr, cpu);
6810
6811 }
6812
6813 static struct vfsmount *trace_automount(void *ignore)
6814 {
6815         struct vfsmount *mnt;
6816         struct file_system_type *type;
6817
6818         /*
6819          * To maintain backward compatibility for tools that mount
6820          * debugfs to get to the tracing facility, tracefs is automatically
6821          * mounted to the debugfs/tracing directory.
6822          */
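	/*
	 * For example (illustrative paths): with this automount in
	 * place, legacy tools can keep reading
	 *
	 *   /sys/kernel/debug/tracing/trace
	 *
	 * while newer tools use the tracefs mount point directly,
	 * typically /sys/kernel/tracing/trace.
	 */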
6823         type = get_fs_type("tracefs");
6824         if (!type)
6825                 return NULL;
6826         mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6827         put_filesystem(type);
6828         if (IS_ERR(mnt))
6829                 return NULL;
6830         mntget(mnt);
6831
6832         return mnt;
6833 }
6834
6835 /**
6836  * tracing_init_dentry - initialize top level trace array
6837  *
6838  * This is called when creating files or directories in the tracing
6839  * directory. It is called from the boot up code via fs_initcall() and
6840  * is expected to return the dentry of the top level tracing directory.
6841  */
6842 struct dentry *tracing_init_dentry(void)
6843 {
6844         struct trace_array *tr = &global_trace;
6845
6846         /* The top level trace array uses NULL as parent */
6847         if (tr->dir)
6848                 return NULL;
6849
6850         if (WARN_ON(!debugfs_initialized()))
6851                 return ERR_PTR(-ENODEV);
6852
6853         /*
6854          * As there may still be users that expect the tracing
6855          * files to exist in debugfs/tracing, we must automount
6856          * the tracefs file system there, so older tools still
6857          * work with the newer kernel.
6858          */
6859         tr->dir = debugfs_create_automount("tracing", NULL,
6860                                            trace_automount, NULL);
6861         if (!tr->dir) {
6862                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6863                 return ERR_PTR(-ENOMEM);
6864         }
6865
6866         return NULL;
6867 }
6868
6869 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6870 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6871
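/*
 * TRACE_DEFINE_ENUM() maps built into the kernel image are collected by
 * the linker between __start_ftrace_enum_maps and __stop_ftrace_enum_maps.
 * trace_enum_init() registers them so enum names used in event print
 * formats can be resolved to their numeric values (and, when
 * CONFIG_TRACE_ENUM_MAP_FILE is enabled, exposed via the enum_map file).
 */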
6872 static void __init trace_enum_init(void)
6873 {
6874         int len;
6875
6876         len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6877         trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6878 }
6879
6880 #ifdef CONFIG_MODULES
6881 static void trace_module_add_enums(struct module *mod)
6882 {
6883         if (!mod->num_trace_enums)
6884                 return;
6885
6886         /*
6887          * Modules with bad taint do not have events created;
6888          * do not bother with enums either.
6889          */
6890         if (trace_module_has_bad_taint(mod))
6891                 return;
6892
6893         trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6894 }
6895
6896 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6897 static void trace_module_remove_enums(struct module *mod)
6898 {
6899         union trace_enum_map_item *map;
6900         union trace_enum_map_item **last = &trace_enum_maps;
6901
6902         if (!mod->num_trace_enums)
6903                 return;
6904
6905         mutex_lock(&trace_enum_mutex);
6906
6907         map = trace_enum_maps;
6908
6909         while (map) {
6910                 if (map->head.mod == mod)
6911                         break;
6912                 map = trace_enum_jmp_to_tail(map);
6913                 last = &map->tail.next;
6914                 map = map->tail.next;
6915         }
6916         if (!map)
6917                 goto out;
6918
6919         *last = trace_enum_jmp_to_tail(map)->tail.next;
6920         kfree(map);
6921  out:
6922         mutex_unlock(&trace_enum_mutex);
6923 }
6924 #else
6925 static inline void trace_module_remove_enums(struct module *mod) { }
6926 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6927
6928 static int trace_module_notify(struct notifier_block *self,
6929                                unsigned long val, void *data)
6930 {
6931         struct module *mod = data;
6932
6933         switch (val) {
6934         case MODULE_STATE_COMING:
6935                 trace_module_add_enums(mod);
6936                 break;
6937         case MODULE_STATE_GOING:
6938                 trace_module_remove_enums(mod);
6939                 break;
6940         }
6941
6942         return 0;
6943 }
6944
6945 static struct notifier_block trace_module_nb = {
6946         .notifier_call = trace_module_notify,
6947         .priority = 0,
6948 };
6949 #endif /* CONFIG_MODULES */
6950
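/*
 * Runs as an fs_initcall() (registered at the bottom of this file) and
 * creates the top level tracefs files, the "instances" directory and
 * the per instance files of the global trace array.
 */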
6951 static __init int tracer_init_tracefs(void)
6952 {
6953         struct dentry *d_tracer;
6954
6955         trace_access_lock_init();
6956
6957         d_tracer = tracing_init_dentry();
6958         if (IS_ERR(d_tracer))
6959                 return 0;
6960
6961         init_tracer_tracefs(&global_trace, d_tracer);
6962
6963         trace_create_file("tracing_thresh", 0644, d_tracer,
6964                         &global_trace, &tracing_thresh_fops);
6965
6966         trace_create_file("README", 0444, d_tracer,
6967                         NULL, &tracing_readme_fops);
6968
6969         trace_create_file("saved_cmdlines", 0444, d_tracer,
6970                         NULL, &tracing_saved_cmdlines_fops);
6971
6972         trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6973                           NULL, &tracing_saved_cmdlines_size_fops);
6974
6975         trace_enum_init();
6976
6977         trace_create_enum_file(d_tracer);
6978
6979 #ifdef CONFIG_MODULES
6980         register_module_notifier(&trace_module_nb);
6981 #endif
6982
6983 #ifdef CONFIG_DYNAMIC_FTRACE
6984         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6985                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6986 #endif
6987
6988         create_trace_instances(d_tracer);
6989
6990         update_tracer_options(&global_trace);
6991
6992         return 0;
6993 }
6994
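/*
 * The panic and die notifiers below dump the ftrace ring buffer to the
 * console when ftrace_dump_on_oops is set, e.g. (illustrative) via the
 * "ftrace_dump_on_oops" kernel command line option or the
 * kernel.ftrace_dump_on_oops sysctl.
 */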
6995 static int trace_panic_handler(struct notifier_block *this,
6996                                unsigned long event, void *unused)
6997 {
6998         if (ftrace_dump_on_oops)
6999                 ftrace_dump(ftrace_dump_on_oops);
7000         return NOTIFY_OK;
7001 }
7002
7003 static struct notifier_block trace_panic_notifier = {
7004         .notifier_call  = trace_panic_handler,
7005         .next           = NULL,
7006         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
7007 };
7008
7009 static int trace_die_handler(struct notifier_block *self,
7010                              unsigned long val,
7011                              void *data)
7012 {
7013         switch (val) {
7014         case DIE_OOPS:
7015                 if (ftrace_dump_on_oops)
7016                         ftrace_dump(ftrace_dump_on_oops);
7017                 break;
7018         default:
7019                 break;
7020         }
7021         return NOTIFY_OK;
7022 }
7023
7024 static struct notifier_block trace_die_notifier = {
7025         .notifier_call = trace_die_handler,
7026         .priority = 200
7027 };
7028
7029 /*
7030  * printk is set to a max of 1024; we really don't need it that big.
7031  * Nothing should be printing 1000 characters anyway.
7032  */
7033 #define TRACE_MAX_PRINT         1000
7034
7035 /*
7036  * Define here KERN_TRACE so that we have one place to modify
7037  * it if we decide to change what log level the ftrace dump
7038  * should be at.
7039  */
7040 #define KERN_TRACE              KERN_EMERG
7041
7042 void
7043 trace_printk_seq(struct trace_seq *s)
7044 {
7045         /* Probably should print a warning here. */
7046         if (s->seq.len >= TRACE_MAX_PRINT)
7047                 s->seq.len = TRACE_MAX_PRINT;
7048
7049         /*
7050          * More paranoid code. Although the buffer size is set to
7051          * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7052          * an extra layer of protection.
7053          */
7054         if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7055                 s->seq.len = s->seq.size - 1;
7056
7057         /* should be zero terminated, but we are paranoid. */
7058         s->buffer[s->seq.len] = 0;
7059
7060         printk(KERN_TRACE "%s", s->buffer);
7061
7062         trace_seq_init(s);
7063 }
7064
7065 void trace_init_global_iter(struct trace_iterator *iter)
7066 {
7067         iter->tr = &global_trace;
7068         iter->trace = iter->tr->current_trace;
7069         iter->cpu_file = RING_BUFFER_ALL_CPUS;
7070         iter->trace_buffer = &global_trace.trace_buffer;
7071
7072         if (iter->trace && iter->trace->open)
7073                 iter->trace->open(iter);
7074
7075         /* Annotate start of buffers if we had overruns */
7076         if (ring_buffer_overruns(iter->trace_buffer->buffer))
7077                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7078
7079         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7080         if (trace_clocks[iter->tr->clock_id].in_ns)
7081                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7082 }
7083
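/*
 * Dump the ring buffer contents to the console at KERN_EMERG level.
 * DUMP_ALL dumps the buffers of all CPUs, DUMP_ORIG only the buffer of
 * the CPU that triggered the dump (e.g., illustratively, booting with
 * ftrace_dump_on_oops=orig_cpu).
 */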
7084 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7085 {
7086         /* use static because iter can be a bit big for the stack */
7087         static struct trace_iterator iter;
7088         static atomic_t dump_running;
7089         struct trace_array *tr = &global_trace;
7090         unsigned int old_userobj;
7091         unsigned long flags;
7092         int cnt = 0, cpu;
7093
7094         /* Only allow one dump user at a time. */
7095         if (atomic_inc_return(&dump_running) != 1) {
7096                 atomic_dec(&dump_running);
7097                 return;
7098         }
7099
7100         /*
7101          * Always turn off tracing when we dump.
7102          * We don't need to show trace output of what happens
7103          * between multiple crashes.
7104          *
7105          * If the user does a sysrq-z, then they can re-enable
7106          * tracing with echo 1 > tracing_on.
7107          */
7108         tracing_off();
7109
7110         local_irq_save(flags);
7111
7112         /* Simulate the iterator */
7113         trace_init_global_iter(&iter);
7114
7115         for_each_tracing_cpu(cpu) {
7116                 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7117         }
7118
7119         old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7120
7121         /* don't look at user memory in panic mode */
7122         tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7123
7124         switch (oops_dump_mode) {
7125         case DUMP_ALL:
7126                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7127                 break;
7128         case DUMP_ORIG:
7129                 iter.cpu_file = raw_smp_processor_id();
7130                 break;
7131         case DUMP_NONE:
7132                 goto out_enable;
7133         default:
7134                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7135                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7136         }
7137
7138         printk(KERN_TRACE "Dumping ftrace buffer:\n");
7139
7140         /* Did function tracer already get disabled? */
7141         if (ftrace_is_dead()) {
7142                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7143                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
7144         }
7145
7146          * We need to stop all tracing on all CPUs to read
7147          * the next buffer. This is a bit expensive, but is
7148          * not done often. We read all that we can,
7149          * and then release the locks again.
7150          * and then release the locks again.
7151          */
7152
7153         while (!trace_empty(&iter)) {
7154
7155                 if (!cnt)
7156                         printk(KERN_TRACE "---------------------------------\n");
7157
7158                 cnt++;
7159
7160                 /* reset all but tr, trace, and overruns */
7161                 memset(&iter.seq, 0,
7162                        sizeof(struct trace_iterator) -
7163                        offsetof(struct trace_iterator, seq));
7164                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7165                 iter.pos = -1;
7166
7167                 if (trace_find_next_entry_inc(&iter) != NULL) {
7168                         int ret;
7169
7170                         ret = print_trace_line(&iter);
7171                         if (ret != TRACE_TYPE_NO_CONSUME)
7172                                 trace_consume(&iter);
7173                 }
7174                 touch_nmi_watchdog();
7175
7176                 trace_printk_seq(&iter.seq);
7177         }
7178
7179         if (!cnt)
7180                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
7181         else
7182                 printk(KERN_TRACE "---------------------------------\n");
7183
7184  out_enable:
7185         tr->trace_flags |= old_userobj;
7186
7187         for_each_tracing_cpu(cpu) {
7188                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7189         }
7190         atomic_dec(&dump_running);
7191         local_irq_restore(flags);
7192 }
7193 EXPORT_SYMBOL_GPL(ftrace_dump);
7194
7195 __init static int tracer_alloc_buffers(void)
7196 {
7197         int ring_buf_size;
7198         int ret = -ENOMEM;
7199
7200         /*
7201          * Make sure we don't accidentally add more trace options
7202          * than we have bits for.
7203          */
7204         BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7205
7206         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7207                 goto out;
7208
7209         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7210                 goto out_free_buffer_mask;
7211
7212         /* Only allocate trace_printk buffers if a trace_printk exists */
7213         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7214                 /* Must be called before global_trace.buffer is allocated */
7215                 trace_printk_init_buffers();
7216
7217         /* To save memory, keep the ring buffer size to its minimum */
7218         if (ring_buffer_expanded)
7219                 ring_buf_size = trace_buf_size;
7220         else
7221                 ring_buf_size = 1;
7222
7223         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7224         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7225
7226         raw_spin_lock_init(&global_trace.start_lock);
7227
7228         /* Used for event triggers */
7229         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7230         if (!temp_buffer)
7231                 goto out_free_cpumask;
7232
7233         if (trace_create_savedcmd() < 0)
7234                 goto out_free_temp_buffer;
7235
7236         /* TODO: make the number of buffers hot pluggable with CPUs */
7237         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7238                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7239                 WARN_ON(1);
7240                 goto out_free_savedcmd;
7241         }
7242
7243         if (global_trace.buffer_disabled)
7244                 tracing_off();
7245
7246         if (trace_boot_clock) {
7247                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7248                 if (ret < 0)
7249                         pr_warning("Trace clock %s not defined, going back to default\n",
7250                                    trace_boot_clock);
7251         }
7252
7253         /*
7254          * register_tracer() might reference current_trace, so it
7255          * needs to be set before we register anything. This is
7256          * just a bootstrap of current_trace anyway.
7257          */
7258         global_trace.current_trace = &nop_trace;
7259
7260         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7261
7262         ftrace_init_global_array_ops(&global_trace);
7263
7264         init_trace_flags_index(&global_trace);
7265
7266         register_tracer(&nop_trace);
7267
7268         /* All seems OK, enable tracing */
7269         tracing_disabled = 0;
7270
7271         atomic_notifier_chain_register(&panic_notifier_list,
7272                                        &trace_panic_notifier);
7273
7274         register_die_notifier(&trace_die_notifier);
7275
7276         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7277
7278         INIT_LIST_HEAD(&global_trace.systems);
7279         INIT_LIST_HEAD(&global_trace.events);
7280         list_add(&global_trace.list, &ftrace_trace_arrays);
7281
7282         apply_trace_boot_options();
7283
7284         register_snapshot_cmd();
7285
7286         return 0;
7287
7288 out_free_savedcmd:
7289         free_saved_cmdlines_buffer(savedcmd);
7290 out_free_temp_buffer:
7291         ring_buffer_free(temp_buffer);
7292 out_free_cpumask:
7293         free_cpumask_var(global_trace.tracing_cpumask);
7294 out_free_buffer_mask:
7295         free_cpumask_var(tracing_buffer_mask);
7296 out:
7297         return ret;
7298 }
7299
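/*
 * Called early in boot from start_kernel(), before the initcalls run,
 * so tracing and any events enabled on the command line are available
 * as soon as possible.
 */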
7300 void __init trace_init(void)
7301 {
7302         if (tracepoint_printk) {
7303                 tracepoint_print_iter =
7304                         kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7305                 if (WARN_ON(!tracepoint_print_iter))
7306                         tracepoint_printk = 0;
7307         }
7308         tracer_alloc_buffers();
7309         trace_event_init();
7310 }
7311
7312 __init static int clear_boot_tracer(void)
7313 {
7314         /*
7315          * The default bootup tracer string points into an init
7316          * section that will be freed. This function runs as a
7317          * late initcall; if the boot tracer was never registered,
7318          * clear the pointer so that a later registration does not
7319          * access the freed memory.
7320          */
7321         if (!default_bootup_tracer)
7322                 return 0;
7323
7324         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7325                default_bootup_tracer);
7326         default_bootup_tracer = NULL;
7327
7328         return 0;
7329 }
7330
7331 fs_initcall(tracer_init_tracefs);
7332 late_initcall(clear_boot_tracer);