tracing: Only create function graph options when it is compiled in
[cascardo/linux.git] kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

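/*
 * The TRACER_* bits below are combined in trace_type to select which
 * critical sections the current tracer variant watches: irqs off,
 * preempt off, or both.
 */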
enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;
static bool function_enabled;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

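/*
 * The display-graph option (and the callbacks behind it) only exists
 * when the function graph tracer is compiled in; otherwise is_graph()
 * is constant false and irqsoff_display_graph() rejects any attempt
 * to set the flag.
 */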
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph() (trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph() false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
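/*
 * Toggling the display-graph flag switches between function and
 * function graph output: the tracer is stopped, per-cpu state and the
 * recorded max latency are cleared, and the tracer is restarted with
 * the other set of callbacks.
 */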
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

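/*
 * Graph entry/return handlers: same gating as irqsoff_tracer_call(),
 * but the events are recorded as function graph entries and returns.
 */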
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tr->max_latency)
                        return 0;
        }
        return 1;
}

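/*
 * A critical section just ended: compute how long irqs/preemption were
 * off and, if it beats the current max (or crosses tracing_thresh),
 * record it as the new worst-case latency under max_trace_lock.
 */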
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

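/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * remember the timestamp and the address that disabled irqs/preemption.
 */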
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

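/*
 * Mark the end of the critical section and let check_critical_timing()
 * decide whether it produced a new maximum latency.
 */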
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to account for stoppage (in idle) */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

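/*
 * The function (or function graph) callbacks are only hooked up while
 * the "function" trace option is set; 'graph' selects which set of
 * callbacks gets registered.
 */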
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, int set)
{
        if (set)
                register_irqsoff_function(tr, is_graph(), 1);
        else
                unregister_irqsoff_function(tr, is_graph());
        return 0;
}

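/*
 * React to trace option changes that affect this tracer: the function
 * option toggles the function callbacks, and (when the function graph
 * tracer is compiled in) the display-graph option switches the output
 * format. Everything else falls through to trace_keep_overwrite().
 */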
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (mask & TRACE_ITER_FUNCTION)
                return irqsoff_function_set(tr, set);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

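/*
 * Common setup shared by the irqsoff, preemptoff and preemptirqsoff
 * tracers. Only one of them can be active at a time (irqsoff_busy),
 * and only the top-level instance supports graph tracing.
 */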
static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph())))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph());

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

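/*
 * The preemptoff and preemptirqsoff tracers below reuse all of the
 * irqsoff callbacks; they differ only in which trace_type bits are
 * set at init time.
 */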
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);