kernel/trace/trace_sched_wakeup.c (cascardo/linux.git, commit a6c350c681cc30bf65617270c9633744a6578229)
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array       *wakeup_trace;
static int __read_mostly        tracer_enabled;

static struct task_struct       *wakeup_task;
static int                      wakeup_cpu;
static int                      wakeup_current_cpu;
static unsigned                 wakeup_prio = -1;
static int                      wakeup_rt;
static int                      wakeup_dl;
static int                      tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph() (trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph() false
#endif


#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue; in that case preemption has
 *           been disabled and data->disabled has been incremented.
 *         0 if the trace is to be ignored; in that case preemption
 *           is not disabled and data->disabled is left unchanged.
 *
 * Note: this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
                            struct trace_array_cpu **data,
                            int *pc)
{
        long disabled;
        int cpu;

        if (likely(!wakeup_task))
                return 0;

        *pc = preempt_count();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        return 1;

out:
        atomic_dec(&(*data)->disabled);

out_enable:
        preempt_enable_notrace();
        return 0;
}
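
/*
 * Illustrative caller pattern (sketch only, mirroring what
 * wakeup_tracer_call() below actually does): a successful prologue
 * must be paired with atomic_dec(&data->disabled) and
 * preempt_enable_notrace() on every path out of the caller.
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	... emit the trace event(s) ...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */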

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
                   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_irq_save(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}
#endif /* CONFIG_FUNCTION_TRACER */

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&wakeup_graph_return,
                                            &wakeup_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, int set)
{
        if (set)
                register_wakeup_function(tr, is_graph(), 1);
        else
                unregister_wakeup_function(tr, is_graph());
        return 0;
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (mask & TRACE_ITER_FUNCTION)
                return wakeup_function_set(tr, set);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return wakeup_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}
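
/*
 * How the hook above is reached (sketch, not part of this file):
 * set_tracer_flag() invokes the current tracer's ->flag_changed
 * callback when a trace option is toggled through the tracefs
 * "options" directory, so flipping the function or display-graph
 * options while one of the wakeup tracers is active lands here.
 * The exact option file names may differ between kernel versions.
 */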

static int start_func_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_wakeup_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
        if (!(is_graph() ^ set))
                return 0;

        stop_func_tracer(tr, !set);

        wakeup_reset(wakeup_trace);
        tr->max_latency = 0;

        return start_func_tracer(tr, set);
}
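
/*
 * Note on the check above (explanatory only): is_graph() ^ set is
 * zero when the requested state already matches the current one, so
 * toggling the option to its existing value is a no-op. Otherwise the
 * old flavor of function tracing is stopped, the trace and the max
 * latency are reset, and the new flavor is started.
 */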

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc, ret = 0;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return 0;

        local_save_flags(flags);
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
        preempt_enable_notrace();

        return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_save_flags(flags);
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        preempt_enable_notrace();
        return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode, call the graph tracer output function;
         * otherwise fall back to the TRACE_FN event handler.
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tr->max_latency)
                        return 0;
        }
        return 1;
}
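
/*
 * Worked example (illustrative numbers): with tracing_thresh unset
 * (0) and tr->max_latency at 5000ns, a delta of 4000ns is dropped and
 * a delta of 6000ns is recorded, becoming the new maximum via
 * update_max_tr() in the sched_switch probe below. With tracing_thresh
 * set, every delta at or above the threshold is recorded regardless
 * of the current maximum.
 */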

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
        if (task != wakeup_task)
                return;

        wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void notrace
probe_wakeup_sched_switch(void *ignore,
                          struct task_struct *prev, struct task_struct *next)
{
        struct trace_array_cpu *data;
        cycle_t T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        tracing_record_cmdline(prev);

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see tracer_enabled = 1 paired
         * with a stale wakeup_task from an older trace, which might
         * actually be the same task as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (likely(disabled != 1))
                goto out;

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        /* The task we are waiting for is waking up */
        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        if (!report_latency(wakeup_trace, delta))
                goto out_unlock;

        if (likely(!is_tracing_stopped())) {
                wakeup_trace->max_latency = delta;
                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
        }

out_unlock:
        __wakeup_reset(wakeup_trace);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
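
/*
 * Measurement summary (explanatory only): T0 is stamped in
 * probe_wakeup() when the traced task is woken, T1 is read here when
 * the scheduler actually switches to that task, and delta = T1 - T0
 * is the wakeup latency. If report_latency() accepts it, the snapshot
 * buffer is updated via update_max_tr() and the value is what later
 * appears in the tracing_max_latency file.
 */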

static void __wakeup_reset(struct trace_array *tr)
{
        wakeup_cpu = -1;
        wakeup_prio = -1;
        tracing_dl = 0;

        if (wakeup_task)
                put_task_struct(wakeup_task);

        wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

        tracing_reset_online_cpus(&tr->trace_buffer);

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

        /*
         * The semantics are as follows:
         *  - the wakeup tracer handles all tasks in the system,
         *    independently of their scheduling class;
         *  - the wakeup_rt tracer handles tasks belonging to the
         *    sched_dl and sched_rt classes;
         *  - the wakeup_dl tracer handles tasks belonging to the
         *    sched_dl class only.
         */
        if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
            (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
            (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        arch_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || tracing_dl ||
            (!dl_task(p) && p->prio >= wakeup_prio))
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_current_cpu = wakeup_cpu;
        wakeup_prio = p->prio;

        /*
         * Once you start tracing a -deadline task, don't bother tracing
         * another task until the first one wakes up.
         */
        if (dl_task(p))
                tracing_dl = 1;
        else
                tracing_dl = 0;

        wakeup_task = p;
        get_task_struct(wakeup_task);

        local_save_flags(flags);

        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

        /*
         * We must be careful when using CALLER_ADDR2. But since wake_up
         * is not called by an assembly function (whereas schedule is),
         * it should be safe to use it here.
         */
        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        arch_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
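
/*
 * Filter example (illustrative): with the wakeup_rt tracer active
 * (wakeup_rt == 1), a waking SCHED_NORMAL task is rejected by the
 * check at the top of probe_wakeup(), while a SCHED_FIFO task is
 * traced only if its priority beats both the task currently being
 * traced (wakeup_prio) and the task running on this CPU; a deadline
 * task always qualifies. The plain wakeup tracer applies only the
 * priority checks, and wakeup_dl accepts deadline tasks only.
 */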

static void start_wakeup_tracer(struct trace_array *tr)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return;
        }

        ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
                return;
        }

        wakeup_reset(tr);

        /*
         * Don't let the tracer_enabled = 1 show up before
         * the wakeup_task is reset. This may be overkill since
         * wakeup_reset does a spin_unlock after setting the
         * wakeup_task to NULL, but I want to be safe.
         * This is a slow path anyway.
         */
        smp_wmb();

        if (start_func_tracer(tr, is_graph()))
                printk(KERN_ERR "failed to start wakeup tracer\n");

        return;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
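
/*
 * Ordering note (explanatory only): the smp_wmb() above pairs with
 * the smp_rmb() in probe_wakeup_sched_switch(), so that a CPU which
 * observes tracer_enabled == 1 also observes the freshly reset (NULL)
 * wakeup_task rather than a task left over from a previous trace.
 */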

static void stop_wakeup_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        stop_func_tracer(tr, is_graph());
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
        unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
        save_flags = trace_flags;

        /* non-overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        wakeup_trace = tr;
        ftrace_init_array_ops(tr, wakeup_tracer_call);
        start_wakeup_tracer(tr);

        wakeup_busy = true;
        return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 1;
        return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 1;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);
        wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
        wakeup_reset(tr);
        tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
        .name           = "wakeup",
        .init           = wakeup_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
        .name           = "wakeup_rt",
        .init           = wakeup_rt_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
        .name           = "wakeup_dl",
        .init           = wakeup_dl_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .use_max_tr     = true,
};

__init static int init_wakeup_tracer(void)
{
        int ret;

        ret = register_tracer(&wakeup_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_rt_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_dl_tracer);
        if (ret)
                return ret;

        return 0;
}
core_initcall(init_wakeup_tracer);
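
/*
 * Usage sketch (illustrative; assumes tracefs is mounted at
 * /sys/kernel/tracing, also visible as /sys/kernel/debug/tracing):
 *
 *   # echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   # echo 1 > /sys/kernel/tracing/tracing_on
 *   ... run a real-time workload ...
 *   # cat /sys/kernel/tracing/tracing_max_latency
 *   # cat /sys/kernel/tracing/trace
 *
 * The tracing_thresh file can be set to record every wakeup latency
 * at or above the threshold instead of only new maxima (see
 * report_latency() above).
 */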