/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;
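
/*
 * wakeup_lock protects the wakeup_* bookkeeping above. It is a raw
 * arch_spinlock_t because these probes run inside the scheduler with
 * interrupts disabled, where ordinary traced locks must not be taken.
 */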
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph() (trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph() false
#endif

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
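
/*
 * Callers that get 1 back must undo both references themselves:
 * atomic_dec(&data->disabled) followed by preempt_enable_notrace().
 */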

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
#endif /* CONFIG_FUNCTION_TRACER */

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, int set)
{
	if (set)
		register_wakeup_function(tr, is_graph(), 1);
	else
		unregister_wakeup_function(tr, is_graph());
	return 0;
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (mask & TRACE_ITER_FUNCTION)
		return wakeup_function_set(tr, set);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
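
/*
 * Graph output: include the command/pid, absolute timestamps and
 * per-function durations.
 */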
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tr->max_latency)
			return 0;
	}
	return 1;
}
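
/*
 * The wakee may migrate between its wakeup and the moment it is scheduled
 * in; track its current CPU so func_prolog_preempt_disable() above keeps
 * tracing the CPU the task is actually on.
 */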
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu			= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
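
/*
 * Both helpers above reuse struct ctx_switch_entry; only the event type
 * (TRACE_CTX vs. TRACE_WAKE) distinguishes a switch from a wakeup.
 */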
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rr/sched_fifo classes;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
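	/*
	 * Lower ->prio means higher priority: p->prio >= wakeup_prio means
	 * this wakee is no more urgent than the task already being traced,
	 * and p->prio >= current->prio means it will not preempt the waker.
	 */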
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
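
/*
 * data->preempt_timestamp set in probe_wakeup() is T0;
 * probe_wakeup_sched_switch() reads it back and reports T1 - T0 as the
 * wakeup latency once the task is actually scheduled in.
 */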

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;
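
/*
 * Only one wakeup tracer may own the global wakeup_* state at a time;
 * the *_tracer_init() callbacks below return -EBUSY otherwise.
 */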

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};
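
/*
 * Typical use, via tracefs (a sketch; the mount point may differ):
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *   echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *   ... run a workload ...
 *   cat /sys/kernel/debug/tracing/tracing_max_latency
 *   cat /sys/kernel/debug/tracing/trace
 */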

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);