Merge tag 'armsoc-dt' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
[cascardo/linux.git] / kernel / events / core.c
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/tick.h>
22 #include <linux/sysfs.h>
23 #include <linux/dcache.h>
24 #include <linux/percpu.h>
25 #include <linux/ptrace.h>
26 #include <linux/reboot.h>
27 #include <linux/vmstat.h>
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/vmalloc.h>
31 #include <linux/hardirq.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/trace_events.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 #include <linux/namei.h>
48 #include <linux/parser.h>
49
50 #include "internal.h"
51
52 #include <asm/irq_regs.h>
53
54 typedef int (*remote_function_f)(void *);
55
56 struct remote_function_call {
57         struct task_struct      *p;
58         remote_function_f       func;
59         void                    *info;
60         int                     ret;
61 };
62
63 static void remote_function(void *data)
64 {
65         struct remote_function_call *tfc = data;
66         struct task_struct *p = tfc->p;
67
68         if (p) {
69                 /* -EAGAIN */
70                 if (task_cpu(p) != smp_processor_id())
71                         return;
72
73                 /*
74                  * Now that we're on right CPU with IRQs disabled, we can test
75                  * if we hit the right task without races.
76                  */
77
78                 tfc->ret = -ESRCH; /* No such (running) process */
79                 if (p != current)
80                         return;
81         }
82
83         tfc->ret = tfc->func(tfc->info);
84 }
85
86 /**
87  * task_function_call - call a function on the cpu on which a task runs
88  * @p:          the task to evaluate
89  * @func:       the function to be called
90  * @info:       the function call argument
91  *
92  * Calls the function @func when the task is currently running. This might
93  * be on the current CPU, which just calls the function directly
94  *
95  * returns: @func return value, or
96  *          -ESRCH  - when the process isn't running
97  *          -EAGAIN - when the process moved away
98  */
99 static int
100 task_function_call(struct task_struct *p, remote_function_f func, void *info)
101 {
102         struct remote_function_call data = {
103                 .p      = p,
104                 .func   = func,
105                 .info   = info,
106                 .ret    = -EAGAIN,
107         };
108         int ret;
109
110         do {
111                 ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
112                 if (!ret)
113                         ret = data.ret;
114         } while (ret == -EAGAIN);
115
116         return ret;
117 }
118
119 /**
120  * cpu_function_call - call a function on the cpu
121  * @func:       the function to be called
122  * @info:       the function call argument
123  *
124  * Calls the function @func on the remote cpu.
125  *
126  * returns: @func return value or -ENXIO when the cpu is offline
127  */
128 static int cpu_function_call(int cpu, remote_function_f func, void *info)
129 {
130         struct remote_function_call data = {
131                 .p      = NULL,
132                 .func   = func,
133                 .info   = info,
134                 .ret    = -ENXIO, /* No such CPU */
135         };
136
137         smp_call_function_single(cpu, remote_function, &data, 1);
138
139         return data.ret;
140 }
141
142 static inline struct perf_cpu_context *
143 __get_cpu_context(struct perf_event_context *ctx)
144 {
145         return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
146 }
147
148 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
149                           struct perf_event_context *ctx)
150 {
151         raw_spin_lock(&cpuctx->ctx.lock);
152         if (ctx)
153                 raw_spin_lock(&ctx->lock);
154 }
155
156 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
157                             struct perf_event_context *ctx)
158 {
159         if (ctx)
160                 raw_spin_unlock(&ctx->lock);
161         raw_spin_unlock(&cpuctx->ctx.lock);
162 }
163
164 #define TASK_TOMBSTONE ((void *)-1L)
165
166 static bool is_kernel_event(struct perf_event *event)
167 {
168         return READ_ONCE(event->owner) == TASK_TOMBSTONE;
169 }
170
171 /*
172  * On task ctx scheduling...
173  *
174  * When !ctx->nr_events a task context will not be scheduled. This means
175  * we can disable the scheduler hooks (for performance) without leaving
176  * pending task ctx state.
177  *
178  * This however results in two special cases:
179  *
180  *  - removing the last event from a task ctx; this is relatively straight
181  *    forward and is done in __perf_remove_from_context.
182  *
183  *  - adding the first event to a task ctx; this is tricky because we cannot
184  *    rely on ctx->is_active and therefore cannot use event_function_call().
185  *    See perf_install_in_context().
186  *
187  * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
188  */
189
190 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
191                         struct perf_event_context *, void *);
192
193 struct event_function_struct {
194         struct perf_event *event;
195         event_f func;
196         void *data;
197 };
198
199 static int event_function(void *info)
200 {
201         struct event_function_struct *efs = info;
202         struct perf_event *event = efs->event;
203         struct perf_event_context *ctx = event->ctx;
204         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
205         struct perf_event_context *task_ctx = cpuctx->task_ctx;
206         int ret = 0;
207
208         WARN_ON_ONCE(!irqs_disabled());
209
210         perf_ctx_lock(cpuctx, task_ctx);
211         /*
212          * Since we do the IPI call without holding ctx->lock things can have
213          * changed, double check we hit the task we set out to hit.
214          */
215         if (ctx->task) {
216                 if (ctx->task != current) {
217                         ret = -ESRCH;
218                         goto unlock;
219                 }
220
221                 /*
222                  * We only use event_function_call() on established contexts,
223                  * and event_function() is only ever called when active (or
224                  * rather, we'll have bailed in task_function_call() or the
225                  * above ctx->task != current test), therefore we must have
226                  * ctx->is_active here.
227                  */
228                 WARN_ON_ONCE(!ctx->is_active);
229                 /*
230                  * And since we have ctx->is_active, cpuctx->task_ctx must
231                  * match.
232                  */
233                 WARN_ON_ONCE(task_ctx != ctx);
234         } else {
235                 WARN_ON_ONCE(&cpuctx->ctx != ctx);
236         }
237
238         efs->func(event, cpuctx, ctx, efs->data);
239 unlock:
240         perf_ctx_unlock(cpuctx, task_ctx);
241
242         return ret;
243 }
244
245 static void event_function_local(struct perf_event *event, event_f func, void *data)
246 {
247         struct event_function_struct efs = {
248                 .event = event,
249                 .func = func,
250                 .data = data,
251         };
252
253         int ret = event_function(&efs);
254         WARN_ON_ONCE(ret);
255 }
256
257 static void event_function_call(struct perf_event *event, event_f func, void *data)
258 {
259         struct perf_event_context *ctx = event->ctx;
260         struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
261         struct event_function_struct efs = {
262                 .event = event,
263                 .func = func,
264                 .data = data,
265         };
266
267         if (!event->parent) {
268                 /*
269                  * If this is a !child event, we must hold ctx::mutex to
270                  * stabilize the the event->ctx relation. See
271                  * perf_event_ctx_lock().
272                  */
273                 lockdep_assert_held(&ctx->mutex);
274         }
275
276         if (!task) {
277                 cpu_function_call(event->cpu, event_function, &efs);
278                 return;
279         }
280
281         if (task == TASK_TOMBSTONE)
282                 return;
283
284 again:
285         if (!task_function_call(task, event_function, &efs))
286                 return;
287
288         raw_spin_lock_irq(&ctx->lock);
289         /*
290          * Reload the task pointer, it might have been changed by
291          * a concurrent perf_event_context_sched_out().
292          */
293         task = ctx->task;
294         if (task == TASK_TOMBSTONE) {
295                 raw_spin_unlock_irq(&ctx->lock);
296                 return;
297         }
298         if (ctx->is_active) {
299                 raw_spin_unlock_irq(&ctx->lock);
300                 goto again;
301         }
302         func(event, NULL, ctx, data);
303         raw_spin_unlock_irq(&ctx->lock);
304 }
305
306 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
307                        PERF_FLAG_FD_OUTPUT  |\
308                        PERF_FLAG_PID_CGROUP |\
309                        PERF_FLAG_FD_CLOEXEC)
310
311 /*
312  * branch priv levels that need permission checks
313  */
314 #define PERF_SAMPLE_BRANCH_PERM_PLM \
315         (PERF_SAMPLE_BRANCH_KERNEL |\
316          PERF_SAMPLE_BRANCH_HV)
317
318 enum event_type_t {
319         EVENT_FLEXIBLE = 0x1,
320         EVENT_PINNED = 0x2,
321         EVENT_TIME = 0x4,
322         EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
323 };
324
325 /*
326  * perf_sched_events : >0 events exist
327  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
328  */
329
330 static void perf_sched_delayed(struct work_struct *work);
331 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
332 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
333 static DEFINE_MUTEX(perf_sched_mutex);
334 static atomic_t perf_sched_count;
335
336 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
337 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
338 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
339
340 static atomic_t nr_mmap_events __read_mostly;
341 static atomic_t nr_comm_events __read_mostly;
342 static atomic_t nr_task_events __read_mostly;
343 static atomic_t nr_freq_events __read_mostly;
344 static atomic_t nr_switch_events __read_mostly;
345
346 static LIST_HEAD(pmus);
347 static DEFINE_MUTEX(pmus_lock);
348 static struct srcu_struct pmus_srcu;
349
350 /*
351  * perf event paranoia level:
352  *  -1 - not paranoid at all
353  *   0 - disallow raw tracepoint access for unpriv
354  *   1 - disallow cpu events for unpriv
355  *   2 - disallow kernel profiling for unpriv
356  */
357 int sysctl_perf_event_paranoid __read_mostly = 2;
358
359 /* Minimum for 512 kiB + 1 user control page */
360 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
361
362 /*
363  * max perf event sample rate
364  */
365 #define DEFAULT_MAX_SAMPLE_RATE         100000
366 #define DEFAULT_SAMPLE_PERIOD_NS        (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
367 #define DEFAULT_CPU_TIME_MAX_PERCENT    25
368
369 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
370
371 static int max_samples_per_tick __read_mostly   = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
372 static int perf_sample_period_ns __read_mostly  = DEFAULT_SAMPLE_PERIOD_NS;
373
374 static int perf_sample_allowed_ns __read_mostly =
375         DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
376
377 static void update_perf_cpu_limits(void)
378 {
379         u64 tmp = perf_sample_period_ns;
380
381         tmp *= sysctl_perf_cpu_time_max_percent;
382         tmp = div_u64(tmp, 100);
383         if (!tmp)
384                 tmp = 1;
385
386         WRITE_ONCE(perf_sample_allowed_ns, tmp);
387 }
388
389 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
390
391 int perf_proc_update_handler(struct ctl_table *table, int write,
392                 void __user *buffer, size_t *lenp,
393                 loff_t *ppos)
394 {
395         int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
396
397         if (ret || !write)
398                 return ret;
399
400         /*
401          * If throttling is disabled don't allow the write:
402          */
403         if (sysctl_perf_cpu_time_max_percent == 100 ||
404             sysctl_perf_cpu_time_max_percent == 0)
405                 return -EINVAL;
406
407         max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
408         perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
409         update_perf_cpu_limits();
410
411         return 0;
412 }
413
414 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
415
416 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
417                                 void __user *buffer, size_t *lenp,
418                                 loff_t *ppos)
419 {
420         int ret = proc_dointvec(table, write, buffer, lenp, ppos);
421
422         if (ret || !write)
423                 return ret;
424
425         if (sysctl_perf_cpu_time_max_percent == 100 ||
426             sysctl_perf_cpu_time_max_percent == 0) {
427                 printk(KERN_WARNING
428                        "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
429                 WRITE_ONCE(perf_sample_allowed_ns, 0);
430         } else {
431                 update_perf_cpu_limits();
432         }
433
434         return 0;
435 }
436
437 /*
438  * perf samples are done in some very critical code paths (NMIs).
439  * If they take too much CPU time, the system can lock up and not
440  * get any real work done.  This will drop the sample rate when
441  * we detect that events are taking too long.
442  */
443 #define NR_ACCUMULATED_SAMPLES 128
444 static DEFINE_PER_CPU(u64, running_sample_length);
445
446 static u64 __report_avg;
447 static u64 __report_allowed;
448
449 static void perf_duration_warn(struct irq_work *w)
450 {
451         printk_ratelimited(KERN_WARNING
452                 "perf: interrupt took too long (%lld > %lld), lowering "
453                 "kernel.perf_event_max_sample_rate to %d\n",
454                 __report_avg, __report_allowed,
455                 sysctl_perf_event_sample_rate);
456 }
457
458 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
459
460 void perf_sample_event_took(u64 sample_len_ns)
461 {
462         u64 max_len = READ_ONCE(perf_sample_allowed_ns);
463         u64 running_len;
464         u64 avg_len;
465         u32 max;
466
467         if (max_len == 0)
468                 return;
469
470         /* Decay the counter by 1 average sample. */
471         running_len = __this_cpu_read(running_sample_length);
472         running_len -= running_len/NR_ACCUMULATED_SAMPLES;
473         running_len += sample_len_ns;
474         __this_cpu_write(running_sample_length, running_len);
475
476         /*
477          * Note: this will be biased artifically low until we have
478          * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
479          * from having to maintain a count.
480          */
481         avg_len = running_len/NR_ACCUMULATED_SAMPLES;
482         if (avg_len <= max_len)
483                 return;
484
485         __report_avg = avg_len;
486         __report_allowed = max_len;
487
488         /*
489          * Compute a throttle threshold 25% below the current duration.
490          */
491         avg_len += avg_len / 4;
492         max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
493         if (avg_len < max)
494                 max /= (u32)avg_len;
495         else
496                 max = 1;
497
498         WRITE_ONCE(perf_sample_allowed_ns, avg_len);
499         WRITE_ONCE(max_samples_per_tick, max);
500
501         sysctl_perf_event_sample_rate = max * HZ;
502         perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
503
504         if (!irq_work_queue(&perf_duration_work)) {
505                 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
506                              "kernel.perf_event_max_sample_rate to %d\n",
507                              __report_avg, __report_allowed,
508                              sysctl_perf_event_sample_rate);
509         }
510 }
511
512 static atomic64_t perf_event_id;
513
514 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
515                               enum event_type_t event_type);
516
517 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
518                              enum event_type_t event_type,
519                              struct task_struct *task);
520
521 static void update_context_time(struct perf_event_context *ctx);
522 static u64 perf_event_time(struct perf_event *event);
523
524 void __weak perf_event_print_debug(void)        { }
525
526 extern __weak const char *perf_pmu_name(void)
527 {
528         return "pmu";
529 }
530
531 static inline u64 perf_clock(void)
532 {
533         return local_clock();
534 }
535
536 static inline u64 perf_event_clock(struct perf_event *event)
537 {
538         return event->clock();
539 }
540
541 #ifdef CONFIG_CGROUP_PERF
542
543 static inline bool
544 perf_cgroup_match(struct perf_event *event)
545 {
546         struct perf_event_context *ctx = event->ctx;
547         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
548
549         /* @event doesn't care about cgroup */
550         if (!event->cgrp)
551                 return true;
552
553         /* wants specific cgroup scope but @cpuctx isn't associated with any */
554         if (!cpuctx->cgrp)
555                 return false;
556
557         /*
558          * Cgroup scoping is recursive.  An event enabled for a cgroup is
559          * also enabled for all its descendant cgroups.  If @cpuctx's
560          * cgroup is a descendant of @event's (the test covers identity
561          * case), it's a match.
562          */
563         return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
564                                     event->cgrp->css.cgroup);
565 }
566
567 static inline void perf_detach_cgroup(struct perf_event *event)
568 {
569         css_put(&event->cgrp->css);
570         event->cgrp = NULL;
571 }
572
573 static inline int is_cgroup_event(struct perf_event *event)
574 {
575         return event->cgrp != NULL;
576 }
577
578 static inline u64 perf_cgroup_event_time(struct perf_event *event)
579 {
580         struct perf_cgroup_info *t;
581
582         t = per_cpu_ptr(event->cgrp->info, event->cpu);
583         return t->time;
584 }
585
586 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
587 {
588         struct perf_cgroup_info *info;
589         u64 now;
590
591         now = perf_clock();
592
593         info = this_cpu_ptr(cgrp->info);
594
595         info->time += now - info->timestamp;
596         info->timestamp = now;
597 }
598
599 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
600 {
601         struct perf_cgroup *cgrp_out = cpuctx->cgrp;
602         if (cgrp_out)
603                 __update_cgrp_time(cgrp_out);
604 }
605
606 static inline void update_cgrp_time_from_event(struct perf_event *event)
607 {
608         struct perf_cgroup *cgrp;
609
610         /*
611          * ensure we access cgroup data only when needed and
612          * when we know the cgroup is pinned (css_get)
613          */
614         if (!is_cgroup_event(event))
615                 return;
616
617         cgrp = perf_cgroup_from_task(current, event->ctx);
618         /*
619          * Do not update time when cgroup is not active
620          */
621         if (cgrp == event->cgrp)
622                 __update_cgrp_time(event->cgrp);
623 }
624
625 static inline void
626 perf_cgroup_set_timestamp(struct task_struct *task,
627                           struct perf_event_context *ctx)
628 {
629         struct perf_cgroup *cgrp;
630         struct perf_cgroup_info *info;
631
632         /*
633          * ctx->lock held by caller
634          * ensure we do not access cgroup data
635          * unless we have the cgroup pinned (css_get)
636          */
637         if (!task || !ctx->nr_cgroups)
638                 return;
639
640         cgrp = perf_cgroup_from_task(task, ctx);
641         info = this_cpu_ptr(cgrp->info);
642         info->timestamp = ctx->timestamp;
643 }
644
645 #define PERF_CGROUP_SWOUT       0x1 /* cgroup switch out every event */
646 #define PERF_CGROUP_SWIN        0x2 /* cgroup switch in events based on task */
647
648 /*
649  * reschedule events based on the cgroup constraint of task.
650  *
651  * mode SWOUT : schedule out everything
652  * mode SWIN : schedule in based on cgroup for next
653  */
654 static void perf_cgroup_switch(struct task_struct *task, int mode)
655 {
656         struct perf_cpu_context *cpuctx;
657         struct pmu *pmu;
658         unsigned long flags;
659
660         /*
661          * disable interrupts to avoid geting nr_cgroup
662          * changes via __perf_event_disable(). Also
663          * avoids preemption.
664          */
665         local_irq_save(flags);
666
667         /*
668          * we reschedule only in the presence of cgroup
669          * constrained events.
670          */
671
672         list_for_each_entry_rcu(pmu, &pmus, entry) {
673                 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
674                 if (cpuctx->unique_pmu != pmu)
675                         continue; /* ensure we process each cpuctx once */
676
677                 /*
678                  * perf_cgroup_events says at least one
679                  * context on this CPU has cgroup events.
680                  *
681                  * ctx->nr_cgroups reports the number of cgroup
682                  * events for a context.
683                  */
684                 if (cpuctx->ctx.nr_cgroups > 0) {
685                         perf_ctx_lock(cpuctx, cpuctx->task_ctx);
686                         perf_pmu_disable(cpuctx->ctx.pmu);
687
688                         if (mode & PERF_CGROUP_SWOUT) {
689                                 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
690                                 /*
691                                  * must not be done before ctxswout due
692                                  * to event_filter_match() in event_sched_out()
693                                  */
694                                 cpuctx->cgrp = NULL;
695                         }
696
697                         if (mode & PERF_CGROUP_SWIN) {
698                                 WARN_ON_ONCE(cpuctx->cgrp);
699                                 /*
700                                  * set cgrp before ctxsw in to allow
701                                  * event_filter_match() to not have to pass
702                                  * task around
703                                  * we pass the cpuctx->ctx to perf_cgroup_from_task()
704                                  * because cgorup events are only per-cpu
705                                  */
706                                 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
707                                 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
708                         }
709                         perf_pmu_enable(cpuctx->ctx.pmu);
710                         perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
711                 }
712         }
713
714         local_irq_restore(flags);
715 }
716
717 static inline void perf_cgroup_sched_out(struct task_struct *task,
718                                          struct task_struct *next)
719 {
720         struct perf_cgroup *cgrp1;
721         struct perf_cgroup *cgrp2 = NULL;
722
723         rcu_read_lock();
724         /*
725          * we come here when we know perf_cgroup_events > 0
726          * we do not need to pass the ctx here because we know
727          * we are holding the rcu lock
728          */
729         cgrp1 = perf_cgroup_from_task(task, NULL);
730         cgrp2 = perf_cgroup_from_task(next, NULL);
731
732         /*
733          * only schedule out current cgroup events if we know
734          * that we are switching to a different cgroup. Otherwise,
735          * do no touch the cgroup events.
736          */
737         if (cgrp1 != cgrp2)
738                 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
739
740         rcu_read_unlock();
741 }
742
743 static inline void perf_cgroup_sched_in(struct task_struct *prev,
744                                         struct task_struct *task)
745 {
746         struct perf_cgroup *cgrp1;
747         struct perf_cgroup *cgrp2 = NULL;
748
749         rcu_read_lock();
750         /*
751          * we come here when we know perf_cgroup_events > 0
752          * we do not need to pass the ctx here because we know
753          * we are holding the rcu lock
754          */
755         cgrp1 = perf_cgroup_from_task(task, NULL);
756         cgrp2 = perf_cgroup_from_task(prev, NULL);
757
758         /*
759          * only need to schedule in cgroup events if we are changing
760          * cgroup during ctxsw. Cgroup events were not scheduled
761          * out of ctxsw out if that was not the case.
762          */
763         if (cgrp1 != cgrp2)
764                 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
765
766         rcu_read_unlock();
767 }
768
769 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
770                                       struct perf_event_attr *attr,
771                                       struct perf_event *group_leader)
772 {
773         struct perf_cgroup *cgrp;
774         struct cgroup_subsys_state *css;
775         struct fd f = fdget(fd);
776         int ret = 0;
777
778         if (!f.file)
779                 return -EBADF;
780
781         css = css_tryget_online_from_dir(f.file->f_path.dentry,
782                                          &perf_event_cgrp_subsys);
783         if (IS_ERR(css)) {
784                 ret = PTR_ERR(css);
785                 goto out;
786         }
787
788         cgrp = container_of(css, struct perf_cgroup, css);
789         event->cgrp = cgrp;
790
791         /*
792          * all events in a group must monitor
793          * the same cgroup because a task belongs
794          * to only one perf cgroup at a time
795          */
796         if (group_leader && group_leader->cgrp != cgrp) {
797                 perf_detach_cgroup(event);
798                 ret = -EINVAL;
799         }
800 out:
801         fdput(f);
802         return ret;
803 }
804
805 static inline void
806 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
807 {
808         struct perf_cgroup_info *t;
809         t = per_cpu_ptr(event->cgrp->info, event->cpu);
810         event->shadow_ctx_time = now - t->timestamp;
811 }
812
813 static inline void
814 perf_cgroup_defer_enabled(struct perf_event *event)
815 {
816         /*
817          * when the current task's perf cgroup does not match
818          * the event's, we need to remember to call the
819          * perf_mark_enable() function the first time a task with
820          * a matching perf cgroup is scheduled in.
821          */
822         if (is_cgroup_event(event) && !perf_cgroup_match(event))
823                 event->cgrp_defer_enabled = 1;
824 }
825
826 static inline void
827 perf_cgroup_mark_enabled(struct perf_event *event,
828                          struct perf_event_context *ctx)
829 {
830         struct perf_event *sub;
831         u64 tstamp = perf_event_time(event);
832
833         if (!event->cgrp_defer_enabled)
834                 return;
835
836         event->cgrp_defer_enabled = 0;
837
838         event->tstamp_enabled = tstamp - event->total_time_enabled;
839         list_for_each_entry(sub, &event->sibling_list, group_entry) {
840                 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
841                         sub->tstamp_enabled = tstamp - sub->total_time_enabled;
842                         sub->cgrp_defer_enabled = 0;
843                 }
844         }
845 }
846 #else /* !CONFIG_CGROUP_PERF */
847
848 static inline bool
849 perf_cgroup_match(struct perf_event *event)
850 {
851         return true;
852 }
853
854 static inline void perf_detach_cgroup(struct perf_event *event)
855 {}
856
857 static inline int is_cgroup_event(struct perf_event *event)
858 {
859         return 0;
860 }
861
862 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
863 {
864         return 0;
865 }
866
867 static inline void update_cgrp_time_from_event(struct perf_event *event)
868 {
869 }
870
871 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
872 {
873 }
874
875 static inline void perf_cgroup_sched_out(struct task_struct *task,
876                                          struct task_struct *next)
877 {
878 }
879
880 static inline void perf_cgroup_sched_in(struct task_struct *prev,
881                                         struct task_struct *task)
882 {
883 }
884
885 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
886                                       struct perf_event_attr *attr,
887                                       struct perf_event *group_leader)
888 {
889         return -EINVAL;
890 }
891
892 static inline void
893 perf_cgroup_set_timestamp(struct task_struct *task,
894                           struct perf_event_context *ctx)
895 {
896 }
897
898 void
899 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
900 {
901 }
902
903 static inline void
904 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
905 {
906 }
907
908 static inline u64 perf_cgroup_event_time(struct perf_event *event)
909 {
910         return 0;
911 }
912
913 static inline void
914 perf_cgroup_defer_enabled(struct perf_event *event)
915 {
916 }
917
918 static inline void
919 perf_cgroup_mark_enabled(struct perf_event *event,
920                          struct perf_event_context *ctx)
921 {
922 }
923 #endif
924
925 /*
926  * set default to be dependent on timer tick just
927  * like original code
928  */
929 #define PERF_CPU_HRTIMER (1000 / HZ)
930 /*
931  * function must be called with interrupts disbled
932  */
933 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
934 {
935         struct perf_cpu_context *cpuctx;
936         int rotations = 0;
937
938         WARN_ON(!irqs_disabled());
939
940         cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
941         rotations = perf_rotate_context(cpuctx);
942
943         raw_spin_lock(&cpuctx->hrtimer_lock);
944         if (rotations)
945                 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
946         else
947                 cpuctx->hrtimer_active = 0;
948         raw_spin_unlock(&cpuctx->hrtimer_lock);
949
950         return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
951 }
952
953 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
954 {
955         struct hrtimer *timer = &cpuctx->hrtimer;
956         struct pmu *pmu = cpuctx->ctx.pmu;
957         u64 interval;
958
959         /* no multiplexing needed for SW PMU */
960         if (pmu->task_ctx_nr == perf_sw_context)
961                 return;
962
963         /*
964          * check default is sane, if not set then force to
965          * default interval (1/tick)
966          */
967         interval = pmu->hrtimer_interval_ms;
968         if (interval < 1)
969                 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
970
971         cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
972
973         raw_spin_lock_init(&cpuctx->hrtimer_lock);
974         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
975         timer->function = perf_mux_hrtimer_handler;
976 }
977
978 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
979 {
980         struct hrtimer *timer = &cpuctx->hrtimer;
981         struct pmu *pmu = cpuctx->ctx.pmu;
982         unsigned long flags;
983
984         /* not for SW PMU */
985         if (pmu->task_ctx_nr == perf_sw_context)
986                 return 0;
987
988         raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
989         if (!cpuctx->hrtimer_active) {
990                 cpuctx->hrtimer_active = 1;
991                 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
992                 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
993         }
994         raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
995
996         return 0;
997 }
998
999 void perf_pmu_disable(struct pmu *pmu)
1000 {
1001         int *count = this_cpu_ptr(pmu->pmu_disable_count);
1002         if (!(*count)++)
1003                 pmu->pmu_disable(pmu);
1004 }
1005
1006 void perf_pmu_enable(struct pmu *pmu)
1007 {
1008         int *count = this_cpu_ptr(pmu->pmu_disable_count);
1009         if (!--(*count))
1010                 pmu->pmu_enable(pmu);
1011 }
1012
1013 static DEFINE_PER_CPU(struct list_head, active_ctx_list);
1014
1015 /*
1016  * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1017  * perf_event_task_tick() are fully serialized because they're strictly cpu
1018  * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1019  * disabled, while perf_event_task_tick is called from IRQ context.
1020  */
1021 static void perf_event_ctx_activate(struct perf_event_context *ctx)
1022 {
1023         struct list_head *head = this_cpu_ptr(&active_ctx_list);
1024
1025         WARN_ON(!irqs_disabled());
1026
1027         WARN_ON(!list_empty(&ctx->active_ctx_list));
1028
1029         list_add(&ctx->active_ctx_list, head);
1030 }
1031
1032 static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1033 {
1034         WARN_ON(!irqs_disabled());
1035
1036         WARN_ON(list_empty(&ctx->active_ctx_list));
1037
1038         list_del_init(&ctx->active_ctx_list);
1039 }
1040
1041 static void get_ctx(struct perf_event_context *ctx)
1042 {
1043         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
1044 }
1045
1046 static void free_ctx(struct rcu_head *head)
1047 {
1048         struct perf_event_context *ctx;
1049
1050         ctx = container_of(head, struct perf_event_context, rcu_head);
1051         kfree(ctx->task_ctx_data);
1052         kfree(ctx);
1053 }
1054
1055 static void put_ctx(struct perf_event_context *ctx)
1056 {
1057         if (atomic_dec_and_test(&ctx->refcount)) {
1058                 if (ctx->parent_ctx)
1059                         put_ctx(ctx->parent_ctx);
1060                 if (ctx->task && ctx->task != TASK_TOMBSTONE)
1061                         put_task_struct(ctx->task);
1062                 call_rcu(&ctx->rcu_head, free_ctx);
1063         }
1064 }
1065
1066 /*
1067  * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1068  * perf_pmu_migrate_context() we need some magic.
1069  *
1070  * Those places that change perf_event::ctx will hold both
1071  * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1072  *
1073  * Lock ordering is by mutex address. There are two other sites where
1074  * perf_event_context::mutex nests and those are:
1075  *
1076  *  - perf_event_exit_task_context()    [ child , 0 ]
1077  *      perf_event_exit_event()
1078  *        put_event()                   [ parent, 1 ]
1079  *
1080  *  - perf_event_init_context()         [ parent, 0 ]
1081  *      inherit_task_group()
1082  *        inherit_group()
1083  *          inherit_event()
1084  *            perf_event_alloc()
1085  *              perf_init_event()
1086  *                perf_try_init_event() [ child , 1 ]
1087  *
1088  * While it appears there is an obvious deadlock here -- the parent and child
1089  * nesting levels are inverted between the two. This is in fact safe because
1090  * life-time rules separate them. That is an exiting task cannot fork, and a
1091  * spawning task cannot (yet) exit.
1092  *
1093  * But remember that that these are parent<->child context relations, and
1094  * migration does not affect children, therefore these two orderings should not
1095  * interact.
1096  *
1097  * The change in perf_event::ctx does not affect children (as claimed above)
1098  * because the sys_perf_event_open() case will install a new event and break
1099  * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1100  * concerned with cpuctx and that doesn't have children.
1101  *
1102  * The places that change perf_event::ctx will issue:
1103  *
1104  *   perf_remove_from_context();
1105  *   synchronize_rcu();
1106  *   perf_install_in_context();
1107  *
1108  * to affect the change. The remove_from_context() + synchronize_rcu() should
1109  * quiesce the event, after which we can install it in the new location. This
1110  * means that only external vectors (perf_fops, prctl) can perturb the event
1111  * while in transit. Therefore all such accessors should also acquire
1112  * perf_event_context::mutex to serialize against this.
1113  *
1114  * However; because event->ctx can change while we're waiting to acquire
1115  * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1116  * function.
1117  *
1118  * Lock order:
1119  *    cred_guard_mutex
1120  *      task_struct::perf_event_mutex
1121  *        perf_event_context::mutex
1122  *          perf_event::child_mutex;
1123  *            perf_event_context::lock
1124  *          perf_event::mmap_mutex
1125  *          mmap_sem
1126  */
1127 static struct perf_event_context *
1128 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1129 {
1130         struct perf_event_context *ctx;
1131
1132 again:
1133         rcu_read_lock();
1134         ctx = ACCESS_ONCE(event->ctx);
1135         if (!atomic_inc_not_zero(&ctx->refcount)) {
1136                 rcu_read_unlock();
1137                 goto again;
1138         }
1139         rcu_read_unlock();
1140
1141         mutex_lock_nested(&ctx->mutex, nesting);
1142         if (event->ctx != ctx) {
1143                 mutex_unlock(&ctx->mutex);
1144                 put_ctx(ctx);
1145                 goto again;
1146         }
1147
1148         return ctx;
1149 }
1150
1151 static inline struct perf_event_context *
1152 perf_event_ctx_lock(struct perf_event *event)
1153 {
1154         return perf_event_ctx_lock_nested(event, 0);
1155 }
1156
1157 static void perf_event_ctx_unlock(struct perf_event *event,
1158                                   struct perf_event_context *ctx)
1159 {
1160         mutex_unlock(&ctx->mutex);
1161         put_ctx(ctx);
1162 }
1163
1164 /*
1165  * This must be done under the ctx->lock, such as to serialize against
1166  * context_equiv(), therefore we cannot call put_ctx() since that might end up
1167  * calling scheduler related locks and ctx->lock nests inside those.
1168  */
1169 static __must_check struct perf_event_context *
1170 unclone_ctx(struct perf_event_context *ctx)
1171 {
1172         struct perf_event_context *parent_ctx = ctx->parent_ctx;
1173
1174         lockdep_assert_held(&ctx->lock);
1175
1176         if (parent_ctx)
1177                 ctx->parent_ctx = NULL;
1178         ctx->generation++;
1179
1180         return parent_ctx;
1181 }
1182
1183 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1184 {
1185         /*
1186          * only top level events have the pid namespace they were created in
1187          */
1188         if (event->parent)
1189                 event = event->parent;
1190
1191         return task_tgid_nr_ns(p, event->ns);
1192 }
1193
1194 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1195 {
1196         /*
1197          * only top level events have the pid namespace they were created in
1198          */
1199         if (event->parent)
1200                 event = event->parent;
1201
1202         return task_pid_nr_ns(p, event->ns);
1203 }
1204
1205 /*
1206  * If we inherit events we want to return the parent event id
1207  * to userspace.
1208  */
1209 static u64 primary_event_id(struct perf_event *event)
1210 {
1211         u64 id = event->id;
1212
1213         if (event->parent)
1214                 id = event->parent->id;
1215
1216         return id;
1217 }
1218
1219 /*
1220  * Get the perf_event_context for a task and lock it.
1221  *
1222  * This has to cope with with the fact that until it is locked,
1223  * the context could get moved to another task.
1224  */
1225 static struct perf_event_context *
1226 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
1227 {
1228         struct perf_event_context *ctx;
1229
1230 retry:
1231         /*
1232          * One of the few rules of preemptible RCU is that one cannot do
1233          * rcu_read_unlock() while holding a scheduler (or nested) lock when
1234          * part of the read side critical section was irqs-enabled -- see
1235          * rcu_read_unlock_special().
1236          *
1237          * Since ctx->lock nests under rq->lock we must ensure the entire read
1238          * side critical section has interrupts disabled.
1239          */
1240         local_irq_save(*flags);
1241         rcu_read_lock();
1242         ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
1243         if (ctx) {
1244                 /*
1245                  * If this context is a clone of another, it might
1246                  * get swapped for another underneath us by
1247                  * perf_event_task_sched_out, though the
1248                  * rcu_read_lock() protects us from any context
1249                  * getting freed.  Lock the context and check if it
1250                  * got swapped before we could get the lock, and retry
1251                  * if so.  If we locked the right context, then it
1252                  * can't get swapped on us any more.
1253                  */
1254                 raw_spin_lock(&ctx->lock);
1255                 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
1256                         raw_spin_unlock(&ctx->lock);
1257                         rcu_read_unlock();
1258                         local_irq_restore(*flags);
1259                         goto retry;
1260                 }
1261
1262                 if (ctx->task == TASK_TOMBSTONE ||
1263                     !atomic_inc_not_zero(&ctx->refcount)) {
1264                         raw_spin_unlock(&ctx->lock);
1265                         ctx = NULL;
1266                 } else {
1267                         WARN_ON_ONCE(ctx->task != task);
1268                 }
1269         }
1270         rcu_read_unlock();
1271         if (!ctx)
1272                 local_irq_restore(*flags);
1273         return ctx;
1274 }
1275
1276 /*
1277  * Get the context for a task and increment its pin_count so it
1278  * can't get swapped to another task.  This also increments its
1279  * reference count so that the context can't get freed.
1280  */
1281 static struct perf_event_context *
1282 perf_pin_task_context(struct task_struct *task, int ctxn)
1283 {
1284         struct perf_event_context *ctx;
1285         unsigned long flags;
1286
1287         ctx = perf_lock_task_context(task, ctxn, &flags);
1288         if (ctx) {
1289                 ++ctx->pin_count;
1290                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1291         }
1292         return ctx;
1293 }
1294
1295 static void perf_unpin_context(struct perf_event_context *ctx)
1296 {
1297         unsigned long flags;
1298
1299         raw_spin_lock_irqsave(&ctx->lock, flags);
1300         --ctx->pin_count;
1301         raw_spin_unlock_irqrestore(&ctx->lock, flags);
1302 }
1303
1304 /*
1305  * Update the record of the current time in a context.
1306  */
1307 static void update_context_time(struct perf_event_context *ctx)
1308 {
1309         u64 now = perf_clock();
1310
1311         ctx->time += now - ctx->timestamp;
1312         ctx->timestamp = now;
1313 }
1314
1315 static u64 perf_event_time(struct perf_event *event)
1316 {
1317         struct perf_event_context *ctx = event->ctx;
1318
1319         if (is_cgroup_event(event))
1320                 return perf_cgroup_event_time(event);
1321
1322         return ctx ? ctx->time : 0;
1323 }
1324
1325 /*
1326  * Update the total_time_enabled and total_time_running fields for a event.
1327  */
1328 static void update_event_times(struct perf_event *event)
1329 {
1330         struct perf_event_context *ctx = event->ctx;
1331         u64 run_end;
1332
1333         lockdep_assert_held(&ctx->lock);
1334
1335         if (event->state < PERF_EVENT_STATE_INACTIVE ||
1336             event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1337                 return;
1338
1339         /*
1340          * in cgroup mode, time_enabled represents
1341          * the time the event was enabled AND active
1342          * tasks were in the monitored cgroup. This is
1343          * independent of the activity of the context as
1344          * there may be a mix of cgroup and non-cgroup events.
1345          *
1346          * That is why we treat cgroup events differently
1347          * here.
1348          */
1349         if (is_cgroup_event(event))
1350                 run_end = perf_cgroup_event_time(event);
1351         else if (ctx->is_active)
1352                 run_end = ctx->time;
1353         else
1354                 run_end = event->tstamp_stopped;
1355
1356         event->total_time_enabled = run_end - event->tstamp_enabled;
1357
1358         if (event->state == PERF_EVENT_STATE_INACTIVE)
1359                 run_end = event->tstamp_stopped;
1360         else
1361                 run_end = perf_event_time(event);
1362
1363         event->total_time_running = run_end - event->tstamp_running;
1364
1365 }
1366
1367 /*
1368  * Update total_time_enabled and total_time_running for all events in a group.
1369  */
1370 static void update_group_times(struct perf_event *leader)
1371 {
1372         struct perf_event *event;
1373
1374         update_event_times(leader);
1375         list_for_each_entry(event, &leader->sibling_list, group_entry)
1376                 update_event_times(event);
1377 }
1378
1379 static struct list_head *
1380 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1381 {
1382         if (event->attr.pinned)
1383                 return &ctx->pinned_groups;
1384         else
1385                 return &ctx->flexible_groups;
1386 }
1387
1388 /*
1389  * Add a event from the lists for its context.
1390  * Must be called with ctx->mutex and ctx->lock held.
1391  */
1392 static void
1393 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1394 {
1395         lockdep_assert_held(&ctx->lock);
1396
1397         WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1398         event->attach_state |= PERF_ATTACH_CONTEXT;
1399
1400         /*
1401          * If we're a stand alone event or group leader, we go to the context
1402          * list, group events are kept attached to the group so that
1403          * perf_group_detach can, at all times, locate all siblings.
1404          */
1405         if (event->group_leader == event) {
1406                 struct list_head *list;
1407
1408                 if (is_software_event(event))
1409                         event->group_flags |= PERF_GROUP_SOFTWARE;
1410
1411                 list = ctx_group_list(event, ctx);
1412                 list_add_tail(&event->group_entry, list);
1413         }
1414
1415         if (is_cgroup_event(event))
1416                 ctx->nr_cgroups++;
1417
1418         list_add_rcu(&event->event_entry, &ctx->event_list);
1419         ctx->nr_events++;
1420         if (event->attr.inherit_stat)
1421                 ctx->nr_stat++;
1422
1423         ctx->generation++;
1424 }
1425
1426 /*
1427  * Initialize event state based on the perf_event_attr::disabled.
1428  */
1429 static inline void perf_event__state_init(struct perf_event *event)
1430 {
1431         event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1432                                               PERF_EVENT_STATE_INACTIVE;
1433 }
1434
1435 static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
1436 {
1437         int entry = sizeof(u64); /* value */
1438         int size = 0;
1439         int nr = 1;
1440
1441         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1442                 size += sizeof(u64);
1443
1444         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1445                 size += sizeof(u64);
1446
1447         if (event->attr.read_format & PERF_FORMAT_ID)
1448                 entry += sizeof(u64);
1449
1450         if (event->attr.read_format & PERF_FORMAT_GROUP) {
1451                 nr += nr_siblings;
1452                 size += sizeof(u64);
1453         }
1454
1455         size += entry * nr;
1456         event->read_size = size;
1457 }
1458
1459 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1460 {
1461         struct perf_sample_data *data;
1462         u16 size = 0;
1463
1464         if (sample_type & PERF_SAMPLE_IP)
1465                 size += sizeof(data->ip);
1466
1467         if (sample_type & PERF_SAMPLE_ADDR)
1468                 size += sizeof(data->addr);
1469
1470         if (sample_type & PERF_SAMPLE_PERIOD)
1471                 size += sizeof(data->period);
1472
1473         if (sample_type & PERF_SAMPLE_WEIGHT)
1474                 size += sizeof(data->weight);
1475
1476         if (sample_type & PERF_SAMPLE_READ)
1477                 size += event->read_size;
1478
1479         if (sample_type & PERF_SAMPLE_DATA_SRC)
1480                 size += sizeof(data->data_src.val);
1481
1482         if (sample_type & PERF_SAMPLE_TRANSACTION)
1483                 size += sizeof(data->txn);
1484
1485         event->header_size = size;
1486 }
1487
1488 /*
1489  * Called at perf_event creation and when events are attached/detached from a
1490  * group.
1491  */
1492 static void perf_event__header_size(struct perf_event *event)
1493 {
1494         __perf_event_read_size(event,
1495                                event->group_leader->nr_siblings);
1496         __perf_event_header_size(event, event->attr.sample_type);
1497 }
1498
1499 static void perf_event__id_header_size(struct perf_event *event)
1500 {
1501         struct perf_sample_data *data;
1502         u64 sample_type = event->attr.sample_type;
1503         u16 size = 0;
1504
1505         if (sample_type & PERF_SAMPLE_TID)
1506                 size += sizeof(data->tid_entry);
1507
1508         if (sample_type & PERF_SAMPLE_TIME)
1509                 size += sizeof(data->time);
1510
1511         if (sample_type & PERF_SAMPLE_IDENTIFIER)
1512                 size += sizeof(data->id);
1513
1514         if (sample_type & PERF_SAMPLE_ID)
1515                 size += sizeof(data->id);
1516
1517         if (sample_type & PERF_SAMPLE_STREAM_ID)
1518                 size += sizeof(data->stream_id);
1519
1520         if (sample_type & PERF_SAMPLE_CPU)
1521                 size += sizeof(data->cpu_entry);
1522
1523         event->id_header_size = size;
1524 }
1525
1526 static bool perf_event_validate_size(struct perf_event *event)
1527 {
1528         /*
1529          * The values computed here will be over-written when we actually
1530          * attach the event.
1531          */
1532         __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1533         __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1534         perf_event__id_header_size(event);
1535
1536         /*
1537          * Sum the lot; should not exceed the 64k limit we have on records.
1538          * Conservative limit to allow for callchains and other variable fields.
1539          */
1540         if (event->read_size + event->header_size +
1541             event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1542                 return false;
1543
1544         return true;
1545 }
1546
1547 static void perf_group_attach(struct perf_event *event)
1548 {
1549         struct perf_event *group_leader = event->group_leader, *pos;
1550
1551         /*
1552          * We can have double attach due to group movement in perf_event_open.
1553          */
1554         if (event->attach_state & PERF_ATTACH_GROUP)
1555                 return;
1556
1557         event->attach_state |= PERF_ATTACH_GROUP;
1558
1559         if (group_leader == event)
1560                 return;
1561
1562         WARN_ON_ONCE(group_leader->ctx != event->ctx);
1563
1564         if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1565                         !is_software_event(event))
1566                 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1567
1568         list_add_tail(&event->group_entry, &group_leader->sibling_list);
1569         group_leader->nr_siblings++;
1570
1571         perf_event__header_size(group_leader);
1572
1573         list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1574                 perf_event__header_size(pos);
1575 }
1576
1577 /*
1578  * Remove a event from the lists for its context.
1579  * Must be called with ctx->mutex and ctx->lock held.
1580  */
1581 static void
1582 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1583 {
1584         struct perf_cpu_context *cpuctx;
1585
1586         WARN_ON_ONCE(event->ctx != ctx);
1587         lockdep_assert_held(&ctx->lock);
1588
1589         /*
1590          * We can have double detach due to exit/hot-unplug + close.
1591          */
1592         if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1593                 return;
1594
1595         event->attach_state &= ~PERF_ATTACH_CONTEXT;
1596
1597         if (is_cgroup_event(event)) {
1598                 ctx->nr_cgroups--;
1599                 /*
1600                  * Because cgroup events are always per-cpu events, this will
1601                  * always be called from the right CPU.
1602                  */
1603                 cpuctx = __get_cpu_context(ctx);
1604                 /*
1605                  * If there are no more cgroup events then clear cgrp to avoid
1606                  * stale pointer in update_cgrp_time_from_cpuctx().
1607                  */
1608                 if (!ctx->nr_cgroups)
1609                         cpuctx->cgrp = NULL;
1610         }
1611
1612         ctx->nr_events--;
1613         if (event->attr.inherit_stat)
1614                 ctx->nr_stat--;
1615
1616         list_del_rcu(&event->event_entry);
1617
1618         if (event->group_leader == event)
1619                 list_del_init(&event->group_entry);
1620
1621         update_group_times(event);
1622
1623         /*
1624          * If event was in error state, then keep it
1625          * that way, otherwise bogus counts will be
1626          * returned on read(). The only way to get out
1627          * of error state is by explicit re-enabling
1628          * of the event
1629          */
1630         if (event->state > PERF_EVENT_STATE_OFF)
1631                 event->state = PERF_EVENT_STATE_OFF;
1632
1633         ctx->generation++;
1634 }
1635
1636 static void perf_group_detach(struct perf_event *event)
1637 {
1638         struct perf_event *sibling, *tmp;
1639         struct list_head *list = NULL;
1640
1641         /*
1642          * We can have double detach due to exit/hot-unplug + close.
1643          */
1644         if (!(event->attach_state & PERF_ATTACH_GROUP))
1645                 return;
1646
1647         event->attach_state &= ~PERF_ATTACH_GROUP;
1648
1649         /*
1650          * If this is a sibling, remove it from its group.
1651          */
1652         if (event->group_leader != event) {
1653                 list_del_init(&event->group_entry);
1654                 event->group_leader->nr_siblings--;
1655                 goto out;
1656         }
1657
1658         if (!list_empty(&event->group_entry))
1659                 list = &event->group_entry;
1660
1661         /*
1662          * If this was a group event with sibling events then
1663          * upgrade the siblings to singleton events by adding them
1664          * to whatever list we are on.
1665          */
1666         list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1667                 if (list)
1668                         list_move_tail(&sibling->group_entry, list);
1669                 sibling->group_leader = sibling;
1670
1671                 /* Inherit group flags from the previous leader */
1672                 sibling->group_flags = event->group_flags;
1673
1674                 WARN_ON_ONCE(sibling->ctx != event->ctx);
1675         }
1676
1677 out:
1678         perf_event__header_size(event->group_leader);
1679
1680         list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1681                 perf_event__header_size(tmp);
1682 }
1683
1684 static bool is_orphaned_event(struct perf_event *event)
1685 {
1686         return event->state == PERF_EVENT_STATE_DEAD;
1687 }
1688
1689 static inline int __pmu_filter_match(struct perf_event *event)
1690 {
1691         struct pmu *pmu = event->pmu;
1692         return pmu->filter_match ? pmu->filter_match(event) : 1;
1693 }
1694
1695 /*
1696  * Check whether we should attempt to schedule an event group based on
1697  * PMU-specific filtering. An event group can consist of HW and SW events,
1698  * potentially with a SW leader, so we must check all the filters, to
1699  * determine whether a group is schedulable:
1700  */
1701 static inline int pmu_filter_match(struct perf_event *event)
1702 {
1703         struct perf_event *child;
1704
1705         if (!__pmu_filter_match(event))
1706                 return 0;
1707
1708         list_for_each_entry(child, &event->sibling_list, group_entry) {
1709                 if (!__pmu_filter_match(child))
1710                         return 0;
1711         }
1712
1713         return 1;
1714 }
1715
1716 static inline int
1717 event_filter_match(struct perf_event *event)
1718 {
1719         return (event->cpu == -1 || event->cpu == smp_processor_id())
1720             && perf_cgroup_match(event) && pmu_filter_match(event);
1721 }
1722
1723 static void
1724 event_sched_out(struct perf_event *event,
1725                   struct perf_cpu_context *cpuctx,
1726                   struct perf_event_context *ctx)
1727 {
1728         u64 tstamp = perf_event_time(event);
1729         u64 delta;
1730
1731         WARN_ON_ONCE(event->ctx != ctx);
1732         lockdep_assert_held(&ctx->lock);
1733
1734         /*
1735          * An event which could not be activated because of
1736          * filter mismatch still needs to have its timings
1737          * maintained, otherwise bogus information is return
1738          * via read() for time_enabled, time_running:
1739          */
1740         if (event->state == PERF_EVENT_STATE_INACTIVE
1741             && !event_filter_match(event)) {
1742                 delta = tstamp - event->tstamp_stopped;
1743                 event->tstamp_running += delta;
1744                 event->tstamp_stopped = tstamp;
1745         }
1746
1747         if (event->state != PERF_EVENT_STATE_ACTIVE)
1748                 return;
1749
1750         perf_pmu_disable(event->pmu);
1751
1752         event->tstamp_stopped = tstamp;
1753         event->pmu->del(event, 0);
1754         event->oncpu = -1;
1755         event->state = PERF_EVENT_STATE_INACTIVE;
1756         if (event->pending_disable) {
1757                 event->pending_disable = 0;
1758                 event->state = PERF_EVENT_STATE_OFF;
1759         }
1760
1761         if (!is_software_event(event))
1762                 cpuctx->active_oncpu--;
1763         if (!--ctx->nr_active)
1764                 perf_event_ctx_deactivate(ctx);
1765         if (event->attr.freq && event->attr.sample_freq)
1766                 ctx->nr_freq--;
1767         if (event->attr.exclusive || !cpuctx->active_oncpu)
1768                 cpuctx->exclusive = 0;
1769
1770         perf_pmu_enable(event->pmu);
1771 }
1772
1773 static void
1774 group_sched_out(struct perf_event *group_event,
1775                 struct perf_cpu_context *cpuctx,
1776                 struct perf_event_context *ctx)
1777 {
1778         struct perf_event *event;
1779         int state = group_event->state;
1780
1781         event_sched_out(group_event, cpuctx, ctx);
1782
1783         /*
1784          * Schedule out siblings (if any):
1785          */
1786         list_for_each_entry(event, &group_event->sibling_list, group_entry)
1787                 event_sched_out(event, cpuctx, ctx);
1788
1789         if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1790                 cpuctx->exclusive = 0;
1791 }
1792
1793 #define DETACH_GROUP    0x01UL
1794
1795 /*
1796  * Cross CPU call to remove a performance event
1797  *
1798  * We disable the event on the hardware level first. After that we
1799  * remove it from the context list.
1800  */
1801 static void
1802 __perf_remove_from_context(struct perf_event *event,
1803                            struct perf_cpu_context *cpuctx,
1804                            struct perf_event_context *ctx,
1805                            void *info)
1806 {
1807         unsigned long flags = (unsigned long)info;
1808
1809         event_sched_out(event, cpuctx, ctx);
1810         if (flags & DETACH_GROUP)
1811                 perf_group_detach(event);
1812         list_del_event(event, ctx);
1813
1814         if (!ctx->nr_events && ctx->is_active) {
1815                 ctx->is_active = 0;
1816                 if (ctx->task) {
1817                         WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1818                         cpuctx->task_ctx = NULL;
1819                 }
1820         }
1821 }
1822
1823 /*
1824  * Remove the event from a task's (or a CPU's) list of events.
1825  *
1826  * If event->ctx is a cloned context, callers must make sure that
1827  * every task struct that event->ctx->task could possibly point to
1828  * remains valid.  This is OK when called from perf_release since
1829  * that only calls us on the top-level context, which can't be a clone.
1830  * When called from perf_event_exit_task, it's OK because the
1831  * context has been detached from its task.
1832  */
1833 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1834 {
1835         lockdep_assert_held(&event->ctx->mutex);
1836
1837         event_function_call(event, __perf_remove_from_context, (void *)flags);
1838 }
1839
1840 /*
1841  * Cross CPU call to disable a performance event
1842  */
1843 static void __perf_event_disable(struct perf_event *event,
1844                                  struct perf_cpu_context *cpuctx,
1845                                  struct perf_event_context *ctx,
1846                                  void *info)
1847 {
1848         if (event->state < PERF_EVENT_STATE_INACTIVE)
1849                 return;
1850
1851         update_context_time(ctx);
1852         update_cgrp_time_from_event(event);
1853         update_group_times(event);
1854         if (event == event->group_leader)
1855                 group_sched_out(event, cpuctx, ctx);
1856         else
1857                 event_sched_out(event, cpuctx, ctx);
1858         event->state = PERF_EVENT_STATE_OFF;
1859 }
1860
1861 /*
1862  * Disable a event.
1863  *
1864  * If event->ctx is a cloned context, callers must make sure that
1865  * every task struct that event->ctx->task could possibly point to
1866  * remains valid.  This condition is satisifed when called through
1867  * perf_event_for_each_child or perf_event_for_each because they
1868  * hold the top-level event's child_mutex, so any descendant that
1869  * goes to exit will block in perf_event_exit_event().
1870  *
1871  * When called from perf_pending_event it's OK because event->ctx
1872  * is the current context on this CPU and preemption is disabled,
1873  * hence we can't get into perf_event_task_sched_out for this context.
1874  */
1875 static void _perf_event_disable(struct perf_event *event)
1876 {
1877         struct perf_event_context *ctx = event->ctx;
1878
1879         raw_spin_lock_irq(&ctx->lock);
1880         if (event->state <= PERF_EVENT_STATE_OFF) {
1881                 raw_spin_unlock_irq(&ctx->lock);
1882                 return;
1883         }
1884         raw_spin_unlock_irq(&ctx->lock);
1885
1886         event_function_call(event, __perf_event_disable, NULL);
1887 }
1888
1889 void perf_event_disable_local(struct perf_event *event)
1890 {
1891         event_function_local(event, __perf_event_disable, NULL);
1892 }
1893
1894 /*
1895  * Strictly speaking kernel users cannot create groups and therefore this
1896  * interface does not need the perf_event_ctx_lock() magic.
1897  */
1898 void perf_event_disable(struct perf_event *event)
1899 {
1900         struct perf_event_context *ctx;
1901
1902         ctx = perf_event_ctx_lock(event);
1903         _perf_event_disable(event);
1904         perf_event_ctx_unlock(event, ctx);
1905 }
1906 EXPORT_SYMBOL_GPL(perf_event_disable);
1907
1908 static void perf_set_shadow_time(struct perf_event *event,
1909                                  struct perf_event_context *ctx,
1910                                  u64 tstamp)
1911 {
1912         /*
1913          * use the correct time source for the time snapshot
1914          *
1915          * We could get by without this by leveraging the
1916          * fact that to get to this function, the caller
1917          * has most likely already called update_context_time()
1918          * and update_cgrp_time_xx() and thus both timestamp
1919          * are identical (or very close). Given that tstamp is,
1920          * already adjusted for cgroup, we could say that:
1921          *    tstamp - ctx->timestamp
1922          * is equivalent to
1923          *    tstamp - cgrp->timestamp.
1924          *
1925          * Then, in perf_output_read(), the calculation would
1926          * work with no changes because:
1927          * - event is guaranteed scheduled in
1928          * - no scheduled out in between
1929          * - thus the timestamp would be the same
1930          *
1931          * But this is a bit hairy.
1932          *
1933          * So instead, we have an explicit cgroup call to remain
1934          * within the time time source all along. We believe it
1935          * is cleaner and simpler to understand.
1936          */
1937         if (is_cgroup_event(event))
1938                 perf_cgroup_set_shadow_time(event, tstamp);
1939         else
1940                 event->shadow_ctx_time = tstamp - ctx->timestamp;
1941 }
1942
1943 #define MAX_INTERRUPTS (~0ULL)
1944
1945 static void perf_log_throttle(struct perf_event *event, int enable);
1946 static void perf_log_itrace_start(struct perf_event *event);
1947
1948 static int
1949 event_sched_in(struct perf_event *event,
1950                  struct perf_cpu_context *cpuctx,
1951                  struct perf_event_context *ctx)
1952 {
1953         u64 tstamp = perf_event_time(event);
1954         int ret = 0;
1955
1956         lockdep_assert_held(&ctx->lock);
1957
1958         if (event->state <= PERF_EVENT_STATE_OFF)
1959                 return 0;
1960
1961         WRITE_ONCE(event->oncpu, smp_processor_id());
1962         /*
1963          * Order event::oncpu write to happen before the ACTIVE state
1964          * is visible.
1965          */
1966         smp_wmb();
1967         WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
1968
1969         /*
1970          * Unthrottle events, since we scheduled we might have missed several
1971          * ticks already, also for a heavily scheduling task there is little
1972          * guarantee it'll get a tick in a timely manner.
1973          */
1974         if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1975                 perf_log_throttle(event, 1);
1976                 event->hw.interrupts = 0;
1977         }
1978
1979         /*
1980          * The new state must be visible before we turn it on in the hardware:
1981          */
1982         smp_wmb();
1983
1984         perf_pmu_disable(event->pmu);
1985
1986         perf_set_shadow_time(event, ctx, tstamp);
1987
1988         perf_log_itrace_start(event);
1989
1990         if (event->pmu->add(event, PERF_EF_START)) {
1991                 event->state = PERF_EVENT_STATE_INACTIVE;
1992                 event->oncpu = -1;
1993                 ret = -EAGAIN;
1994                 goto out;
1995         }
1996
1997         event->tstamp_running += tstamp - event->tstamp_stopped;
1998
1999         if (!is_software_event(event))
2000                 cpuctx->active_oncpu++;
2001         if (!ctx->nr_active++)
2002                 perf_event_ctx_activate(ctx);
2003         if (event->attr.freq && event->attr.sample_freq)
2004                 ctx->nr_freq++;
2005
2006         if (event->attr.exclusive)
2007                 cpuctx->exclusive = 1;
2008
2009 out:
2010         perf_pmu_enable(event->pmu);
2011
2012         return ret;
2013 }
2014
2015 static int
2016 group_sched_in(struct perf_event *group_event,
2017                struct perf_cpu_context *cpuctx,
2018                struct perf_event_context *ctx)
2019 {
2020         struct perf_event *event, *partial_group = NULL;
2021         struct pmu *pmu = ctx->pmu;
2022         u64 now = ctx->time;
2023         bool simulate = false;
2024
2025         if (group_event->state == PERF_EVENT_STATE_OFF)
2026                 return 0;
2027
2028         pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2029
2030         if (event_sched_in(group_event, cpuctx, ctx)) {
2031                 pmu->cancel_txn(pmu);
2032                 perf_mux_hrtimer_restart(cpuctx);
2033                 return -EAGAIN;
2034         }
2035
2036         /*
2037          * Schedule in siblings as one group (if any):
2038          */
2039         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2040                 if (event_sched_in(event, cpuctx, ctx)) {
2041                         partial_group = event;
2042                         goto group_error;
2043                 }
2044         }
2045
2046         if (!pmu->commit_txn(pmu))
2047                 return 0;
2048
2049 group_error:
2050         /*
2051          * Groups can be scheduled in as one unit only, so undo any
2052          * partial group before returning:
2053          * The events up to the failed event are scheduled out normally,
2054          * tstamp_stopped will be updated.
2055          *
2056          * The failed events and the remaining siblings need to have
2057          * their timings updated as if they had gone thru event_sched_in()
2058          * and event_sched_out(). This is required to get consistent timings
2059          * across the group. This also takes care of the case where the group
2060          * could never be scheduled by ensuring tstamp_stopped is set to mark
2061          * the time the event was actually stopped, such that time delta
2062          * calculation in update_event_times() is correct.
2063          */
2064         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2065                 if (event == partial_group)
2066                         simulate = true;
2067
2068                 if (simulate) {
2069                         event->tstamp_running += now - event->tstamp_stopped;
2070                         event->tstamp_stopped = now;
2071                 } else {
2072                         event_sched_out(event, cpuctx, ctx);
2073                 }
2074         }
2075         event_sched_out(group_event, cpuctx, ctx);
2076
2077         pmu->cancel_txn(pmu);
2078
2079         perf_mux_hrtimer_restart(cpuctx);
2080
2081         return -EAGAIN;
2082 }
2083
2084 /*
2085  * Work out whether we can put this event group on the CPU now.
2086  */
2087 static int group_can_go_on(struct perf_event *event,
2088                            struct perf_cpu_context *cpuctx,
2089                            int can_add_hw)
2090 {
2091         /*
2092          * Groups consisting entirely of software events can always go on.
2093          */
2094         if (event->group_flags & PERF_GROUP_SOFTWARE)
2095                 return 1;
2096         /*
2097          * If an exclusive group is already on, no other hardware
2098          * events can go on.
2099          */
2100         if (cpuctx->exclusive)
2101                 return 0;
2102         /*
2103          * If this group is exclusive and there are already
2104          * events on the CPU, it can't go on.
2105          */
2106         if (event->attr.exclusive && cpuctx->active_oncpu)
2107                 return 0;
2108         /*
2109          * Otherwise, try to add it if all previous groups were able
2110          * to go on.
2111          */
2112         return can_add_hw;
2113 }
2114
2115 static void add_event_to_ctx(struct perf_event *event,
2116                                struct perf_event_context *ctx)
2117 {
2118         u64 tstamp = perf_event_time(event);
2119
2120         list_add_event(event, ctx);
2121         perf_group_attach(event);
2122         event->tstamp_enabled = tstamp;
2123         event->tstamp_running = tstamp;
2124         event->tstamp_stopped = tstamp;
2125 }
2126
2127 static void ctx_sched_out(struct perf_event_context *ctx,
2128                           struct perf_cpu_context *cpuctx,
2129                           enum event_type_t event_type);
2130 static void
2131 ctx_sched_in(struct perf_event_context *ctx,
2132              struct perf_cpu_context *cpuctx,
2133              enum event_type_t event_type,
2134              struct task_struct *task);
2135
2136 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2137                                struct perf_event_context *ctx)
2138 {
2139         if (!cpuctx->task_ctx)
2140                 return;
2141
2142         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2143                 return;
2144
2145         ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2146 }
2147
2148 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2149                                 struct perf_event_context *ctx,
2150                                 struct task_struct *task)
2151 {
2152         cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2153         if (ctx)
2154                 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2155         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2156         if (ctx)
2157                 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2158 }
2159
2160 static void ctx_resched(struct perf_cpu_context *cpuctx,
2161                         struct perf_event_context *task_ctx)
2162 {
2163         perf_pmu_disable(cpuctx->ctx.pmu);
2164         if (task_ctx)
2165                 task_ctx_sched_out(cpuctx, task_ctx);
2166         cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2167         perf_event_sched_in(cpuctx, task_ctx, current);
2168         perf_pmu_enable(cpuctx->ctx.pmu);
2169 }
2170
2171 /*
2172  * Cross CPU call to install and enable a performance event
2173  *
2174  * Very similar to remote_function() + event_function() but cannot assume that
2175  * things like ctx->is_active and cpuctx->task_ctx are set.
2176  */
2177 static int  __perf_install_in_context(void *info)
2178 {
2179         struct perf_event *event = info;
2180         struct perf_event_context *ctx = event->ctx;
2181         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2182         struct perf_event_context *task_ctx = cpuctx->task_ctx;
2183         bool activate = true;
2184         int ret = 0;
2185
2186         raw_spin_lock(&cpuctx->ctx.lock);
2187         if (ctx->task) {
2188                 raw_spin_lock(&ctx->lock);
2189                 task_ctx = ctx;
2190
2191                 /* If we're on the wrong CPU, try again */
2192                 if (task_cpu(ctx->task) != smp_processor_id()) {
2193                         ret = -ESRCH;
2194                         goto unlock;
2195                 }
2196
2197                 /*
2198                  * If we're on the right CPU, see if the task we target is
2199                  * current, if not we don't have to activate the ctx, a future
2200                  * context switch will do that for us.
2201                  */
2202                 if (ctx->task != current)
2203                         activate = false;
2204                 else
2205                         WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2206
2207         } else if (task_ctx) {
2208                 raw_spin_lock(&task_ctx->lock);
2209         }
2210
2211         if (activate) {
2212                 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2213                 add_event_to_ctx(event, ctx);
2214                 ctx_resched(cpuctx, task_ctx);
2215         } else {
2216                 add_event_to_ctx(event, ctx);
2217         }
2218
2219 unlock:
2220         perf_ctx_unlock(cpuctx, task_ctx);
2221
2222         return ret;
2223 }
2224
2225 /*
2226  * Attach a performance event to a context.
2227  *
2228  * Very similar to event_function_call, see comment there.
2229  */
2230 static void
2231 perf_install_in_context(struct perf_event_context *ctx,
2232                         struct perf_event *event,
2233                         int cpu)
2234 {
2235         struct task_struct *task = READ_ONCE(ctx->task);
2236
2237         lockdep_assert_held(&ctx->mutex);
2238
2239         event->ctx = ctx;
2240         if (event->cpu != -1)
2241                 event->cpu = cpu;
2242
2243         if (!task) {
2244                 cpu_function_call(cpu, __perf_install_in_context, event);
2245                 return;
2246         }
2247
2248         /*
2249          * Should not happen, we validate the ctx is still alive before calling.
2250          */
2251         if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2252                 return;
2253
2254         /*
2255          * Installing events is tricky because we cannot rely on ctx->is_active
2256          * to be set in case this is the nr_events 0 -> 1 transition.
2257          */
2258 again:
2259         /*
2260          * Cannot use task_function_call() because we need to run on the task's
2261          * CPU regardless of whether its current or not.
2262          */
2263         if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2264                 return;
2265
2266         raw_spin_lock_irq(&ctx->lock);
2267         task = ctx->task;
2268         if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2269                 /*
2270                  * Cannot happen because we already checked above (which also
2271                  * cannot happen), and we hold ctx->mutex, which serializes us
2272                  * against perf_event_exit_task_context().
2273                  */
2274                 raw_spin_unlock_irq(&ctx->lock);
2275                 return;
2276         }
2277         raw_spin_unlock_irq(&ctx->lock);
2278         /*
2279          * Since !ctx->is_active doesn't mean anything, we must IPI
2280          * unconditionally.
2281          */
2282         goto again;
2283 }
2284
2285 /*
2286  * Put a event into inactive state and update time fields.
2287  * Enabling the leader of a group effectively enables all
2288  * the group members that aren't explicitly disabled, so we
2289  * have to update their ->tstamp_enabled also.
2290  * Note: this works for group members as well as group leaders
2291  * since the non-leader members' sibling_lists will be empty.
2292  */
2293 static void __perf_event_mark_enabled(struct perf_event *event)
2294 {
2295         struct perf_event *sub;
2296         u64 tstamp = perf_event_time(event);
2297
2298         event->state = PERF_EVENT_STATE_INACTIVE;
2299         event->tstamp_enabled = tstamp - event->total_time_enabled;
2300         list_for_each_entry(sub, &event->sibling_list, group_entry) {
2301                 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2302                         sub->tstamp_enabled = tstamp - sub->total_time_enabled;
2303         }
2304 }
2305
2306 /*
2307  * Cross CPU call to enable a performance event
2308  */
2309 static void __perf_event_enable(struct perf_event *event,
2310                                 struct perf_cpu_context *cpuctx,
2311                                 struct perf_event_context *ctx,
2312                                 void *info)
2313 {
2314         struct perf_event *leader = event->group_leader;
2315         struct perf_event_context *task_ctx;
2316
2317         if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2318             event->state <= PERF_EVENT_STATE_ERROR)
2319                 return;
2320
2321         if (ctx->is_active)
2322                 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2323
2324         __perf_event_mark_enabled(event);
2325
2326         if (!ctx->is_active)
2327                 return;
2328
2329         if (!event_filter_match(event)) {
2330                 if (is_cgroup_event(event))
2331                         perf_cgroup_defer_enabled(event);
2332                 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2333                 return;
2334         }
2335
2336         /*
2337          * If the event is in a group and isn't the group leader,
2338          * then don't put it on unless the group is on.
2339          */
2340         if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2341                 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2342                 return;
2343         }
2344
2345         task_ctx = cpuctx->task_ctx;
2346         if (ctx->task)
2347                 WARN_ON_ONCE(task_ctx != ctx);
2348
2349         ctx_resched(cpuctx, task_ctx);
2350 }
2351
2352 /*
2353  * Enable a event.
2354  *
2355  * If event->ctx is a cloned context, callers must make sure that
2356  * every task struct that event->ctx->task could possibly point to
2357  * remains valid.  This condition is satisfied when called through
2358  * perf_event_for_each_child or perf_event_for_each as described
2359  * for perf_event_disable.
2360  */
2361 static void _perf_event_enable(struct perf_event *event)
2362 {
2363         struct perf_event_context *ctx = event->ctx;
2364
2365         raw_spin_lock_irq(&ctx->lock);
2366         if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2367             event->state <  PERF_EVENT_STATE_ERROR) {
2368                 raw_spin_unlock_irq(&ctx->lock);
2369                 return;
2370         }
2371
2372         /*
2373          * If the event is in error state, clear that first.
2374          *
2375          * That way, if we see the event in error state below, we know that it
2376          * has gone back into error state, as distinct from the task having
2377          * been scheduled away before the cross-call arrived.
2378          */
2379         if (event->state == PERF_EVENT_STATE_ERROR)
2380                 event->state = PERF_EVENT_STATE_OFF;
2381         raw_spin_unlock_irq(&ctx->lock);
2382
2383         event_function_call(event, __perf_event_enable, NULL);
2384 }
2385
2386 /*
2387  * See perf_event_disable();
2388  */
2389 void perf_event_enable(struct perf_event *event)
2390 {
2391         struct perf_event_context *ctx;
2392
2393         ctx = perf_event_ctx_lock(event);
2394         _perf_event_enable(event);
2395         perf_event_ctx_unlock(event, ctx);
2396 }
2397 EXPORT_SYMBOL_GPL(perf_event_enable);
2398
2399 struct stop_event_data {
2400         struct perf_event       *event;
2401         unsigned int            restart;
2402 };
2403
2404 static int __perf_event_stop(void *info)
2405 {
2406         struct stop_event_data *sd = info;
2407         struct perf_event *event = sd->event;
2408
2409         /* if it's already INACTIVE, do nothing */
2410         if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2411                 return 0;
2412
2413         /* matches smp_wmb() in event_sched_in() */
2414         smp_rmb();
2415
2416         /*
2417          * There is a window with interrupts enabled before we get here,
2418          * so we need to check again lest we try to stop another CPU's event.
2419          */
2420         if (READ_ONCE(event->oncpu) != smp_processor_id())
2421                 return -EAGAIN;
2422
2423         event->pmu->stop(event, PERF_EF_UPDATE);
2424
2425         /*
2426          * May race with the actual stop (through perf_pmu_output_stop()),
2427          * but it is only used for events with AUX ring buffer, and such
2428          * events will refuse to restart because of rb::aux_mmap_count==0,
2429          * see comments in perf_aux_output_begin().
2430          *
2431          * Since this is happening on a event-local CPU, no trace is lost
2432          * while restarting.
2433          */
2434         if (sd->restart)
2435                 event->pmu->start(event, PERF_EF_START);
2436
2437         return 0;
2438 }
2439
2440 static int perf_event_restart(struct perf_event *event)
2441 {
2442         struct stop_event_data sd = {
2443                 .event          = event,
2444                 .restart        = 1,
2445         };
2446         int ret = 0;
2447
2448         do {
2449                 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2450                         return 0;
2451
2452                 /* matches smp_wmb() in event_sched_in() */
2453                 smp_rmb();
2454
2455                 /*
2456                  * We only want to restart ACTIVE events, so if the event goes
2457                  * inactive here (event->oncpu==-1), there's nothing more to do;
2458                  * fall through with ret==-ENXIO.
2459                  */
2460                 ret = cpu_function_call(READ_ONCE(event->oncpu),
2461                                         __perf_event_stop, &sd);
2462         } while (ret == -EAGAIN);
2463
2464         return ret;
2465 }
2466
2467 /*
2468  * In order to contain the amount of racy and tricky in the address filter
2469  * configuration management, it is a two part process:
2470  *
2471  * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2472  *      we update the addresses of corresponding vmas in
2473  *      event::addr_filters_offs array and bump the event::addr_filters_gen;
2474  * (p2) when an event is scheduled in (pmu::add), it calls
2475  *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2476  *      if the generation has changed since the previous call.
2477  *
2478  * If (p1) happens while the event is active, we restart it to force (p2).
2479  *
2480  * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2481  *     pre-existing mappings, called once when new filters arrive via SET_FILTER
2482  *     ioctl;
2483  * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2484  *     registered mapping, called for every new mmap(), with mm::mmap_sem down
2485  *     for reading;
2486  * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2487  *     of exec.
2488  */
2489 void perf_event_addr_filters_sync(struct perf_event *event)
2490 {
2491         struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2492
2493         if (!has_addr_filter(event))
2494                 return;
2495
2496         raw_spin_lock(&ifh->lock);
2497         if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2498                 event->pmu->addr_filters_sync(event);
2499                 event->hw.addr_filters_gen = event->addr_filters_gen;
2500         }
2501         raw_spin_unlock(&ifh->lock);
2502 }
2503 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
2504
2505 static int _perf_event_refresh(struct perf_event *event, int refresh)
2506 {
2507         /*
2508          * not supported on inherited events
2509          */
2510         if (event->attr.inherit || !is_sampling_event(event))
2511                 return -EINVAL;
2512
2513         atomic_add(refresh, &event->event_limit);
2514         _perf_event_enable(event);
2515
2516         return 0;
2517 }
2518
2519 /*
2520  * See perf_event_disable()
2521  */
2522 int perf_event_refresh(struct perf_event *event, int refresh)
2523 {
2524         struct perf_event_context *ctx;
2525         int ret;
2526
2527         ctx = perf_event_ctx_lock(event);
2528         ret = _perf_event_refresh(event, refresh);
2529         perf_event_ctx_unlock(event, ctx);
2530
2531         return ret;
2532 }
2533 EXPORT_SYMBOL_GPL(perf_event_refresh);
2534
2535 static void ctx_sched_out(struct perf_event_context *ctx,
2536                           struct perf_cpu_context *cpuctx,
2537                           enum event_type_t event_type)
2538 {
2539         int is_active = ctx->is_active;
2540         struct perf_event *event;
2541
2542         lockdep_assert_held(&ctx->lock);
2543
2544         if (likely(!ctx->nr_events)) {
2545                 /*
2546                  * See __perf_remove_from_context().
2547                  */
2548                 WARN_ON_ONCE(ctx->is_active);
2549                 if (ctx->task)
2550                         WARN_ON_ONCE(cpuctx->task_ctx);
2551                 return;
2552         }
2553
2554         ctx->is_active &= ~event_type;
2555         if (!(ctx->is_active & EVENT_ALL))
2556                 ctx->is_active = 0;
2557
2558         if (ctx->task) {
2559                 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2560                 if (!ctx->is_active)
2561                         cpuctx->task_ctx = NULL;
2562         }
2563
2564         /*
2565          * Always update time if it was set; not only when it changes.
2566          * Otherwise we can 'forget' to update time for any but the last
2567          * context we sched out. For example:
2568          *
2569          *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2570          *   ctx_sched_out(.event_type = EVENT_PINNED)
2571          *
2572          * would only update time for the pinned events.
2573          */
2574         if (is_active & EVENT_TIME) {
2575                 /* update (and stop) ctx time */
2576                 update_context_time(ctx);
2577                 update_cgrp_time_from_cpuctx(cpuctx);
2578         }
2579
2580         is_active ^= ctx->is_active; /* changed bits */
2581
2582         if (!ctx->nr_active || !(is_active & EVENT_ALL))
2583                 return;
2584
2585         perf_pmu_disable(ctx->pmu);
2586         if (is_active & EVENT_PINNED) {
2587                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2588                         group_sched_out(event, cpuctx, ctx);
2589         }
2590
2591         if (is_active & EVENT_FLEXIBLE) {
2592                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2593                         group_sched_out(event, cpuctx, ctx);
2594         }
2595         perf_pmu_enable(ctx->pmu);
2596 }
2597
2598 /*
2599  * Test whether two contexts are equivalent, i.e. whether they have both been
2600  * cloned from the same version of the same context.
2601  *
2602  * Equivalence is measured using a generation number in the context that is
2603  * incremented on each modification to it; see unclone_ctx(), list_add_event()
2604  * and list_del_event().
2605  */
2606 static int context_equiv(struct perf_event_context *ctx1,
2607                          struct perf_event_context *ctx2)
2608 {
2609         lockdep_assert_held(&ctx1->lock);
2610         lockdep_assert_held(&ctx2->lock);
2611
2612         /* Pinning disables the swap optimization */
2613         if (ctx1->pin_count || ctx2->pin_count)
2614                 return 0;
2615
2616         /* If ctx1 is the parent of ctx2 */
2617         if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2618                 return 1;
2619
2620         /* If ctx2 is the parent of ctx1 */
2621         if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2622                 return 1;
2623
2624         /*
2625          * If ctx1 and ctx2 have the same parent; we flatten the parent
2626          * hierarchy, see perf_event_init_context().
2627          */
2628         if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2629                         ctx1->parent_gen == ctx2->parent_gen)
2630                 return 1;
2631
2632         /* Unmatched */
2633         return 0;
2634 }
2635
2636 static void __perf_event_sync_stat(struct perf_event *event,
2637                                      struct perf_event *next_event)
2638 {
2639         u64 value;
2640
2641         if (!event->attr.inherit_stat)
2642                 return;
2643
2644         /*
2645          * Update the event value, we cannot use perf_event_read()
2646          * because we're in the middle of a context switch and have IRQs
2647          * disabled, which upsets smp_call_function_single(), however
2648          * we know the event must be on the current CPU, therefore we
2649          * don't need to use it.
2650          */
2651         switch (event->state) {
2652         case PERF_EVENT_STATE_ACTIVE:
2653                 event->pmu->read(event);
2654                 /* fall-through */
2655
2656         case PERF_EVENT_STATE_INACTIVE:
2657                 update_event_times(event);
2658                 break;
2659
2660         default:
2661                 break;
2662         }
2663
2664         /*
2665          * In order to keep per-task stats reliable we need to flip the event
2666          * values when we flip the contexts.
2667          */
2668         value = local64_read(&next_event->count);
2669         value = local64_xchg(&event->count, value);
2670         local64_set(&next_event->count, value);
2671
2672         swap(event->total_time_enabled, next_event->total_time_enabled);
2673         swap(event->total_time_running, next_event->total_time_running);
2674
2675         /*
2676          * Since we swizzled the values, update the user visible data too.
2677          */
2678         perf_event_update_userpage(event);
2679         perf_event_update_userpage(next_event);
2680 }
2681
2682 static void perf_event_sync_stat(struct perf_event_context *ctx,
2683                                    struct perf_event_context *next_ctx)
2684 {
2685         struct perf_event *event, *next_event;
2686
2687         if (!ctx->nr_stat)
2688                 return;
2689
2690         update_context_time(ctx);
2691
2692         event = list_first_entry(&ctx->event_list,
2693                                    struct perf_event, event_entry);
2694
2695         next_event = list_first_entry(&next_ctx->event_list,
2696                                         struct perf_event, event_entry);
2697
2698         while (&event->event_entry != &ctx->event_list &&
2699                &next_event->event_entry != &next_ctx->event_list) {
2700
2701                 __perf_event_sync_stat(event, next_event);
2702
2703                 event = list_next_entry(event, event_entry);
2704                 next_event = list_next_entry(next_event, event_entry);
2705         }
2706 }
2707
2708 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2709                                          struct task_struct *next)
2710 {
2711         struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2712         struct perf_event_context *next_ctx;
2713         struct perf_event_context *parent, *next_parent;
2714         struct perf_cpu_context *cpuctx;
2715         int do_switch = 1;
2716
2717         if (likely(!ctx))
2718                 return;
2719
2720         cpuctx = __get_cpu_context(ctx);
2721         if (!cpuctx->task_ctx)
2722                 return;
2723
2724         rcu_read_lock();
2725         next_ctx = next->perf_event_ctxp[ctxn];
2726         if (!next_ctx)
2727                 goto unlock;
2728
2729         parent = rcu_dereference(ctx->parent_ctx);
2730         next_parent = rcu_dereference(next_ctx->parent_ctx);
2731
2732         /* If neither context have a parent context; they cannot be clones. */
2733         if (!parent && !next_parent)
2734                 goto unlock;
2735
2736         if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2737                 /*
2738                  * Looks like the two contexts are clones, so we might be
2739                  * able to optimize the context switch.  We lock both
2740                  * contexts and check that they are clones under the
2741                  * lock (including re-checking that neither has been
2742                  * uncloned in the meantime).  It doesn't matter which
2743                  * order we take the locks because no other cpu could
2744                  * be trying to lock both of these tasks.
2745                  */
2746                 raw_spin_lock(&ctx->lock);
2747                 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2748                 if (context_equiv(ctx, next_ctx)) {
2749                         WRITE_ONCE(ctx->task, next);
2750                         WRITE_ONCE(next_ctx->task, task);
2751
2752                         swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2753
2754                         /*
2755                          * RCU_INIT_POINTER here is safe because we've not
2756                          * modified the ctx and the above modification of
2757                          * ctx->task and ctx->task_ctx_data are immaterial
2758                          * since those values are always verified under
2759                          * ctx->lock which we're now holding.
2760                          */
2761                         RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2762                         RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2763
2764                         do_switch = 0;
2765
2766                         perf_event_sync_stat(ctx, next_ctx);
2767                 }
2768                 raw_spin_unlock(&next_ctx->lock);
2769                 raw_spin_unlock(&ctx->lock);
2770         }
2771 unlock:
2772         rcu_read_unlock();
2773
2774         if (do_switch) {
2775                 raw_spin_lock(&ctx->lock);
2776                 task_ctx_sched_out(cpuctx, ctx);
2777                 raw_spin_unlock(&ctx->lock);
2778         }
2779 }
2780
2781 void perf_sched_cb_dec(struct pmu *pmu)
2782 {
2783         this_cpu_dec(perf_sched_cb_usages);
2784 }
2785
2786 void perf_sched_cb_inc(struct pmu *pmu)
2787 {
2788         this_cpu_inc(perf_sched_cb_usages);
2789 }
2790
2791 /*
2792  * This function provides the context switch callback to the lower code
2793  * layer. It is invoked ONLY when the context switch callback is enabled.
2794  */
2795 static void perf_pmu_sched_task(struct task_struct *prev,
2796                                 struct task_struct *next,
2797                                 bool sched_in)
2798 {
2799         struct perf_cpu_context *cpuctx;
2800         struct pmu *pmu;
2801         unsigned long flags;
2802
2803         if (prev == next)
2804                 return;
2805
2806         local_irq_save(flags);
2807
2808         rcu_read_lock();
2809
2810         list_for_each_entry_rcu(pmu, &pmus, entry) {
2811                 if (pmu->sched_task) {
2812                         cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2813
2814                         perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2815
2816                         perf_pmu_disable(pmu);
2817
2818                         pmu->sched_task(cpuctx->task_ctx, sched_in);
2819
2820                         perf_pmu_enable(pmu);
2821
2822                         perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2823                 }
2824         }
2825
2826         rcu_read_unlock();
2827
2828         local_irq_restore(flags);
2829 }
2830
2831 static void perf_event_switch(struct task_struct *task,
2832                               struct task_struct *next_prev, bool sched_in);
2833
2834 #define for_each_task_context_nr(ctxn)                                  \
2835         for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2836
2837 /*
2838  * Called from scheduler to remove the events of the current task,
2839  * with interrupts disabled.
2840  *
2841  * We stop each event and update the event value in event->count.
2842  *
2843  * This does not protect us against NMI, but disable()
2844  * sets the disabled bit in the control field of event _before_
2845  * accessing the event control register. If a NMI hits, then it will
2846  * not restart the event.
2847  */
2848 void __perf_event_task_sched_out(struct task_struct *task,
2849                                  struct task_struct *next)
2850 {
2851         int ctxn;
2852
2853         if (__this_cpu_read(perf_sched_cb_usages))
2854                 perf_pmu_sched_task(task, next, false);
2855
2856         if (atomic_read(&nr_switch_events))
2857                 perf_event_switch(task, next, false);
2858
2859         for_each_task_context_nr(ctxn)
2860                 perf_event_context_sched_out(task, ctxn, next);
2861
2862         /*
2863          * if cgroup events exist on this CPU, then we need
2864          * to check if we have to switch out PMU state.
2865          * cgroup event are system-wide mode only
2866          */
2867         if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2868                 perf_cgroup_sched_out(task, next);
2869 }
2870
2871 /*
2872  * Called with IRQs disabled
2873  */
2874 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2875                               enum event_type_t event_type)
2876 {
2877         ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2878 }
2879
2880 static void
2881 ctx_pinned_sched_in(struct perf_event_context *ctx,
2882                     struct perf_cpu_context *cpuctx)
2883 {
2884         struct perf_event *event;
2885
2886         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2887                 if (event->state <= PERF_EVENT_STATE_OFF)
2888                         continue;
2889                 if (!event_filter_match(event))
2890                         continue;
2891
2892                 /* may need to reset tstamp_enabled */
2893                 if (is_cgroup_event(event))
2894                         perf_cgroup_mark_enabled(event, ctx);
2895
2896                 if (group_can_go_on(event, cpuctx, 1))
2897                         group_sched_in(event, cpuctx, ctx);
2898
2899                 /*
2900                  * If this pinned group hasn't been scheduled,
2901                  * put it in error state.
2902                  */
2903                 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2904                         update_group_times(event);
2905                         event->state = PERF_EVENT_STATE_ERROR;
2906                 }
2907         }
2908 }
2909
2910 static void
2911 ctx_flexible_sched_in(struct perf_event_context *ctx,
2912                       struct perf_cpu_context *cpuctx)
2913 {
2914         struct perf_event *event;
2915         int can_add_hw = 1;
2916
2917         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2918                 /* Ignore events in OFF or ERROR state */
2919                 if (event->state <= PERF_EVENT_STATE_OFF)
2920                         continue;
2921                 /*
2922                  * Listen to the 'cpu' scheduling filter constraint
2923                  * of events:
2924                  */
2925                 if (!event_filter_match(event))
2926                         continue;
2927
2928                 /* may need to reset tstamp_enabled */
2929                 if (is_cgroup_event(event))
2930                         perf_cgroup_mark_enabled(event, ctx);
2931
2932                 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2933                         if (group_sched_in(event, cpuctx, ctx))
2934                                 can_add_hw = 0;
2935                 }
2936         }
2937 }
2938
2939 static void
2940 ctx_sched_in(struct perf_event_context *ctx,
2941              struct perf_cpu_context *cpuctx,
2942              enum event_type_t event_type,
2943              struct task_struct *task)
2944 {
2945         int is_active = ctx->is_active;
2946         u64 now;
2947
2948         lockdep_assert_held(&ctx->lock);
2949
2950         if (likely(!ctx->nr_events))
2951                 return;
2952
2953         ctx->is_active |= (event_type | EVENT_TIME);
2954         if (ctx->task) {
2955                 if (!is_active)
2956                         cpuctx->task_ctx = ctx;
2957                 else
2958                         WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2959         }
2960
2961         is_active ^= ctx->is_active; /* changed bits */
2962
2963         if (is_active & EVENT_TIME) {
2964                 /* start ctx time */
2965                 now = perf_clock();
2966                 ctx->timestamp = now;
2967                 perf_cgroup_set_timestamp(task, ctx);
2968         }
2969
2970         /*
2971          * First go through the list and put on any pinned groups
2972          * in order to give them the best chance of going on.
2973          */
2974         if (is_active & EVENT_PINNED)
2975                 ctx_pinned_sched_in(ctx, cpuctx);
2976
2977         /* Then walk through the lower prio flexible groups */
2978         if (is_active & EVENT_FLEXIBLE)
2979                 ctx_flexible_sched_in(ctx, cpuctx);
2980 }
2981
2982 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2983                              enum event_type_t event_type,
2984                              struct task_struct *task)
2985 {
2986         struct perf_event_context *ctx = &cpuctx->ctx;
2987
2988         ctx_sched_in(ctx, cpuctx, event_type, task);
2989 }
2990
2991 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2992                                         struct task_struct *task)
2993 {
2994         struct perf_cpu_context *cpuctx;
2995
2996         cpuctx = __get_cpu_context(ctx);
2997         if (cpuctx->task_ctx == ctx)
2998                 return;
2999
3000         perf_ctx_lock(cpuctx, ctx);
3001         perf_pmu_disable(ctx->pmu);
3002         /*
3003          * We want to keep the following priority order:
3004          * cpu pinned (that don't need to move), task pinned,
3005          * cpu flexible, task flexible.
3006          */
3007         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3008         perf_event_sched_in(cpuctx, ctx, task);
3009         perf_pmu_enable(ctx->pmu);
3010         perf_ctx_unlock(cpuctx, ctx);
3011 }
3012
3013 /*
3014  * Called from scheduler to add the events of the current task
3015  * with interrupts disabled.
3016  *
3017  * We restore the event value and then enable it.
3018  *
3019  * This does not protect us against NMI, but enable()
3020  * sets the enabled bit in the control field of event _before_
3021  * accessing the event control register. If a NMI hits, then it will
3022  * keep the event running.
3023  */
3024 void __perf_event_task_sched_in(struct task_struct *prev,
3025                                 struct task_struct *task)
3026 {
3027         struct perf_event_context *ctx;
3028         int ctxn;
3029
3030         /*
3031          * If cgroup events exist on this CPU, then we need to check if we have
3032          * to switch in PMU state; cgroup event are system-wide mode only.
3033          *
3034          * Since cgroup events are CPU events, we must schedule these in before
3035          * we schedule in the task events.
3036          */
3037         if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3038                 perf_cgroup_sched_in(prev, task);
3039
3040         for_each_task_context_nr(ctxn) {
3041                 ctx = task->perf_event_ctxp[ctxn];
3042                 if (likely(!ctx))
3043                         continue;
3044
3045                 perf_event_context_sched_in(ctx, task);
3046         }
3047
3048         if (atomic_read(&nr_switch_events))
3049                 perf_event_switch(task, prev, true);
3050
3051         if (__this_cpu_read(perf_sched_cb_usages))
3052                 perf_pmu_sched_task(prev, task, true);
3053 }
3054
3055 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3056 {
3057         u64 frequency = event->attr.sample_freq;
3058         u64 sec = NSEC_PER_SEC;
3059         u64 divisor, dividend;
3060
3061         int count_fls, nsec_fls, frequency_fls, sec_fls;
3062
3063         count_fls = fls64(count);
3064         nsec_fls = fls64(nsec);
3065         frequency_fls = fls64(frequency);
3066         sec_fls = 30;
3067
3068         /*
3069          * We got @count in @nsec, with a target of sample_freq HZ
3070          * the target period becomes:
3071          *
3072          *             @count * 10^9
3073          * period = -------------------
3074          *          @nsec * sample_freq
3075          *
3076          */
3077
3078         /*
3079          * Reduce accuracy by one bit such that @a and @b converge
3080          * to a similar magnitude.
3081          */
3082 #define REDUCE_FLS(a, b)                \
3083 do {                                    \
3084         if (a##_fls > b##_fls) {        \
3085                 a >>= 1;                \
3086                 a##_fls--;              \
3087         } else {                        \
3088                 b >>= 1;                \
3089                 b##_fls--;              \
3090         }                               \
3091 } while (0)
3092
3093         /*
3094          * Reduce accuracy until either term fits in a u64, then proceed with
3095          * the other, so that finally we can do a u64/u64 division.
3096          */
3097         while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3098                 REDUCE_FLS(nsec, frequency);
3099                 REDUCE_FLS(sec, count);
3100         }
3101
3102         if (count_fls + sec_fls > 64) {
3103                 divisor = nsec * frequency;
3104
3105                 while (count_fls + sec_fls > 64) {
3106                         REDUCE_FLS(count, sec);
3107                         divisor >>= 1;
3108                 }
3109
3110                 dividend = count * sec;
3111         } else {
3112                 dividend = count * sec;
3113
3114                 while (nsec_fls + frequency_fls > 64) {
3115                         REDUCE_FLS(nsec, frequency);
3116                         dividend >>= 1;
3117                 }
3118
3119                 divisor = nsec * frequency;
3120         }
3121
3122         if (!divisor)
3123                 return dividend;
3124
3125         return div64_u64(dividend, divisor);
3126 }
3127
3128 static DEFINE_PER_CPU(int, perf_throttled_count);
3129 static DEFINE_PER_CPU(u64, perf_throttled_seq);
3130
3131 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
3132 {
3133         struct hw_perf_event *hwc = &event->hw;
3134         s64 period, sample_period;
3135         s64 delta;
3136
3137         period = perf_calculate_period(event, nsec, count);
3138
3139         delta = (s64)(period - hwc->sample_period);
3140         delta = (delta + 7) / 8; /* low pass filter */
3141
3142         sample_period = hwc->sample_period + delta;
3143
3144         if (!sample_period)
3145                 sample_period = 1;
3146
3147         hwc->sample_period = sample_period;
3148
3149         if (local64_read(&hwc->period_left) > 8*sample_period) {
3150                 if (disable)
3151                         event->pmu->stop(event, PERF_EF_UPDATE);
3152
3153                 local64_set(&hwc->period_left, 0);
3154
3155                 if (disable)
3156                         event->pmu->start(event, PERF_EF_RELOAD);
3157         }
3158 }
3159
3160 /*
3161  * combine freq adjustment with unthrottling to avoid two passes over the
3162  * events. At the same time, make sure, having freq events does not change
3163  * the rate of unthrottling as that would introduce bias.
3164  */
3165 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3166                                            int needs_unthr)
3167 {
3168         struct perf_event *event;
3169         struct hw_perf_event *hwc;
3170         u64 now, period = TICK_NSEC;
3171         s64 delta;
3172
3173         /*
3174          * only need to iterate over all events iff:
3175          * - context have events in frequency mode (needs freq adjust)
3176          * - there are events to unthrottle on this cpu
3177          */
3178         if (!(ctx->nr_freq || needs_unthr))
3179                 return;
3180
3181         raw_spin_lock(&ctx->lock);
3182         perf_pmu_disable(ctx->pmu);
3183
3184         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3185                 if (event->state != PERF_EVENT_STATE_ACTIVE)
3186                         continue;
3187
3188                 if (!event_filter_match(event))
3189                         continue;
3190
3191                 perf_pmu_disable(event->pmu);
3192
3193                 hwc = &event->hw;
3194
3195                 if (hwc->interrupts == MAX_INTERRUPTS) {
3196                         hwc->interrupts = 0;
3197                         perf_log_throttle(event, 1);
3198                         event->pmu->start(event, 0);
3199                 }
3200
3201                 if (!event->attr.freq || !event->attr.sample_freq)
3202                         goto next;
3203
3204                 /*
3205                  * stop the event and update event->count
3206                  */
3207                 event->pmu->stop(event, PERF_EF_UPDATE);
3208
3209                 now = local64_read(&event->count);
3210                 delta = now - hwc->freq_count_stamp;
3211                 hwc->freq_count_stamp = now;
3212
3213                 /*
3214                  * restart the event
3215                  * reload only if value has changed
3216                  * we have stopped the event so tell that
3217                  * to perf_adjust_period() to avoid stopping it
3218                  * twice.
3219                  */
3220                 if (delta > 0)
3221                         perf_adjust_period(event, period, delta, false);
3222
3223                 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
3224         next:
3225                 perf_pmu_enable(event->pmu);
3226         }
3227
3228         perf_pmu_enable(ctx->pmu);
3229         raw_spin_unlock(&ctx->lock);
3230 }
3231
3232 /*
3233  * Round-robin a context's events:
3234  */
3235 static void rotate_ctx(struct perf_event_context *ctx)
3236 {
3237         /*
3238          * Rotate the first entry last of non-pinned groups. Rotation might be
3239          * disabled by the inheritance code.
3240          */
3241         if (!ctx->rotate_disable)
3242                 list_rotate_left(&ctx->flexible_groups);
3243 }
3244
3245 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
3246 {
3247         struct perf_event_context *ctx = NULL;
3248         int rotate = 0;
3249
3250         if (cpuctx->ctx.nr_events) {
3251                 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3252                         rotate = 1;
3253         }
3254
3255         ctx = cpuctx->task_ctx;
3256         if (ctx && ctx->nr_events) {
3257                 if (ctx->nr_events != ctx->nr_active)
3258                         rotate = 1;
3259         }
3260
3261         if (!rotate)
3262                 goto done;
3263
3264         perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3265         perf_pmu_disable(cpuctx->ctx.pmu);
3266
3267         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3268         if (ctx)
3269                 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
3270
3271         rotate_ctx(&cpuctx->ctx);
3272         if (ctx)
3273                 rotate_ctx(ctx);
3274
3275         perf_event_sched_in(cpuctx, ctx, current);
3276
3277         perf_pmu_enable(cpuctx->ctx.pmu);
3278         perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3279 done:
3280
3281         return rotate;
3282 }
3283
3284 void perf_event_task_tick(void)
3285 {
3286         struct list_head *head = this_cpu_ptr(&active_ctx_list);
3287         struct perf_event_context *ctx, *tmp;
3288         int throttled;
3289
3290         WARN_ON(!irqs_disabled());
3291
3292         __this_cpu_inc(perf_throttled_seq);
3293         throttled = __this_cpu_xchg(perf_throttled_count, 0);
3294         tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
3295
3296         list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
3297                 perf_adjust_freq_unthr_context(ctx, throttled);
3298 }
3299
3300 static int event_enable_on_exec(struct perf_event *event,
3301                                 struct perf_event_context *ctx)
3302 {
3303         if (!event->attr.enable_on_exec)
3304                 return 0;
3305
3306         event->attr.enable_on_exec = 0;
3307         if (event->state >= PERF_EVENT_STATE_INACTIVE)
3308                 return 0;
3309
3310         __perf_event_mark_enabled(event);
3311
3312         return 1;
3313 }
3314
3315 /*
3316  * Enable all of a task's events that have been marked enable-on-exec.
3317  * This expects task == current.
3318  */
3319 static void perf_event_enable_on_exec(int ctxn)
3320 {
3321         struct perf_event_context *ctx, *clone_ctx = NULL;
3322         struct perf_cpu_context *cpuctx;
3323         struct perf_event *event;
3324         unsigned long flags;
3325         int enabled = 0;
3326
3327         local_irq_save(flags);
3328         ctx = current->perf_event_ctxp[ctxn];
3329         if (!ctx || !ctx->nr_events)
3330                 goto out;
3331
3332         cpuctx = __get_cpu_context(ctx);
3333         perf_ctx_lock(cpuctx, ctx);
3334         ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3335         list_for_each_entry(event, &ctx->event_list, event_entry)
3336                 enabled |= event_enable_on_exec(event, ctx);
3337
3338         /*
3339          * Unclone and reschedule this context if we enabled any event.
3340          */
3341         if (enabled) {
3342                 clone_ctx = unclone_ctx(ctx);
3343                 ctx_resched(cpuctx, ctx);
3344         }
3345         perf_ctx_unlock(cpuctx, ctx);
3346
3347 out:
3348         local_irq_restore(flags);
3349
3350         if (clone_ctx)
3351                 put_ctx(clone_ctx);
3352 }
3353
3354 struct perf_read_data {
3355         struct perf_event *event;
3356         bool group;
3357         int ret;
3358 };
3359
3360 /*
3361  * Cross CPU call to read the hardware event
3362  */
3363 static void __perf_event_read(void *info)
3364 {
3365         struct perf_read_data *data = info;
3366         struct perf_event *sub, *event = data->event;
3367         struct perf_event_context *ctx = event->ctx;
3368         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3369         struct pmu *pmu = event->pmu;
3370
3371         /*
3372          * If this is a task context, we need to check whether it is
3373          * the current task context of this cpu.  If not it has been
3374          * scheduled out before the smp call arrived.  In that case
3375          * event->count would have been updated to a recent sample
3376          * when the event was scheduled out.
3377          */
3378         if (ctx->task && cpuctx->task_ctx != ctx)
3379                 return;
3380
3381         raw_spin_lock(&ctx->lock);
3382         if (ctx->is_active) {
3383                 update_context_time(ctx);
3384                 update_cgrp_time_from_event(event);
3385         }
3386
3387         update_event_times(event);
3388         if (event->state != PERF_EVENT_STATE_ACTIVE)
3389                 goto unlock;
3390
3391         if (!data->group) {
3392                 pmu->read(event);
3393                 data->ret = 0;
3394                 goto unlock;
3395         }
3396
3397         pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3398
3399         pmu->read(event);
3400
3401         list_for_each_entry(sub, &event->sibling_list, group_entry) {
3402                 update_event_times(sub);
3403                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3404                         /*
3405                          * Use sibling's PMU rather than @event's since
3406                          * sibling could be on different (eg: software) PMU.
3407                          */
3408                         sub->pmu->read(sub);
3409                 }
3410         }
3411
3412         data->ret = pmu->commit_txn(pmu);
3413
3414 unlock:
3415         raw_spin_unlock(&ctx->lock);
3416 }
3417
3418 static inline u64 perf_event_count(struct perf_event *event)
3419 {
3420         if (event->pmu->count)
3421                 return event->pmu->count(event);
3422
3423         return __perf_event_count(event);
3424 }
3425
3426 /*
3427  * NMI-safe method to read a local event, that is an event that
3428  * is:
3429  *   - either for the current task, or for this CPU
3430  *   - does not have inherit set, for inherited task events
3431  *     will not be local and we cannot read them atomically
3432  *   - must not have a pmu::count method
3433  */
3434 u64 perf_event_read_local(struct perf_event *event)
3435 {
3436         unsigned long flags;
3437         u64 val;
3438
3439         /*
3440          * Disabling interrupts avoids all counter scheduling (context
3441          * switches, timer based rotation and IPIs).
3442          */
3443         local_irq_save(flags);
3444
3445         /* If this is a per-task event, it must be for current */
3446         WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3447                      event->hw.target != current);
3448
3449         /* If this is a per-CPU event, it must be for this CPU */
3450         WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3451                      event->cpu != smp_processor_id());
3452
3453         /*
3454          * It must not be an event with inherit set, we cannot read
3455          * all child counters from atomic context.
3456          */
3457         WARN_ON_ONCE(event->attr.inherit);
3458
3459         /*
3460          * It must not have a pmu::count method, those are not
3461          * NMI safe.
3462          */
3463         WARN_ON_ONCE(event->pmu->count);
3464
3465         /*
3466          * If the event is currently on this CPU, its either a per-task event,
3467          * or local to this CPU. Furthermore it means its ACTIVE (otherwise
3468          * oncpu == -1).
3469          */
3470         if (event->oncpu == smp_processor_id())
3471                 event->pmu->read(event);
3472
3473         val = local64_read(&event->count);
3474         local_irq_restore(flags);
3475
3476         return val;
3477 }
3478
3479 static int perf_event_read(struct perf_event *event, bool group)
3480 {
3481         int ret = 0;
3482
3483         /*
3484          * If event is enabled and currently active on a CPU, update the
3485          * value in the event structure:
3486          */
3487         if (event->state == PERF_EVENT_STATE_ACTIVE) {
3488                 struct perf_read_data data = {
3489                         .event = event,
3490                         .group = group,
3491                         .ret = 0,
3492                 };
3493                 smp_call_function_single(event->oncpu,
3494                                          __perf_event_read, &data, 1);
3495                 ret = data.ret;
3496         } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3497                 struct perf_event_context *ctx = event->ctx;
3498                 unsigned long flags;
3499
3500                 raw_spin_lock_irqsave(&ctx->lock, flags);
3501                 /*
3502                  * may read while context is not active
3503                  * (e.g., thread is blocked), in that case
3504                  * we cannot update context time
3505                  */
3506                 if (ctx->is_active) {
3507                         update_context_time(ctx);
3508                         update_cgrp_time_from_event(event);
3509                 }
3510                 if (group)
3511                         update_group_times(event);
3512                 else
3513                         update_event_times(event);
3514                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3515         }
3516
3517         return ret;
3518 }
3519
3520 /*
3521  * Initialize the perf_event context in a task_struct:
3522  */
3523 static void __perf_event_init_context(struct perf_event_context *ctx)
3524 {
3525         raw_spin_lock_init(&ctx->lock);
3526         mutex_init(&ctx->mutex);
3527         INIT_LIST_HEAD(&ctx->active_ctx_list);
3528         INIT_LIST_HEAD(&ctx->pinned_groups);
3529         INIT_LIST_HEAD(&ctx->flexible_groups);
3530         INIT_LIST_HEAD(&ctx->event_list);
3531         atomic_set(&ctx->refcount, 1);
3532 }
3533
3534 static struct perf_event_context *
3535 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3536 {
3537         struct perf_event_context *ctx;
3538
3539         ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3540         if (!ctx)
3541                 return NULL;
3542
3543         __perf_event_init_context(ctx);
3544         if (task) {
3545                 ctx->task = task;
3546                 get_task_struct(task);
3547         }
3548         ctx->pmu = pmu;
3549
3550         return ctx;
3551 }
3552
3553 static struct task_struct *
3554 find_lively_task_by_vpid(pid_t vpid)
3555 {
3556         struct task_struct *task;
3557
3558         rcu_read_lock();
3559         if (!vpid)
3560                 task = current;
3561         else
3562                 task = find_task_by_vpid(vpid);
3563         if (task)
3564                 get_task_struct(task);
3565         rcu_read_unlock();
3566
3567         if (!task)
3568                 return ERR_PTR(-ESRCH);
3569
3570         return task;
3571 }
3572
3573 /*
3574  * Returns a matching context with refcount and pincount.
3575  */
3576 static struct perf_event_context *
3577 find_get_context(struct pmu *pmu, struct task_struct *task,
3578                 struct perf_event *event)
3579 {
3580         struct perf_event_context *ctx, *clone_ctx = NULL;
3581         struct perf_cpu_context *cpuctx;
3582         void *task_ctx_data = NULL;
3583         unsigned long flags;
3584         int ctxn, err;
3585         int cpu = event->cpu;
3586
3587         if (!task) {
3588                 /* Must be root to operate on a CPU event: */
3589                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3590                         return ERR_PTR(-EACCES);
3591
3592                 /*
3593                  * We could be clever and allow to attach a event to an
3594                  * offline CPU and activate it when the CPU comes up, but
3595                  * that's for later.
3596                  */
3597                 if (!cpu_online(cpu))
3598                         return ERR_PTR(-ENODEV);
3599
3600                 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3601                 ctx = &cpuctx->ctx;
3602                 get_ctx(ctx);
3603                 ++ctx->pin_count;
3604
3605                 return ctx;
3606         }
3607
3608         err = -EINVAL;
3609         ctxn = pmu->task_ctx_nr;
3610         if (ctxn < 0)
3611                 goto errout;
3612
3613         if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3614                 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3615                 if (!task_ctx_data) {
3616                         err = -ENOMEM;
3617                         goto errout;
3618                 }
3619         }
3620
3621 retry:
3622         ctx = perf_lock_task_context(task, ctxn, &flags);
3623         if (ctx) {
3624                 clone_ctx = unclone_ctx(ctx);
3625                 ++ctx->pin_count;
3626
3627                 if (task_ctx_data && !ctx->task_ctx_data) {
3628                         ctx->task_ctx_data = task_ctx_data;
3629                         task_ctx_data = NULL;
3630                 }
3631                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3632
3633                 if (clone_ctx)
3634                         put_ctx(clone_ctx);
3635         } else {
3636                 ctx = alloc_perf_context(pmu, task);
3637                 err = -ENOMEM;
3638                 if (!ctx)
3639                         goto errout;
3640
3641                 if (task_ctx_data) {
3642                         ctx->task_ctx_data = task_ctx_data;
3643                         task_ctx_data = NULL;
3644                 }
3645
3646                 err = 0;
3647                 mutex_lock(&task->perf_event_mutex);
3648                 /*
3649                  * If it has already passed perf_event_exit_task().
3650                  * we must see PF_EXITING, it takes this mutex too.
3651                  */
3652                 if (task->flags & PF_EXITING)
3653                         err = -ESRCH;
3654                 else if (task->perf_event_ctxp[ctxn])
3655                         err = -EAGAIN;
3656                 else {
3657                         get_ctx(ctx);
3658                         ++ctx->pin_count;
3659                         rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3660                 }
3661                 mutex_unlock(&task->perf_event_mutex);
3662
3663                 if (unlikely(err)) {
3664                         put_ctx(ctx);
3665
3666                         if (err == -EAGAIN)
3667                                 goto retry;
3668                         goto errout;
3669                 }
3670         }
3671
3672         kfree(task_ctx_data);
3673         return ctx;
3674
3675 errout:
3676         kfree(task_ctx_data);
3677         return ERR_PTR(err);
3678 }
3679
3680 static void perf_event_free_filter(struct perf_event *event);
3681 static void perf_event_free_bpf_prog(struct perf_event *event);
3682
3683 static void free_event_rcu(struct rcu_head *head)
3684 {
3685         struct perf_event *event;
3686
3687         event = container_of(head, struct perf_event, rcu_head);
3688         if (event->ns)
3689                 put_pid_ns(event->ns);
3690         perf_event_free_filter(event);
3691         kfree(event);
3692 }
3693
3694 static void ring_buffer_attach(struct perf_event *event,
3695                                struct ring_buffer *rb);
3696
3697 static void detach_sb_event(struct perf_event *event)
3698 {
3699         struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3700
3701         raw_spin_lock(&pel->lock);
3702         list_del_rcu(&event->sb_list);
3703         raw_spin_unlock(&pel->lock);
3704 }
3705
3706 static bool is_sb_event(struct perf_event *event)
3707 {
3708         struct perf_event_attr *attr = &event->attr;
3709
3710         if (event->parent)
3711                 return false;
3712
3713         if (event->attach_state & PERF_ATTACH_TASK)
3714                 return false;
3715
3716         if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3717             attr->comm || attr->comm_exec ||
3718             attr->task ||
3719             attr->context_switch)
3720                 return true;
3721         return false;
3722 }
3723
3724 static void unaccount_pmu_sb_event(struct perf_event *event)
3725 {
3726         if (is_sb_event(event))
3727                 detach_sb_event(event);
3728 }
3729
3730 static void unaccount_event_cpu(struct perf_event *event, int cpu)
3731 {
3732         if (event->parent)
3733                 return;
3734
3735         if (is_cgroup_event(event))
3736                 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3737 }
3738
3739 #ifdef CONFIG_NO_HZ_FULL
3740 static DEFINE_SPINLOCK(nr_freq_lock);
3741 #endif
3742
3743 static void unaccount_freq_event_nohz(void)
3744 {
3745 #ifdef CONFIG_NO_HZ_FULL
3746         spin_lock(&nr_freq_lock);
3747         if (atomic_dec_and_test(&nr_freq_events))
3748                 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3749         spin_unlock(&nr_freq_lock);
3750 #endif
3751 }
3752
3753 static void unaccount_freq_event(void)
3754 {
3755         if (tick_nohz_full_enabled())
3756                 unaccount_freq_event_nohz();
3757         else
3758                 atomic_dec(&nr_freq_events);
3759 }
3760
3761 static void unaccount_event(struct perf_event *event)
3762 {
3763         bool dec = false;
3764
3765         if (event->parent)
3766                 return;
3767
3768         if (event->attach_state & PERF_ATTACH_TASK)
3769                 dec = true;
3770         if (event->attr.mmap || event->attr.mmap_data)
3771                 atomic_dec(&nr_mmap_events);
3772         if (event->attr.comm)
3773                 atomic_dec(&nr_comm_events);
3774         if (event->attr.task)
3775                 atomic_dec(&nr_task_events);
3776         if (event->attr.freq)
3777                 unaccount_freq_event();
3778         if (event->attr.context_switch) {
3779                 dec = true;
3780                 atomic_dec(&nr_switch_events);
3781         }
3782         if (is_cgroup_event(event))
3783                 dec = true;
3784         if (has_branch_stack(event))
3785                 dec = true;
3786
3787         if (dec) {
3788                 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3789                         schedule_delayed_work(&perf_sched_work, HZ);
3790         }
3791
3792         unaccount_event_cpu(event, event->cpu);
3793
3794         unaccount_pmu_sb_event(event);
3795 }
3796
3797 static void perf_sched_delayed(struct work_struct *work)
3798 {
3799         mutex_lock(&perf_sched_mutex);
3800         if (atomic_dec_and_test(&perf_sched_count))
3801                 static_branch_disable(&perf_sched_events);
3802         mutex_unlock(&perf_sched_mutex);
3803 }
3804
3805 /*
3806  * The following implement mutual exclusion of events on "exclusive" pmus
3807  * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3808  * at a time, so we disallow creating events that might conflict, namely:
3809  *
3810  *  1) cpu-wide events in the presence of per-task events,
3811  *  2) per-task events in the presence of cpu-wide events,
3812  *  3) two matching events on the same context.
3813  *
3814  * The former two cases are handled in the allocation path (perf_event_alloc(),
3815  * _free_event()), the latter -- before the first perf_install_in_context().
3816  */
3817 static int exclusive_event_init(struct perf_event *event)
3818 {
3819         struct pmu *pmu = event->pmu;
3820
3821         if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3822                 return 0;
3823
3824         /*
3825          * Prevent co-existence of per-task and cpu-wide events on the
3826          * same exclusive pmu.
3827          *
3828          * Negative pmu::exclusive_cnt means there are cpu-wide
3829          * events on this "exclusive" pmu, positive means there are
3830          * per-task events.
3831          *
3832          * Since this is called in perf_event_alloc() path, event::ctx
3833          * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3834          * to mean "per-task event", because unlike other attach states it
3835          * never gets cleared.
3836          */
3837         if (event->attach_state & PERF_ATTACH_TASK) {
3838                 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3839                         return -EBUSY;
3840         } else {
3841                 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3842                         return -EBUSY;
3843         }
3844
3845         return 0;
3846 }
3847
3848 static void exclusive_event_destroy(struct perf_event *event)
3849 {
3850         struct pmu *pmu = event->pmu;
3851
3852         if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3853                 return;
3854
3855         /* see comment in exclusive_event_init() */
3856         if (event->attach_state & PERF_ATTACH_TASK)
3857                 atomic_dec(&pmu->exclusive_cnt);
3858         else
3859                 atomic_inc(&pmu->exclusive_cnt);
3860 }
3861
3862 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3863 {
3864         if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3865             (e1->cpu == e2->cpu ||
3866              e1->cpu == -1 ||
3867              e2->cpu == -1))
3868                 return true;
3869         return false;
3870 }
3871
3872 /* Called under the same ctx::mutex as perf_install_in_context() */
3873 static bool exclusive_event_installable(struct perf_event *event,
3874                                         struct perf_event_context *ctx)
3875 {
3876         struct perf_event *iter_event;
3877         struct pmu *pmu = event->pmu;
3878
3879         if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3880                 return true;
3881
3882         list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3883                 if (exclusive_event_match(iter_event, event))
3884                         return false;
3885         }
3886
3887         return true;
3888 }
3889
3890 static void perf_addr_filters_splice(struct perf_event *event,
3891                                        struct list_head *head);
3892
3893 static void _free_event(struct perf_event *event)
3894 {
3895         irq_work_sync(&event->pending);
3896
3897         unaccount_event(event);
3898
3899         if (event->rb) {
3900                 /*
3901                  * Can happen when we close an event with re-directed output.
3902                  *
3903                  * Since we have a 0 refcount, perf_mmap_close() will skip
3904                  * over us; possibly making our ring_buffer_put() the last.
3905                  */
3906                 mutex_lock(&event->mmap_mutex);
3907                 ring_buffer_attach(event, NULL);
3908                 mutex_unlock(&event->mmap_mutex);
3909         }
3910
3911         if (is_cgroup_event(event))
3912                 perf_detach_cgroup(event);
3913
3914         if (!event->parent) {
3915                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3916                         put_callchain_buffers();
3917         }
3918
3919         perf_event_free_bpf_prog(event);
3920         perf_addr_filters_splice(event, NULL);
3921         kfree(event->addr_filters_offs);
3922
3923         if (event->destroy)
3924                 event->destroy(event);
3925
3926         if (event->ctx)
3927                 put_ctx(event->ctx);
3928
3929         exclusive_event_destroy(event);
3930         module_put(event->pmu->module);
3931
3932         call_rcu(&event->rcu_head, free_event_rcu);
3933 }
3934
3935 /*
3936  * Used to free events which have a known refcount of 1, such as in error paths
3937  * where the event isn't exposed yet and inherited events.
3938  */
3939 static void free_event(struct perf_event *event)
3940 {
3941         if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3942                                 "unexpected event refcount: %ld; ptr=%p\n",
3943                                 atomic_long_read(&event->refcount), event)) {
3944                 /* leak to avoid use-after-free */
3945                 return;
3946         }
3947
3948         _free_event(event);
3949 }
3950
3951 /*
3952  * Remove user event from the owner task.
3953  */
3954 static void perf_remove_from_owner(struct perf_event *event)
3955 {
3956         struct task_struct *owner;
3957
3958         rcu_read_lock();
3959         /*
3960          * Matches the smp_store_release() in perf_event_exit_task(). If we
3961          * observe !owner it means the list deletion is complete and we can
3962          * indeed free this event, otherwise we need to serialize on
3963          * owner->perf_event_mutex.
3964          */
3965         owner = lockless_dereference(event->owner);
3966         if (owner) {
3967                 /*
3968                  * Since delayed_put_task_struct() also drops the last
3969                  * task reference we can safely take a new reference
3970                  * while holding the rcu_read_lock().
3971                  */
3972                 get_task_struct(owner);
3973         }
3974         rcu_read_unlock();
3975
3976         if (owner) {
3977                 /*
3978                  * If we're here through perf_event_exit_task() we're already
3979                  * holding ctx->mutex which would be an inversion wrt. the
3980                  * normal lock order.
3981                  *
3982                  * However we can safely take this lock because its the child
3983                  * ctx->mutex.
3984                  */
3985                 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3986
3987                 /*
3988                  * We have to re-check the event->owner field, if it is cleared
3989                  * we raced with perf_event_exit_task(), acquiring the mutex
3990                  * ensured they're done, and we can proceed with freeing the
3991                  * event.
3992                  */
3993                 if (event->owner) {
3994                         list_del_init(&event->owner_entry);
3995                         smp_store_release(&event->owner, NULL);
3996                 }
3997                 mutex_unlock(&owner->perf_event_mutex);
3998                 put_task_struct(owner);
3999         }
4000 }
4001
4002 static void put_event(struct perf_event *event)
4003 {
4004         if (!atomic_long_dec_and_test(&event->refcount))
4005                 return;
4006
4007         _free_event(event);
4008 }
4009
4010 /*
4011  * Kill an event dead; while event:refcount will preserve the event
4012  * object, it will not preserve its functionality. Once the last 'user'
4013  * gives up the object, we'll destroy the thing.
4014  */
4015 int perf_event_release_kernel(struct perf_event *event)
4016 {
4017         struct perf_event_context *ctx = event->ctx;
4018         struct perf_event *child, *tmp;
4019
4020         /*
4021          * If we got here through err_file: fput(event_file); we will not have
4022          * attached to a context yet.
4023          */
4024         if (!ctx) {
4025                 WARN_ON_ONCE(event->attach_state &
4026                                 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4027                 goto no_ctx;
4028         }
4029
4030         if (!is_kernel_event(event))
4031                 perf_remove_from_owner(event);
4032
4033         ctx = perf_event_ctx_lock(event);
4034         WARN_ON_ONCE(ctx->parent_ctx);
4035         perf_remove_from_context(event, DETACH_GROUP);
4036
4037         raw_spin_lock_irq(&ctx->lock);
4038         /*
4039          * Mark this even as STATE_DEAD, there is no external reference to it
4040          * anymore.
4041          *
4042          * Anybody acquiring event->child_mutex after the below loop _must_
4043          * also see this, most importantly inherit_event() which will avoid
4044          * placing more children on the list.
4045          *
4046          * Thus this guarantees that we will in fact observe and kill _ALL_
4047          * child events.
4048          */
4049         event->state = PERF_EVENT_STATE_DEAD;
4050         raw_spin_unlock_irq(&ctx->lock);
4051
4052         perf_event_ctx_unlock(event, ctx);
4053
4054 again:
4055         mutex_lock(&event->child_mutex);
4056         list_for_each_entry(child, &event->child_list, child_list) {
4057
4058                 /*
4059                  * Cannot change, child events are not migrated, see the
4060                  * comment with perf_event_ctx_lock_nested().
4061                  */
4062                 ctx = lockless_dereference(child->ctx);
4063                 /*
4064                  * Since child_mutex nests inside ctx::mutex, we must jump
4065                  * through hoops. We start by grabbing a reference on the ctx.
4066                  *
4067                  * Since the event cannot get freed while we hold the
4068                  * child_mutex, the context must also exist and have a !0
4069                  * reference count.
4070                  */
4071                 get_ctx(ctx);
4072
4073                 /*
4074                  * Now that we have a ctx ref, we can drop child_mutex, and
4075                  * acquire ctx::mutex without fear of it going away. Then we
4076                  * can re-acquire child_mutex.
4077                  */
4078                 mutex_unlock(&event->child_mutex);
4079                 mutex_lock(&ctx->mutex);
4080                 mutex_lock(&event->child_mutex);
4081
4082                 /*
4083                  * Now that we hold ctx::mutex and child_mutex, revalidate our
4084                  * state, if child is still the first entry, it didn't get freed
4085                  * and we can continue doing so.
4086                  */
4087                 tmp = list_first_entry_or_null(&event->child_list,
4088                                                struct perf_event, child_list);
4089                 if (tmp == child) {
4090                         perf_remove_from_context(child, DETACH_GROUP);
4091                         list_del(&child->child_list);
4092                         free_event(child);
4093                         /*
4094                          * This matches the refcount bump in inherit_event();
4095                          * this can't be the last reference.
4096                          */
4097                         put_event(event);
4098                 }
4099
4100                 mutex_unlock(&event->child_mutex);
4101                 mutex_unlock(&ctx->mutex);
4102                 put_ctx(ctx);
4103                 goto again;
4104         }
4105         mutex_unlock(&event->child_mutex);
4106
4107 no_ctx:
4108         put_event(event); /* Must be the 'last' reference */
4109         return 0;
4110 }
4111 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4112
4113 /*
4114  * Called when the last reference to the file is gone.
4115  */
4116 static int perf_release(struct inode *inode, struct file *file)
4117 {
4118         perf_event_release_kernel(file->private_data);
4119         return 0;
4120 }
4121
4122 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
4123 {
4124         struct perf_event *child;
4125         u64 total = 0;
4126
4127         *enabled = 0;
4128         *running = 0;
4129
4130         mutex_lock(&event->child_mutex);
4131
4132         (void)perf_event_read(event, false);
4133         total += perf_event_count(event);
4134
4135         *enabled += event->total_time_enabled +
4136                         atomic64_read(&event->child_total_time_enabled);
4137         *running += event->total_time_running +
4138                         atomic64_read(&event->child_total_time_running);
4139
4140         list_for_each_entry(child, &event->child_list, child_list) {
4141                 (void)perf_event_read(child, false);
4142                 total += perf_event_count(child);
4143                 *enabled += child->total_time_enabled;
4144                 *running += child->total_time_running;
4145         }
4146         mutex_unlock(&event->child_mutex);
4147
4148         return total;
4149 }
4150 EXPORT_SYMBOL_GPL(perf_event_read_value);
4151
4152 static int __perf_read_group_add(struct perf_event *leader,
4153                                         u64 read_format, u64 *values)
4154 {
4155         struct perf_event *sub;
4156         int n = 1; /* skip @nr */
4157         int ret;
4158
4159         ret = perf_event_read(leader, true);
4160         if (ret)
4161                 return ret;
4162
4163         /*
4164          * Since we co-schedule groups, {enabled,running} times of siblings
4165          * will be identical to those of the leader, so we only publish one
4166          * set.
4167          */
4168         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4169                 values[n++] += leader->total_time_enabled +
4170                         atomic64_read(&leader->child_total_time_enabled);
4171         }
4172
4173         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4174                 values[n++] += leader->total_time_running +
4175                         atomic64_read(&leader->child_total_time_running);
4176         }
4177
4178         /*
4179          * Write {count,id} tuples for every sibling.
4180          */
4181         values[n++] += perf_event_count(leader);
4182         if (read_format & PERF_FORMAT_ID)
4183                 values[n++] = primary_event_id(leader);
4184
4185         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4186                 values[n++] += perf_event_count(sub);
4187                 if (read_format & PERF_FORMAT_ID)
4188                         values[n++] = primary_event_id(sub);
4189         }
4190
4191         return 0;
4192 }
4193
4194 static int perf_read_group(struct perf_event *event,
4195                                    u64 read_format, char __user *buf)
4196 {
4197         struct perf_event *leader = event->group_leader, *child;
4198         struct perf_event_context *ctx = leader->ctx;
4199         int ret;
4200         u64 *values;
4201
4202         lockdep_assert_held(&ctx->mutex);
4203
4204         values = kzalloc(event->read_size, GFP_KERNEL);
4205         if (!values)
4206                 return -ENOMEM;
4207
4208         values[0] = 1 + leader->nr_siblings;
4209
4210         /*
4211          * By locking the child_mutex of the leader we effectively
4212          * lock the child list of all siblings.. XXX explain how.
4213          */
4214         mutex_lock(&leader->child_mutex);
4215
4216         ret = __perf_read_group_add(leader, read_format, values);
4217         if (ret)
4218                 goto unlock;
4219
4220         list_for_each_entry(child, &leader->child_list, child_list) {
4221                 ret = __perf_read_group_add(child, read_format, values);
4222                 if (ret)
4223                         goto unlock;
4224         }
4225
4226         mutex_unlock(&leader->child_mutex);
4227
4228         ret = event->read_size;
4229         if (copy_to_user(buf, values, event->read_size))
4230                 ret = -EFAULT;
4231         goto out;
4232
4233 unlock:
4234         mutex_unlock(&leader->child_mutex);
4235 out:
4236         kfree(values);
4237         return ret;
4238 }
4239
4240 static int perf_read_one(struct perf_event *event,
4241                                  u64 read_format, char __user *buf)
4242 {
4243         u64 enabled, running;
4244         u64 values[4];
4245         int n = 0;
4246
4247         values[n++] = perf_event_read_value(event, &enabled, &running);
4248         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4249                 values[n++] = enabled;
4250         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4251                 values[n++] = running;
4252         if (read_format & PERF_FORMAT_ID)
4253                 values[n++] = primary_event_id(event);
4254
4255         if (copy_to_user(buf, values, n * sizeof(u64)))
4256                 return -EFAULT;
4257
4258         return n * sizeof(u64);
4259 }
4260
4261 static bool is_event_hup(struct perf_event *event)
4262 {
4263         bool no_children;
4264
4265         if (event->state > PERF_EVENT_STATE_EXIT)
4266                 return false;
4267
4268         mutex_lock(&event->child_mutex);
4269         no_children = list_empty(&event->child_list);
4270         mutex_unlock(&event->child_mutex);
4271         return no_children;
4272 }
4273
4274 /*
4275  * Read the performance event - simple non blocking version for now
4276  */
4277 static ssize_t
4278 __perf_read(struct perf_event *event, char __user *buf, size_t count)
4279 {
4280         u64 read_format = event->attr.read_format;
4281         int ret;
4282
4283         /*
4284          * Return end-of-file for a read on a event that is in
4285          * error state (i.e. because it was pinned but it couldn't be
4286          * scheduled on to the CPU at some point).
4287          */
4288         if (event->state == PERF_EVENT_STATE_ERROR)
4289                 return 0;
4290
4291         if (count < event->read_size)
4292                 return -ENOSPC;
4293
4294         WARN_ON_ONCE(event->ctx->parent_ctx);
4295         if (read_format & PERF_FORMAT_GROUP)
4296                 ret = perf_read_group(event, read_format, buf);
4297         else
4298                 ret = perf_read_one(event, read_format, buf);
4299
4300         return ret;
4301 }
4302
4303 static ssize_t
4304 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4305 {
4306         struct perf_event *event = file->private_data;
4307         struct perf_event_context *ctx;
4308         int ret;
4309
4310         ctx = perf_event_ctx_lock(event);
4311         ret = __perf_read(event, buf, count);
4312         perf_event_ctx_unlock(event, ctx);
4313
4314         return ret;
4315 }
4316
4317 static unsigned int perf_poll(struct file *file, poll_table *wait)
4318 {
4319         struct perf_event *event = file->private_data;
4320         struct ring_buffer *rb;
4321         unsigned int events = POLLHUP;
4322
4323         poll_wait(file, &event->waitq, wait);
4324
4325         if (is_event_hup(event))
4326                 return events;
4327
4328         /*
4329          * Pin the event->rb by taking event->mmap_mutex; otherwise
4330          * perf_event_set_output() can swizzle our rb and make us miss wakeups.
4331          */
4332         mutex_lock(&event->mmap_mutex);
4333         rb = event->rb;
4334         if (rb)
4335                 events = atomic_xchg(&rb->poll, 0);
4336         mutex_unlock(&event->mmap_mutex);
4337         return events;
4338 }
4339
4340 static void _perf_event_reset(struct perf_event *event)
4341 {
4342         (void)perf_event_read(event, false);
4343         local64_set(&event->count, 0);
4344         perf_event_update_userpage(event);
4345 }
4346
4347 /*
4348  * Holding the top-level event's child_mutex means that any
4349  * descendant process that has inherited this event will block
4350  * in perf_event_exit_event() if it goes to exit, thus satisfying the
4351  * task existence requirements of perf_event_enable/disable.
4352  */
4353 static void perf_event_for_each_child(struct perf_event *event,
4354                                         void (*func)(struct perf_event *))
4355 {
4356         struct perf_event *child;
4357
4358         WARN_ON_ONCE(event->ctx->parent_ctx);
4359
4360         mutex_lock(&event->child_mutex);
4361         func(event);
4362         list_for_each_entry(child, &event->child_list, child_list)
4363                 func(child);
4364         mutex_unlock(&event->child_mutex);
4365 }
4366
4367 static void perf_event_for_each(struct perf_event *event,
4368                                   void (*func)(struct perf_event *))
4369 {
4370         struct perf_event_context *ctx = event->ctx;
4371         struct perf_event *sibling;
4372
4373         lockdep_assert_held(&ctx->mutex);
4374
4375         event = event->group_leader;
4376
4377         perf_event_for_each_child(event, func);
4378         list_for_each_entry(sibling, &event->sibling_list, group_entry)
4379                 perf_event_for_each_child(sibling, func);
4380 }
4381
4382 static void __perf_event_period(struct perf_event *event,
4383                                 struct perf_cpu_context *cpuctx,
4384                                 struct perf_event_context *ctx,
4385                                 void *info)
4386 {
4387         u64 value = *((u64 *)info);
4388         bool active;
4389
4390         if (event->attr.freq) {
4391                 event->attr.sample_freq = value;
4392         } else {
4393                 event->attr.sample_period = value;
4394                 event->hw.sample_period = value;
4395         }
4396
4397         active = (event->state == PERF_EVENT_STATE_ACTIVE);
4398         if (active) {
4399                 perf_pmu_disable(ctx->pmu);
4400                 /*
4401                  * We could be throttled; unthrottle now to avoid the tick
4402                  * trying to unthrottle while we already re-started the event.
4403                  */
4404                 if (event->hw.interrupts == MAX_INTERRUPTS) {
4405                         event->hw.interrupts = 0;
4406                         perf_log_throttle(event, 1);
4407                 }
4408                 event->pmu->stop(event, PERF_EF_UPDATE);
4409         }
4410
4411         local64_set(&event->hw.period_left, 0);
4412
4413         if (active) {
4414                 event->pmu->start(event, PERF_EF_RELOAD);
4415                 perf_pmu_enable(ctx->pmu);
4416         }
4417 }
4418
4419 static int perf_event_period(struct perf_event *event, u64 __user *arg)
4420 {
4421         u64 value;
4422
4423         if (!is_sampling_event(event))
4424                 return -EINVAL;
4425
4426         if (copy_from_user(&value, arg, sizeof(value)))
4427                 return -EFAULT;
4428
4429         if (!value)
4430                 return -EINVAL;
4431
4432         if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4433                 return -EINVAL;
4434
4435         event_function_call(event, __perf_event_period, &value);
4436
4437         return 0;
4438 }
4439
4440 static const struct file_operations perf_fops;
4441
4442 static inline int perf_fget_light(int fd, struct fd *p)
4443 {
4444         struct fd f = fdget(fd);
4445         if (!f.file)
4446                 return -EBADF;
4447
4448         if (f.file->f_op != &perf_fops) {
4449                 fdput(f);
4450                 return -EBADF;
4451         }
4452         *p = f;
4453         return 0;
4454 }
4455
4456 static int perf_event_set_output(struct perf_event *event,
4457                                  struct perf_event *output_event);
4458 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4459 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4460
4461 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
4462 {
4463         void (*func)(struct perf_event *);
4464         u32 flags = arg;
4465
4466         switch (cmd) {
4467         case PERF_EVENT_IOC_ENABLE:
4468                 func = _perf_event_enable;
4469                 break;
4470         case PERF_EVENT_IOC_DISABLE:
4471                 func = _perf_event_disable;
4472                 break;
4473         case PERF_EVENT_IOC_RESET:
4474                 func = _perf_event_reset;
4475                 break;
4476
4477         case PERF_EVENT_IOC_REFRESH:
4478                 return _perf_event_refresh(event, arg);
4479
4480         case PERF_EVENT_IOC_PERIOD:
4481                 return perf_event_period(event, (u64 __user *)arg);
4482
4483         case PERF_EVENT_IOC_ID:
4484         {
4485                 u64 id = primary_event_id(event);
4486
4487                 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4488                         return -EFAULT;
4489                 return 0;
4490         }
4491
4492         case PERF_EVENT_IOC_SET_OUTPUT:
4493         {
4494                 int ret;
4495                 if (arg != -1) {
4496                         struct perf_event *output_event;
4497                         struct fd output;
4498                         ret = perf_fget_light(arg, &output);
4499                         if (ret)
4500                                 return ret;
4501                         output_event = output.file->private_data;
4502                         ret = perf_event_set_output(event, output_event);
4503                         fdput(output);
4504                 } else {
4505                         ret = perf_event_set_output(event, NULL);
4506                 }
4507                 return ret;
4508         }
4509
4510         case PERF_EVENT_IOC_SET_FILTER:
4511                 return perf_event_set_filter(event, (void __user *)arg);
4512
4513         case PERF_EVENT_IOC_SET_BPF:
4514                 return perf_event_set_bpf_prog(event, arg);
4515
4516         case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4517                 struct ring_buffer *rb;
4518
4519                 rcu_read_lock();
4520                 rb = rcu_dereference(event->rb);
4521                 if (!rb || !rb->nr_pages) {
4522                         rcu_read_unlock();
4523                         return -EINVAL;
4524                 }
4525                 rb_toggle_paused(rb, !!arg);
4526                 rcu_read_unlock();
4527                 return 0;
4528         }
4529         default:
4530                 return -ENOTTY;
4531         }
4532
4533         if (flags & PERF_IOC_FLAG_GROUP)
4534                 perf_event_for_each(event, func);
4535         else
4536                 perf_event_for_each_child(event, func);
4537
4538         return 0;
4539 }
4540
4541 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4542 {
4543         struct perf_event *event = file->private_data;
4544         struct perf_event_context *ctx;
4545         long ret;
4546
4547         ctx = perf_event_ctx_lock(event);
4548         ret = _perf_ioctl(event, cmd, arg);
4549         perf_event_ctx_unlock(event, ctx);
4550
4551         return ret;
4552 }
4553
4554 #ifdef CONFIG_COMPAT
4555 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4556                                 unsigned long arg)
4557 {
4558         switch (_IOC_NR(cmd)) {
4559         case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4560         case _IOC_NR(PERF_EVENT_IOC_ID):
4561                 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
4562                 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4563                         cmd &= ~IOCSIZE_MASK;
4564                         cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4565                 }
4566                 break;
4567         }
4568         return perf_ioctl(file, cmd, arg);
4569 }
4570 #else
4571 # define perf_compat_ioctl NULL
4572 #endif
4573
4574 int perf_event_task_enable(void)
4575 {
4576         struct perf_event_context *ctx;
4577         struct perf_event *event;
4578
4579         mutex_lock(&current->perf_event_mutex);
4580         list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4581                 ctx = perf_event_ctx_lock(event);
4582                 perf_event_for_each_child(event, _perf_event_enable);
4583                 perf_event_ctx_unlock(event, ctx);
4584         }
4585         mutex_unlock(&current->perf_event_mutex);
4586
4587         return 0;
4588 }
4589
4590 int perf_event_task_disable(void)
4591 {
4592         struct perf_event_context *ctx;
4593         struct perf_event *event;
4594
4595         mutex_lock(&current->perf_event_mutex);
4596         list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4597                 ctx = perf_event_ctx_lock(event);
4598                 perf_event_for_each_child(event, _perf_event_disable);
4599                 perf_event_ctx_unlock(event, ctx);
4600         }
4601         mutex_unlock(&current->perf_event_mutex);
4602
4603         return 0;
4604 }
4605
4606 static int perf_event_index(struct perf_event *event)
4607 {
4608         if (event->hw.state & PERF_HES_STOPPED)
4609                 return 0;
4610
4611         if (event->state != PERF_EVENT_STATE_ACTIVE)
4612                 return 0;
4613
4614         return event->pmu->event_idx(event);
4615 }
4616
4617 static void calc_timer_values(struct perf_event *event,
4618                                 u64 *now,
4619                                 u64 *enabled,
4620                                 u64 *running)
4621 {
4622         u64 ctx_time;
4623
4624         *now = perf_clock();
4625         ctx_time = event->shadow_ctx_time + *now;
4626         *enabled = ctx_time - event->tstamp_enabled;
4627         *running = ctx_time - event->tstamp_running;
4628 }
4629
4630 static void perf_event_init_userpage(struct perf_event *event)
4631 {
4632         struct perf_event_mmap_page *userpg;
4633         struct ring_buffer *rb;
4634
4635         rcu_read_lock();
4636         rb = rcu_dereference(event->rb);
4637         if (!rb)
4638                 goto unlock;
4639
4640         userpg = rb->user_page;
4641
4642         /* Allow new userspace to detect that bit 0 is deprecated */
4643         userpg->cap_bit0_is_deprecated = 1;
4644         userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4645         userpg->data_offset = PAGE_SIZE;
4646         userpg->data_size = perf_data_size(rb);
4647
4648 unlock:
4649         rcu_read_unlock();
4650 }
4651
4652 void __weak arch_perf_update_userpage(
4653         struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4654 {
4655 }
4656
4657 /*
4658  * Callers need to ensure there can be no nesting of this function, otherwise
4659  * the seqlock logic goes bad. We can not serialize this because the arch
4660  * code calls this from NMI context.
4661  */
4662 void perf_event_update_userpage(struct perf_event *event)
4663 {
4664         struct perf_event_mmap_page *userpg;
4665         struct ring_buffer *rb;
4666         u64 enabled, running, now;
4667
4668         rcu_read_lock();
4669         rb = rcu_dereference(event->rb);
4670         if (!rb)
4671                 goto unlock;
4672
4673         /*
4674          * compute total_time_enabled, total_time_running
4675          * based on snapshot values taken when the event
4676          * was last scheduled in.
4677          *
4678          * we cannot simply called update_context_time()
4679          * because of locking issue as we can be called in
4680          * NMI context
4681          */
4682         calc_timer_values(event, &now, &enabled, &running);
4683
4684         userpg = rb->user_page;
4685         /*
4686          * Disable preemption so as to not let the corresponding user-space
4687          * spin too long if we get preempted.
4688          */
4689         preempt_disable();
4690         ++userpg->lock;
4691         barrier();
4692         userpg->index = perf_event_index(event);
4693         userpg->offset = perf_event_count(event);
4694         if (userpg->index)
4695                 userpg->offset -= local64_read(&event->hw.prev_count);
4696
4697         userpg->time_enabled = enabled +
4698                         atomic64_read(&event->child_total_time_enabled);
4699
4700         userpg->time_running = running +
4701                         atomic64_read(&event->child_total_time_running);
4702
4703         arch_perf_update_userpage(event, userpg, now);
4704
4705         barrier();
4706         ++userpg->lock;
4707         preempt_enable();
4708 unlock:
4709         rcu_read_unlock();
4710 }
4711
4712 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4713 {
4714         struct perf_event *event = vma->vm_file->private_data;
4715         struct ring_buffer *rb;
4716         int ret = VM_FAULT_SIGBUS;
4717
4718         if (vmf->flags & FAULT_FLAG_MKWRITE) {
4719                 if (vmf->pgoff == 0)
4720                         ret = 0;
4721                 return ret;
4722         }
4723
4724         rcu_read_lock();
4725         rb = rcu_dereference(event->rb);
4726         if (!rb)
4727                 goto unlock;
4728
4729         if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4730                 goto unlock;
4731
4732         vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
4733         if (!vmf->page)
4734                 goto unlock;
4735
4736         get_page(vmf->page);
4737         vmf->page->mapping = vma->vm_file->f_mapping;
4738         vmf->page->index   = vmf->pgoff;
4739
4740         ret = 0;
4741 unlock:
4742         rcu_read_unlock();
4743
4744         return ret;
4745 }
4746
4747 static void ring_buffer_attach(struct perf_event *event,
4748                                struct ring_buffer *rb)
4749 {
4750         struct ring_buffer *old_rb = NULL;
4751         unsigned long flags;
4752
4753         if (event->rb) {
4754                 /*
4755                  * Should be impossible, we set this when removing
4756                  * event->rb_entry and wait/clear when adding event->rb_entry.
4757                  */
4758                 WARN_ON_ONCE(event->rcu_pending);
4759
4760                 old_rb = event->rb;
4761                 spin_lock_irqsave(&old_rb->event_lock, flags);
4762                 list_del_rcu(&event->rb_entry);
4763                 spin_unlock_irqrestore(&old_rb->event_lock, flags);
4764
4765                 event->rcu_batches = get_state_synchronize_rcu();
4766                 event->rcu_pending = 1;
4767         }
4768
4769         if (rb) {
4770                 if (event->rcu_pending) {
4771                         cond_synchronize_rcu(event->rcu_batches);
4772                         event->rcu_pending = 0;
4773                 }
4774
4775                 spin_lock_irqsave(&rb->event_lock, flags);
4776                 list_add_rcu(&event->rb_entry, &rb->event_list);
4777                 spin_unlock_irqrestore(&rb->event_lock, flags);
4778         }
4779
4780         rcu_assign_pointer(event->rb, rb);
4781
4782         if (old_rb) {
4783                 ring_buffer_put(old_rb);
4784                 /*
4785                  * Since we detached before setting the new rb, so that we
4786                  * could attach the new rb, we could have missed a wakeup.
4787                  * Provide it now.
4788                  */
4789                 wake_up_all(&event->waitq);
4790         }
4791 }
4792
4793 static void ring_buffer_wakeup(struct perf_event *event)
4794 {
4795         struct ring_buffer *rb;
4796
4797         rcu_read_lock();
4798         rb = rcu_dereference(event->rb);
4799         if (rb) {
4800                 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4801                         wake_up_all(&event->waitq);
4802         }
4803         rcu_read_unlock();
4804 }
4805
4806 struct ring_buffer *ring_buffer_get(struct perf_event *event)
4807 {
4808         struct ring_buffer *rb;
4809
4810         rcu_read_lock();
4811         rb = rcu_dereference(event->rb);
4812         if (rb) {
4813                 if (!atomic_inc_not_zero(&rb->refcount))
4814                         rb = NULL;
4815         }
4816         rcu_read_unlock();
4817
4818         return rb;
4819 }
4820
4821 void ring_buffer_put(struct ring_buffer *rb)
4822 {
4823         if (!atomic_dec_and_test(&rb->refcount))
4824                 return;
4825
4826         WARN_ON_ONCE(!list_empty(&rb->event_list));
4827
4828         call_rcu(&rb->rcu_head, rb_free_rcu);
4829 }
4830
4831 static void perf_mmap_open(struct vm_area_struct *vma)
4832 {
4833         struct perf_event *event = vma->vm_file->private_data;
4834
4835         atomic_inc(&event->mmap_count);
4836         atomic_inc(&event->rb->mmap_count);
4837
4838         if (vma->vm_pgoff)
4839                 atomic_inc(&event->rb->aux_mmap_count);
4840
4841         if (event->pmu->event_mapped)
4842                 event->pmu->event_mapped(event);
4843 }
4844
4845 static void perf_pmu_output_stop(struct perf_event *event);
4846
4847 /*
4848  * A buffer can be mmap()ed multiple times; either directly through the same
4849  * event, or through other events by use of perf_event_set_output().
4850  *
4851  * In order to undo the VM accounting done by perf_mmap() we need to destroy
4852  * the buffer here, where we still have a VM context. This means we need
4853  * to detach all events redirecting to us.
4854  */
4855 static void perf_mmap_close(struct vm_area_struct *vma)
4856 {
4857         struct perf_event *event = vma->vm_file->private_data;
4858
4859         struct ring_buffer *rb = ring_buffer_get(event);
4860         struct user_struct *mmap_user = rb->mmap_user;
4861         int mmap_locked = rb->mmap_locked;
4862         unsigned long size = perf_data_size(rb);
4863
4864         if (event->pmu->event_unmapped)
4865                 event->pmu->event_unmapped(event);
4866
4867         /*
4868          * rb->aux_mmap_count will always drop before rb->mmap_count and
4869          * event->mmap_count, so it is ok to use event->mmap_mutex to
4870          * serialize with perf_mmap here.
4871          */
4872         if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4873             atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
4874                 /*
4875                  * Stop all AUX events that are writing to this buffer,
4876                  * so that we can free its AUX pages and corresponding PMU
4877                  * data. Note that after rb::aux_mmap_count dropped to zero,
4878                  * they won't start any more (see perf_aux_output_begin()).
4879                  */
4880                 perf_pmu_output_stop(event);
4881
4882                 /* now it's safe to free the pages */
4883                 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4884                 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4885
4886                 /* this has to be the last one */
4887                 rb_free_aux(rb);
4888                 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
4889
4890                 mutex_unlock(&event->mmap_mutex);
4891         }
4892
4893         atomic_dec(&rb->mmap_count);
4894
4895         if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
4896                 goto out_put;
4897
4898         ring_buffer_attach(event, NULL);
4899         mutex_unlock(&event->mmap_mutex);
4900
4901         /* If there's still other mmap()s of this buffer, we're done. */
4902         if (atomic_read(&rb->mmap_count))
4903                 goto out_put;
4904
4905         /*
4906          * No other mmap()s, detach from all other events that might redirect
4907          * into the now unreachable buffer. Somewhat complicated by the
4908          * fact that rb::event_lock otherwise nests inside mmap_mutex.
4909          */
4910 again:
4911         rcu_read_lock();
4912         list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4913                 if (!atomic_long_inc_not_zero(&event->refcount)) {
4914                         /*
4915                          * This event is en-route to free_event() which will
4916                          * detach it and remove it from the list.
4917                          */
4918                         continue;
4919                 }
4920                 rcu_read_unlock();
4921
4922                 mutex_lock(&event->mmap_mutex);
4923                 /*
4924                  * Check we didn't race with perf_event_set_output() which can
4925                  * swizzle the rb from under us while we were waiting to
4926                  * acquire mmap_mutex.
4927                  *
4928                  * If we find a different rb; ignore this event, a next
4929                  * iteration will no longer find it on the list. We have to
4930                  * still restart the iteration to make sure we're not now
4931                  * iterating the wrong list.
4932                  */
4933                 if (event->rb == rb)
4934                         ring_buffer_attach(event, NULL);
4935
4936                 mutex_unlock(&event->mmap_mutex);
4937                 put_event(event);
4938
4939                 /*
4940                  * Restart the iteration; either we're on the wrong list or
4941                  * destroyed its integrity by doing a deletion.
4942                  */
4943                 goto again;
4944         }
4945         rcu_read_unlock();
4946
4947         /*
4948          * It could be there's still a few 0-ref events on the list; they'll
4949          * get cleaned up by free_event() -- they'll also still have their
4950          * ref on the rb and will free it whenever they are done with it.
4951          *
4952          * Aside from that, this buffer is 'fully' detached and unmapped,
4953          * undo the VM accounting.
4954          */
4955
4956         atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4957         vma->vm_mm->pinned_vm -= mmap_locked;
4958         free_uid(mmap_user);
4959
4960 out_put:
4961         ring_buffer_put(rb); /* could be last */
4962 }
4963
4964 static const struct vm_operations_struct perf_mmap_vmops = {
4965         .open           = perf_mmap_open,
4966         .close          = perf_mmap_close, /* non mergable */
4967         .fault          = perf_mmap_fault,
4968         .page_mkwrite   = perf_mmap_fault,
4969 };
4970
4971 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4972 {
4973         struct perf_event *event = file->private_data;
4974         unsigned long user_locked, user_lock_limit;
4975         struct user_struct *user = current_user();
4976         unsigned long locked, lock_limit;
4977         struct ring_buffer *rb = NULL;
4978         unsigned long vma_size;
4979         unsigned long nr_pages;
4980         long user_extra = 0, extra = 0;
4981         int ret = 0, flags = 0;
4982
4983         /*
4984          * Don't allow mmap() of inherited per-task counters. This would
4985          * create a performance issue due to all children writing to the
4986          * same rb.
4987          */
4988         if (event->cpu == -1 && event->attr.inherit)
4989                 return -EINVAL;
4990
4991         if (!(vma->vm_flags & VM_SHARED))
4992                 return -EINVAL;
4993
4994         vma_size = vma->vm_end - vma->vm_start;
4995
4996         if (vma->vm_pgoff == 0) {
4997                 nr_pages = (vma_size / PAGE_SIZE) - 1;
4998         } else {
4999                 /*
5000                  * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5001                  * mapped, all subsequent mappings should have the same size
5002                  * and offset. Must be above the normal perf buffer.
5003                  */
5004                 u64 aux_offset, aux_size;
5005
5006                 if (!event->rb)
5007                         return -EINVAL;
5008
5009                 nr_pages = vma_size / PAGE_SIZE;
5010
5011                 mutex_lock(&event->mmap_mutex);
5012                 ret = -EINVAL;
5013
5014                 rb = event->rb;
5015                 if (!rb)
5016                         goto aux_unlock;
5017
5018                 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5019                 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5020
5021                 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5022                         goto aux_unlock;
5023
5024                 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5025                         goto aux_unlock;
5026
5027                 /* already mapped with a different offset */
5028                 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5029                         goto aux_unlock;
5030
5031                 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5032                         goto aux_unlock;
5033
5034                 /* already mapped with a different size */
5035                 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5036                         goto aux_unlock;
5037
5038                 if (!is_power_of_2(nr_pages))
5039                         goto aux_unlock;
5040
5041                 if (!atomic_inc_not_zero(&rb->mmap_count))
5042                         goto aux_unlock;
5043
5044                 if (rb_has_aux(rb)) {
5045                         atomic_inc(&rb->aux_mmap_count);
5046                         ret = 0;
5047                         goto unlock;
5048                 }
5049
5050                 atomic_set(&rb->aux_mmap_count, 1);
5051                 user_extra = nr_pages;
5052
5053                 goto accounting;
5054         }
5055
5056         /*
5057          * If we have rb pages ensure they're a power-of-two number, so we
5058          * can do bitmasks instead of modulo.
5059          */
5060         if (nr_pages != 0 && !is_power_of_2(nr_pages))
5061                 return -EINVAL;
5062
5063         if (vma_size != PAGE_SIZE * (1 + nr_pages))
5064                 return -EINVAL;
5065
5066         WARN_ON_ONCE(event->ctx->parent_ctx);
5067 again:
5068         mutex_lock(&event->mmap_mutex);
5069         if (event->rb) {
5070                 if (event->rb->nr_pages != nr_pages) {
5071                         ret = -EINVAL;
5072                         goto unlock;
5073                 }
5074
5075                 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5076                         /*
5077                          * Raced against perf_mmap_close() through
5078                          * perf_event_set_output(). Try again, hope for better
5079                          * luck.
5080                          */
5081                         mutex_unlock(&event->mmap_mutex);
5082                         goto again;
5083                 }
5084
5085                 goto unlock;
5086         }
5087
5088         user_extra = nr_pages + 1;
5089
5090 accounting:
5091         user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
5092
5093         /*
5094          * Increase the limit linearly with more CPUs:
5095          */
5096         user_lock_limit *= num_online_cpus();
5097
5098         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
5099
5100         if (user_locked > user_lock_limit)
5101                 extra = user_locked - user_lock_limit;
5102
5103         lock_limit = rlimit(RLIMIT_MEMLOCK);
5104         lock_limit >>= PAGE_SHIFT;
5105         locked = vma->vm_mm->pinned_vm + extra;
5106
5107         if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5108                 !capable(CAP_IPC_LOCK)) {
5109                 ret = -EPERM;
5110                 goto unlock;
5111         }
5112
5113         WARN_ON(!rb && event->rb);
5114
5115         if (vma->vm_flags & VM_WRITE)
5116                 flags |= RING_BUFFER_WRITABLE;
5117
5118         if (!rb) {
5119                 rb = rb_alloc(nr_pages,
5120                               event->attr.watermark ? event->attr.wakeup_watermark : 0,
5121                               event->cpu, flags);
5122
5123                 if (!rb) {
5124                         ret = -ENOMEM;
5125                         goto unlock;
5126                 }
5127
5128                 atomic_set(&rb->mmap_count, 1);
5129                 rb->mmap_user = get_current_user();
5130                 rb->mmap_locked = extra;
5131
5132                 ring_buffer_attach(event, rb);
5133
5134                 perf_event_init_userpage(event);
5135                 perf_event_update_userpage(event);
5136         } else {
5137                 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5138                                    event->attr.aux_watermark, flags);
5139                 if (!ret)
5140                         rb->aux_mmap_locked = extra;
5141         }
5142
5143 unlock:
5144         if (!ret) {
5145                 atomic_long_add(user_extra, &user->locked_vm);
5146                 vma->vm_mm->pinned_vm += extra;
5147
5148                 atomic_inc(&event->mmap_count);
5149         } else if (rb) {
5150                 atomic_dec(&rb->mmap_count);
5151         }
5152 aux_unlock:
5153         mutex_unlock(&event->mmap_mutex);
5154
5155         /*
5156          * Since pinned accounting is per vm we cannot allow fork() to copy our
5157          * vma.
5158          */
5159         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
5160         vma->vm_ops = &perf_mmap_vmops;
5161
5162         if (event->pmu->event_mapped)
5163                 event->pmu->event_mapped(event);
5164
5165         return ret;
5166 }
5167
5168 static int perf_fasync(int fd, struct file *filp, int on)
5169 {
5170         struct inode *inode = file_inode(filp);
5171         struct perf_event *event = filp->private_data;
5172         int retval;
5173
5174         inode_lock(inode);
5175         retval = fasync_helper(fd, filp, on, &event->fasync);
5176         inode_unlock(inode);
5177
5178         if (retval < 0)
5179                 return retval;
5180
5181         return 0;
5182 }
5183
5184 static const struct file_operations perf_fops = {
5185         .llseek                 = no_llseek,
5186         .release                = perf_release,
5187         .read                   = perf_read,
5188         .poll                   = perf_poll,
5189         .unlocked_ioctl         = perf_ioctl,
5190         .compat_ioctl           = perf_compat_ioctl,
5191         .mmap                   = perf_mmap,
5192         .fasync                 = perf_fasync,
5193 };
5194
5195 /*
5196  * Perf event wakeup
5197  *
5198  * If there's data, ensure we set the poll() state and publish everything
5199  * to user-space before waking everybody up.
5200  */
5201
5202 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5203 {
5204         /* only the parent has fasync state */
5205         if (event->parent)
5206                 event = event->parent;
5207         return &event->fasync;
5208 }
5209
5210 void perf_event_wakeup(struct perf_event *event)
5211 {
5212         ring_buffer_wakeup(event);
5213
5214         if (event->pending_kill) {
5215                 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
5216                 event->pending_kill = 0;
5217         }
5218 }
5219
5220 static void perf_pending_event(struct irq_work *entry)
5221 {
5222         struct perf_event *event = container_of(entry,
5223                         struct perf_event, pending);
5224         int rctx;
5225
5226         rctx = perf_swevent_get_recursion_context();
5227         /*
5228          * If we 'fail' here, that's OK, it means recursion is already disabled
5229          * and we won't recurse 'further'.
5230          */
5231
5232         if (event->pending_disable) {
5233                 event->pending_disable = 0;
5234                 perf_event_disable_local(event);
5235         }
5236
5237         if (event->pending_wakeup) {
5238                 event->pending_wakeup = 0;
5239                 perf_event_wakeup(event);
5240         }
5241
5242         if (rctx >= 0)
5243                 perf_swevent_put_recursion_context(rctx);
5244 }
5245
5246 /*
5247  * We assume there is only KVM supporting the callbacks.
5248  * Later on, we might change it to a list if there is
5249  * another virtualization implementation supporting the callbacks.
5250  */
5251 struct perf_guest_info_callbacks *perf_guest_cbs;
5252
5253 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5254 {
5255         perf_guest_cbs = cbs;
5256         return 0;
5257 }
5258 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5259
5260 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5261 {
5262         perf_guest_cbs = NULL;
5263         return 0;
5264 }
5265 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
5266
5267 static void
5268 perf_output_sample_regs(struct perf_output_handle *handle,
5269                         struct pt_regs *regs, u64 mask)
5270 {
5271         int bit;
5272
5273         for_each_set_bit(bit, (const unsigned long *) &mask,
5274                          sizeof(mask) * BITS_PER_BYTE) {
5275                 u64 val;
5276
5277                 val = perf_reg_value(regs, bit);
5278                 perf_output_put(handle, val);
5279         }
5280 }
5281
5282 static void perf_sample_regs_user(struct perf_regs *regs_user,
5283                                   struct pt_regs *regs,
5284                                   struct pt_regs *regs_user_copy)
5285 {
5286         if (user_mode(regs)) {
5287                 regs_user->abi = perf_reg_abi(current);
5288                 regs_user->regs = regs;
5289         } else if (current->mm) {
5290                 perf_get_regs_user(regs_user, regs, regs_user_copy);
5291         } else {
5292                 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5293                 regs_user->regs = NULL;
5294         }
5295 }
5296
5297 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5298                                   struct pt_regs *regs)
5299 {
5300         regs_intr->regs = regs;
5301         regs_intr->abi  = perf_reg_abi(current);
5302 }
5303
5304
5305 /*
5306  * Get remaining task size from user stack pointer.
5307  *
5308  * It'd be better to take stack vma map and limit this more
5309  * precisly, but there's no way to get it safely under interrupt,
5310  * so using TASK_SIZE as limit.
5311  */
5312 static u64 perf_ustack_task_size(struct pt_regs *regs)
5313 {
5314         unsigned long addr = perf_user_stack_pointer(regs);
5315
5316         if (!addr || addr >= TASK_SIZE)
5317                 return 0;
5318
5319         return TASK_SIZE - addr;
5320 }
5321
5322 static u16
5323 perf_sample_ustack_size(u16 stack_size, u16 header_size,
5324                         struct pt_regs *regs)
5325 {
5326         u64 task_size;
5327
5328         /* No regs, no stack pointer, no dump. */
5329         if (!regs)
5330                 return 0;
5331
5332         /*
5333          * Check if we fit in with the requested stack size into the:
5334          * - TASK_SIZE
5335          *   If we don't, we limit the size to the TASK_SIZE.
5336          *
5337          * - remaining sample size
5338          *   If we don't, we customize the stack size to
5339          *   fit in to the remaining sample size.
5340          */
5341
5342         task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5343         stack_size = min(stack_size, (u16) task_size);
5344
5345         /* Current header size plus static size and dynamic size. */
5346         header_size += 2 * sizeof(u64);
5347
5348         /* Do we fit in with the current stack dump size? */
5349         if ((u16) (header_size + stack_size) < header_size) {
5350                 /*
5351                  * If we overflow the maximum size for the sample,
5352                  * we customize the stack dump size to fit in.
5353                  */
5354                 stack_size = USHRT_MAX - header_size - sizeof(u64);
5355                 stack_size = round_up(stack_size, sizeof(u64));
5356         }
5357
5358         return stack_size;
5359 }
5360
5361 static void
5362 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5363                           struct pt_regs *regs)
5364 {
5365         /* Case of a kernel thread, nothing to dump */
5366         if (!regs) {
5367                 u64 size = 0;
5368                 perf_output_put(handle, size);
5369         } else {
5370                 unsigned long sp;
5371                 unsigned int rem;
5372                 u64 dyn_size;
5373
5374                 /*
5375                  * We dump:
5376                  * static size
5377                  *   - the size requested by user or the best one we can fit
5378                  *     in to the sample max size
5379                  * data
5380                  *   - user stack dump data
5381                  * dynamic size
5382                  *   - the actual dumped size
5383                  */
5384
5385                 /* Static size. */
5386                 perf_output_put(handle, dump_size);
5387
5388                 /* Data. */
5389                 sp = perf_user_stack_pointer(regs);
5390                 rem = __output_copy_user(handle, (void *) sp, dump_size);
5391                 dyn_size = dump_size - rem;
5392
5393                 perf_output_skip(handle, rem);
5394
5395                 /* Dynamic size. */
5396                 perf_output_put(handle, dyn_size);
5397         }
5398 }
5399
5400 static void __perf_event_header__init_id(struct perf_event_header *header,
5401                                          struct perf_sample_data *data,
5402                                          struct perf_event *event)
5403 {
5404         u64 sample_type = event->attr.sample_type;
5405
5406         data->type = sample_type;
5407         header->size += event->id_header_size;
5408
5409         if (sample_type & PERF_SAMPLE_TID) {
5410                 /* namespace issues */
5411                 data->tid_entry.pid = perf_event_pid(event, current);
5412                 data->tid_entry.tid = perf_event_tid(event, current);
5413         }
5414
5415         if (sample_type & PERF_SAMPLE_TIME)
5416                 data->time = perf_event_clock(event);
5417
5418         if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
5419                 data->id = primary_event_id(event);
5420
5421         if (sample_type & PERF_SAMPLE_STREAM_ID)
5422                 data->stream_id = event->id;
5423
5424         if (sample_type & PERF_SAMPLE_CPU) {
5425                 data->cpu_entry.cpu      = raw_smp_processor_id();
5426                 data->cpu_entry.reserved = 0;
5427         }
5428 }
5429
5430 void perf_event_header__init_id(struct perf_event_header *header,
5431                                 struct perf_sample_data *data,
5432                                 struct perf_event *event)
5433 {
5434         if (event->attr.sample_id_all)
5435                 __perf_event_header__init_id(header, data, event);
5436 }
5437
5438 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5439                                            struct perf_sample_data *data)
5440 {
5441         u64 sample_type = data->type;
5442
5443         if (sample_type & PERF_SAMPLE_TID)
5444                 perf_output_put(handle, data->tid_entry);
5445
5446         if (sample_type & PERF_SAMPLE_TIME)
5447                 perf_output_put(handle, data->time);
5448
5449         if (sample_type & PERF_SAMPLE_ID)
5450                 perf_output_put(handle, data->id);
5451
5452         if (sample_type & PERF_SAMPLE_STREAM_ID)
5453                 perf_output_put(handle, data->stream_id);
5454
5455         if (sample_type & PERF_SAMPLE_CPU)
5456                 perf_output_put(handle, data->cpu_entry);
5457
5458         if (sample_type & PERF_SAMPLE_IDENTIFIER)
5459                 perf_output_put(handle, data->id);
5460 }
5461
5462 void perf_event__output_id_sample(struct perf_event *event,
5463                                   struct perf_output_handle *handle,
5464                                   struct perf_sample_data *sample)
5465 {
5466         if (event->attr.sample_id_all)
5467                 __perf_event__output_id_sample(handle, sample);
5468 }
5469
5470 static void perf_output_read_one(struct perf_output_handle *handle,
5471                                  struct perf_event *event,
5472                                  u64 enabled, u64 running)
5473 {
5474         u64 read_format = event->attr.read_format;
5475         u64 values[4];
5476         int n = 0;
5477
5478         values[n++] = perf_event_count(event);
5479         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5480                 values[n++] = enabled +
5481                         atomic64_read(&event->child_total_time_enabled);
5482         }
5483         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5484                 values[n++] = running +
5485                         atomic64_read(&event->child_total_time_running);
5486         }
5487         if (read_format & PERF_FORMAT_ID)
5488                 values[n++] = primary_event_id(event);
5489
5490         __output_copy(handle, values, n * sizeof(u64));
5491 }
5492
5493 /*
5494  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
5495  */
5496 static void perf_output_read_group(struct perf_output_handle *handle,
5497                             struct perf_event *event,
5498                             u64 enabled, u64 running)
5499 {
5500         struct perf_event *leader = event->group_leader, *sub;
5501         u64 read_format = event->attr.read_format;
5502         u64 values[5];
5503         int n = 0;
5504
5505         values[n++] = 1 + leader->nr_siblings;
5506
5507         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5508                 values[n++] = enabled;
5509
5510         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5511                 values[n++] = running;
5512
5513         if (leader != event)
5514                 leader->pmu->read(leader);
5515
5516         values[n++] = perf_event_count(leader);
5517         if (read_format & PERF_FORMAT_ID)
5518                 values[n++] = primary_event_id(leader);
5519
5520         __output_copy(handle, values, n * sizeof(u64));
5521
5522         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
5523                 n = 0;
5524
5525                 if ((sub != event) &&
5526                     (sub->state == PERF_EVENT_STATE_ACTIVE))
5527                         sub->pmu->read(sub);
5528
5529                 values[n++] = perf_event_count(sub);
5530                 if (read_format & PERF_FORMAT_ID)
5531                         values[n++] = primary_event_id(sub);
5532
5533                 __output_copy(handle, values, n * sizeof(u64));
5534         }
5535 }
5536
5537 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5538                                  PERF_FORMAT_TOTAL_TIME_RUNNING)
5539
5540 static void perf_output_read(struct perf_output_handle *handle,
5541                              struct perf_event *event)
5542 {
5543         u64 enabled = 0, running = 0, now;
5544         u64 read_format = event->attr.read_format;
5545
5546         /*
5547          * compute total_time_enabled, total_time_running
5548          * based on snapshot values taken when the event
5549          * was last scheduled in.
5550          *
5551          * we cannot simply called update_context_time()
5552          * because of locking issue as we are called in
5553          * NMI context
5554          */
5555         if (read_format & PERF_FORMAT_TOTAL_TIMES)
5556                 calc_timer_values(event, &now, &enabled, &running);
5557
5558         if (event->attr.read_format & PERF_FORMAT_GROUP)
5559                 perf_output_read_group(handle, event, enabled, running);
5560         else
5561                 perf_output_read_one(handle, event, enabled, running);
5562 }
5563
5564 void perf_output_sample(struct perf_output_handle *handle,
5565                         struct perf_event_header *header,
5566                         struct perf_sample_data *data,
5567                         struct perf_event *event)
5568 {
5569         u64 sample_type = data->type;
5570
5571         perf_output_put(handle, *header);
5572
5573         if (sample_type & PERF_SAMPLE_IDENTIFIER)
5574                 perf_output_put(handle, data->id);
5575
5576         if (sample_type & PERF_SAMPLE_IP)
5577                 perf_output_put(handle, data->ip);
5578
5579         if (sample_type & PERF_SAMPLE_TID)
5580                 perf_output_put(handle, data->tid_entry);
5581
5582         if (sample_type & PERF_SAMPLE_TIME)
5583                 perf_output_put(handle, data->time);
5584
5585         if (sample_type & PERF_SAMPLE_ADDR)
5586                 perf_output_put(handle, data->addr);
5587
5588         if (sample_type & PERF_SAMPLE_ID)
5589                 perf_output_put(handle, data->id);
5590
5591         if (sample_type & PERF_SAMPLE_STREAM_ID)
5592                 perf_output_put(handle, data->stream_id);
5593
5594         if (sample_type & PERF_SAMPLE_CPU)
5595                 perf_output_put(handle, data->cpu_entry);
5596
5597         if (sample_type & PERF_SAMPLE_PERIOD)
5598                 perf_output_put(handle, data->period);
5599
5600         if (sample_type & PERF_SAMPLE_READ)
5601                 perf_output_read(handle, event);
5602
5603         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5604                 if (data->callchain) {
5605                         int size = 1;
5606
5607                         if (data->callchain)
5608                                 size += data->callchain->nr;
5609
5610                         size *= sizeof(u64);
5611
5612                         __output_copy(handle, data->callchain, size);
5613                 } else {
5614                         u64 nr = 0;
5615                         perf_output_put(handle, nr);
5616                 }
5617         }
5618
5619         if (sample_type & PERF_SAMPLE_RAW) {
5620                 struct perf_raw_record *raw = data->raw;
5621
5622                 if (raw) {
5623                         struct perf_raw_frag *frag = &raw->frag;
5624
5625                         perf_output_put(handle, raw->size);
5626                         do {
5627                                 if (frag->copy) {
5628                                         __output_custom(handle, frag->copy,
5629                                                         frag->data, frag->size);
5630                                 } else {
5631                                         __output_copy(handle, frag->data,
5632                                                       frag->size);
5633                                 }
5634                                 if (perf_raw_frag_last(frag))
5635                                         break;
5636                                 frag = frag->next;
5637                         } while (1);
5638                         if (frag->pad)
5639                                 __output_skip(handle, NULL, frag->pad);
5640                 } else {
5641                         struct {
5642                                 u32     size;
5643                                 u32     data;
5644                         } raw = {
5645                                 .size = sizeof(u32),
5646                                 .data = 0,
5647                         };
5648                         perf_output_put(handle, raw);
5649                 }
5650         }
5651
5652         if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5653                 if (data->br_stack) {
5654                         size_t size;
5655
5656                         size = data->br_stack->nr
5657                              * sizeof(struct perf_branch_entry);
5658
5659                         perf_output_put(handle, data->br_stack->nr);
5660                         perf_output_copy(handle, data->br_stack->entries, size);
5661                 } else {
5662                         /*
5663                          * we always store at least the value of nr
5664                          */
5665                         u64 nr = 0;
5666                         perf_output_put(handle, nr);
5667                 }
5668         }
5669
5670         if (sample_type & PERF_SAMPLE_REGS_USER) {
5671                 u64 abi = data->regs_user.abi;
5672
5673                 /*
5674                  * If there are no regs to dump, notice it through
5675                  * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5676                  */
5677                 perf_output_put(handle, abi);
5678
5679                 if (abi) {
5680                         u64 mask = event->attr.sample_regs_user;
5681                         perf_output_sample_regs(handle,
5682                                                 data->regs_user.regs,
5683                                                 mask);
5684                 }
5685         }
5686
5687         if (sample_type & PERF_SAMPLE_STACK_USER) {
5688                 perf_output_sample_ustack(handle,
5689                                           data->stack_user_size,
5690                                           data->regs_user.regs);
5691         }
5692
5693         if (sample_type & PERF_SAMPLE_WEIGHT)
5694                 perf_output_put(handle, data->weight);
5695
5696         if (sample_type & PERF_SAMPLE_DATA_SRC)
5697                 perf_output_put(handle, data->data_src.val);
5698
5699         if (sample_type & PERF_SAMPLE_TRANSACTION)
5700                 perf_output_put(handle, data->txn);
5701
5702         if (sample_type & PERF_SAMPLE_REGS_INTR) {
5703                 u64 abi = data->regs_intr.abi;
5704                 /*
5705                  * If there are no regs to dump, notice it through
5706                  * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5707                  */
5708                 perf_output_put(handle, abi);
5709
5710                 if (abi) {
5711                         u64 mask = event->attr.sample_regs_intr;
5712
5713                         perf_output_sample_regs(handle,
5714                                                 data->regs_intr.regs,
5715                                                 mask);
5716                 }
5717         }
5718
5719         if (!event->attr.watermark) {
5720                 int wakeup_events = event->attr.wakeup_events;
5721
5722                 if (wakeup_events) {
5723                         struct ring_buffer *rb = handle->rb;
5724                         int events = local_inc_return(&rb->events);
5725
5726                         if (events >= wakeup_events) {
5727                                 local_sub(wakeup_events, &rb->events);
5728                                 local_inc(&rb->wakeup);
5729                         }
5730                 }
5731         }
5732 }
5733
5734 void perf_prepare_sample(struct perf_event_header *header,
5735                          struct perf_sample_data *data,
5736                          struct perf_event *event,
5737                          struct pt_regs *regs)
5738 {
5739         u64 sample_type = event->attr.sample_type;
5740
5741         header->type = PERF_RECORD_SAMPLE;
5742         header->size = sizeof(*header) + event->header_size;
5743
5744         header->misc = 0;
5745         header->misc |= perf_misc_flags(regs);
5746
5747         __perf_event_header__init_id(header, data, event);
5748
5749         if (sample_type & PERF_SAMPLE_IP)
5750                 data->ip = perf_instruction_pointer(regs);
5751
5752         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5753                 int size = 1;
5754
5755                 data->callchain = perf_callchain(event, regs);
5756
5757                 if (data->callchain)
5758                         size += data->callchain->nr;
5759
5760                 header->size += size * sizeof(u64);
5761         }
5762
5763         if (sample_type & PERF_SAMPLE_RAW) {
5764                 struct perf_raw_record *raw = data->raw;
5765                 int size;
5766
5767                 if (raw) {
5768                         struct perf_raw_frag *frag = &raw->frag;
5769                         u32 sum = 0;
5770
5771                         do {
5772                                 sum += frag->size;
5773                                 if (perf_raw_frag_last(frag))
5774                                         break;
5775                                 frag = frag->next;
5776                         } while (1);
5777
5778                         size = round_up(sum + sizeof(u32), sizeof(u64));
5779                         raw->size = size - sizeof(u32);
5780                         frag->pad = raw->size - sum;
5781                 } else {
5782                         size = sizeof(u64);
5783                 }
5784
5785                 header->size += size;
5786         }
5787
5788         if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5789                 int size = sizeof(u64); /* nr */
5790                 if (data->br_stack) {
5791                         size += data->br_stack->nr
5792                               * sizeof(struct perf_branch_entry);
5793                 }
5794                 header->size += size;
5795         }
5796
5797         if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
5798                 perf_sample_regs_user(&data->regs_user, regs,
5799                                       &data->regs_user_copy);
5800
5801         if (sample_type & PERF_SAMPLE_REGS_USER) {
5802                 /* regs dump ABI info */
5803                 int size = sizeof(u64);
5804
5805                 if (data->regs_user.regs) {
5806                         u64 mask = event->attr.sample_regs_user;
5807                         size += hweight64(mask) * sizeof(u64);
5808                 }
5809
5810                 header->size += size;
5811         }
5812
5813         if (sample_type & PERF_SAMPLE_STACK_USER) {
5814                 /*
5815                  * Either we need PERF_SAMPLE_STACK_USER bit to be allways
5816                  * processed as the last one or have additional check added
5817                  * in case new sample type is added, because we could eat
5818                  * up the rest of the sample size.
5819                  */
5820                 u16 stack_size = event->attr.sample_stack_user;
5821                 u16 size = sizeof(u64);
5822
5823                 stack_size = perf_sample_ustack_size(stack_size, header->size,
5824                                                      data->regs_user.regs);
5825
5826                 /*
5827                  * If there is something to dump, add space for the dump
5828                  * itself and for the field that tells the dynamic size,
5829                  * which is how many have been actually dumped.
5830                  */
5831                 if (stack_size)
5832                         size += sizeof(u64) + stack_size;
5833
5834                 data->stack_user_size = stack_size;
5835                 header->size += size;
5836         }
5837
5838         if (sample_type & PERF_SAMPLE_REGS_INTR) {
5839                 /* regs dump ABI info */
5840                 int size = sizeof(u64);
5841
5842                 perf_sample_regs_intr(&data->regs_intr, regs);
5843
5844                 if (data->regs_intr.regs) {
5845                         u64 mask = event->attr.sample_regs_intr;
5846
5847                         size += hweight64(mask) * sizeof(u64);
5848                 }
5849
5850                 header->size += size;
5851         }
5852 }
5853
5854 static void __always_inline
5855 __perf_event_output(struct perf_event *event,
5856                     struct perf_sample_data *data,
5857                     struct pt_regs *regs,
5858                     int (*output_begin)(struct perf_output_handle *,
5859                                         struct perf_event *,
5860                                         unsigned int))
5861 {
5862         struct perf_output_handle handle;
5863         struct perf_event_header header;
5864
5865         /* protect the callchain buffers */
5866         rcu_read_lock();
5867
5868         perf_prepare_sample(&header, data, event, regs);
5869
5870         if (output_begin(&handle, event, header.size))
5871                 goto exit;
5872
5873         perf_output_sample(&handle, &header, data, event);
5874
5875         perf_output_end(&handle);
5876
5877 exit:
5878         rcu_read_unlock();
5879 }
5880
5881 void
5882 perf_event_output_forward(struct perf_event *event,
5883                          struct perf_sample_data *data,
5884                          struct pt_regs *regs)
5885 {
5886         __perf_event_output(event, data, regs, perf_output_begin_forward);
5887 }
5888
5889 void
5890 perf_event_output_backward(struct perf_event *event,
5891                            struct perf_sample_data *data,
5892                            struct pt_regs *regs)
5893 {
5894         __perf_event_output(event, data, regs, perf_output_begin_backward);
5895 }
5896
5897 void
5898 perf_event_output(struct perf_event *event,
5899                   struct perf_sample_data *data,
5900                   struct pt_regs *regs)
5901 {
5902         __perf_event_output(event, data, regs, perf_output_begin);
5903 }
5904
5905 /*
5906  * read event_id
5907  */
5908
5909 struct perf_read_event {
5910         struct perf_event_header        header;
5911
5912         u32                             pid;
5913         u32                             tid;
5914 };
5915
5916 static void
5917 perf_event_read_event(struct perf_event *event,
5918                         struct task_struct *task)
5919 {
5920         struct perf_output_handle handle;
5921         struct perf_sample_data sample;
5922         struct perf_read_event read_event = {
5923                 .header = {
5924                         .type = PERF_RECORD_READ,
5925                         .misc = 0,
5926                         .size = sizeof(read_event) + event->read_size,
5927                 },
5928                 .pid = perf_event_pid(event, task),
5929                 .tid = perf_event_tid(event, task),
5930         };
5931         int ret;
5932
5933         perf_event_header__init_id(&read_event.header, &sample, event);
5934         ret = perf_output_begin(&handle, event, read_event.header.size);
5935         if (ret)
5936                 return;
5937
5938         perf_output_put(&handle, read_event);
5939         perf_output_read(&handle, event);
5940         perf_event__output_id_sample(event, &handle, &sample);
5941
5942         perf_output_end(&handle);
5943 }
5944
5945 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
5946
5947 static void
5948 perf_iterate_ctx(struct perf_event_context *ctx,
5949                    perf_iterate_f output,
5950                    void *data, bool all)
5951 {
5952         struct perf_event *event;
5953
5954         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5955                 if (!all) {
5956                         if (event->state < PERF_EVENT_STATE_INACTIVE)
5957                                 continue;
5958                         if (!event_filter_match(event))
5959                                 continue;
5960                 }
5961
5962                 output(event, data);
5963         }
5964 }
5965
5966 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
5967 {
5968         struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
5969         struct perf_event *event;
5970
5971         list_for_each_entry_rcu(event, &pel->list, sb_list) {
5972                 if (event->state < PERF_EVENT_STATE_INACTIVE)
5973                         continue;
5974                 if (!event_filter_match(event))
5975                         continue;
5976                 output(event, data);
5977         }
5978 }
5979
5980 /*
5981  * Iterate all events that need to receive side-band events.
5982  *
5983  * For new callers; ensure that account_pmu_sb_event() includes
5984  * your event, otherwise it might not get delivered.
5985  */
5986 static void
5987 perf_iterate_sb(perf_iterate_f output, void *data,
5988                struct perf_event_context *task_ctx)
5989 {
5990         struct perf_event_context *ctx;
5991         int ctxn;
5992
5993         rcu_read_lock();
5994         preempt_disable();
5995
5996         /*
5997          * If we have task_ctx != NULL we only notify the task context itself.
5998          * The task_ctx is set only for EXIT events before releasing task
5999          * context.
6000          */
6001         if (task_ctx) {
6002                 perf_iterate_ctx(task_ctx, output, data, false);
6003                 goto done;
6004         }
6005
6006         perf_iterate_sb_cpu(output, data);
6007
6008         for_each_task_context_nr(ctxn) {
6009                 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6010                 if (ctx)
6011                         perf_iterate_ctx(ctx, output, data, false);
6012         }
6013 done:
6014         preempt_enable();
6015         rcu_read_unlock();
6016 }
6017
6018 /*
6019  * Clear all file-based filters at exec, they'll have to be
6020  * re-instated when/if these objects are mmapped again.
6021  */
6022 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6023 {
6024         struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6025         struct perf_addr_filter *filter;
6026         unsigned int restart = 0, count = 0;
6027         unsigned long flags;
6028
6029         if (!has_addr_filter(event))
6030                 return;
6031
6032         raw_spin_lock_irqsave(&ifh->lock, flags);
6033         list_for_each_entry(filter, &ifh->list, entry) {
6034                 if (filter->inode) {
6035                         event->addr_filters_offs[count] = 0;
6036                         restart++;
6037                 }
6038
6039                 count++;
6040         }
6041
6042         if (restart)
6043                 event->addr_filters_gen++;
6044         raw_spin_unlock_irqrestore(&ifh->lock, flags);
6045
6046         if (restart)
6047                 perf_event_restart(event);
6048 }
6049
6050 void perf_event_exec(void)
6051 {
6052         struct perf_event_context *ctx;
6053         int ctxn;
6054
6055         rcu_read_lock();
6056         for_each_task_context_nr(ctxn) {
6057                 ctx = current->perf_event_ctxp[ctxn];
6058                 if (!ctx)
6059                         continue;
6060
6061                 perf_event_enable_on_exec(ctxn);
6062
6063                 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
6064                                    true);
6065         }
6066         rcu_read_unlock();
6067 }
6068
6069 struct remote_output {
6070         struct ring_buffer      *rb;
6071         int                     err;
6072 };
6073
6074 static void __perf_event_output_stop(struct perf_event *event, void *data)
6075 {
6076         struct perf_event *parent = event->parent;
6077         struct remote_output *ro = data;
6078         struct ring_buffer *rb = ro->rb;
6079         struct stop_event_data sd = {
6080                 .event  = event,
6081         };
6082
6083         if (!has_aux(event))
6084                 return;
6085
6086         if (!parent)
6087                 parent = event;
6088
6089         /*
6090          * In case of inheritance, it will be the parent that links to the
6091          * ring-buffer, but it will be the child that's actually using it:
6092          */
6093         if (rcu_dereference(parent->rb) == rb)
6094                 ro->err = __perf_event_stop(&sd);
6095 }
6096
6097 static int __perf_pmu_output_stop(void *info)
6098 {
6099         struct perf_event *event = info;
6100         struct pmu *pmu = event->pmu;
6101         struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
6102         struct remote_output ro = {
6103                 .rb     = event->rb,
6104         };
6105
6106         rcu_read_lock();
6107         perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
6108         if (cpuctx->task_ctx)
6109                 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
6110                                    &ro, false);
6111         rcu_read_unlock();
6112
6113         return ro.err;
6114 }
6115
6116 static void perf_pmu_output_stop(struct perf_event *event)
6117 {
6118         struct perf_event *iter;
6119         int err, cpu;
6120
6121 restart:
6122         rcu_read_lock();
6123         list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6124                 /*
6125                  * For per-CPU events, we need to make sure that neither they
6126                  * nor their children are running; for cpu==-1 events it's
6127                  * sufficient to stop the event itself if it's active, since
6128                  * it can't have children.
6129                  */
6130                 cpu = iter->cpu;
6131                 if (cpu == -1)
6132                         cpu = READ_ONCE(iter->oncpu);
6133
6134                 if (cpu == -1)
6135                         continue;
6136
6137                 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6138                 if (err == -EAGAIN) {
6139                         rcu_read_unlock();
6140                         goto restart;
6141                 }
6142         }
6143         rcu_read_unlock();
6144 }
6145
6146 /*
6147  * task tracking -- fork/exit
6148  *
6149  * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
6150  */
6151
6152 struct perf_task_event {
6153         struct task_struct              *task;
6154         struct perf_event_context       *task_ctx;
6155
6156         struct {
6157                 struct perf_event_header        header;
6158
6159                 u32                             pid;
6160                 u32                             ppid;
6161                 u32                             tid;
6162                 u32                             ptid;
6163                 u64                             time;
6164         } event_id;
6165 };
6166
6167 static int perf_event_task_match(struct perf_event *event)
6168 {
6169         return event->attr.comm  || event->attr.mmap ||
6170                event->attr.mmap2 || event->attr.mmap_data ||
6171                event->attr.task;
6172 }
6173
6174 static void perf_event_task_output(struct perf_event *event,
6175                                    void *data)
6176 {
6177         struct perf_task_event *task_event = data;
6178         struct perf_output_handle handle;
6179         struct perf_sample_data sample;
6180         struct task_struct *task = task_event->task;
6181         int ret, size = task_event->event_id.header.size;
6182
6183         if (!perf_event_task_match(event))
6184                 return;
6185
6186         perf_event_header__init_id(&task_event->event_id.header, &sample, event);
6187
6188         ret = perf_output_begin(&handle, event,
6189                                 task_event->event_id.header.size);
6190         if (ret)
6191                 goto out;
6192
6193         task_event->event_id.pid = perf_event_pid(event, task);
6194         task_event->event_id.ppid = perf_event_pid(event, current);
6195
6196         task_event->event_id.tid = perf_event_tid(event, task);
6197         task_event->event_id.ptid = perf_event_tid(event, current);
6198
6199         task_event->event_id.time = perf_event_clock(event);
6200
6201         perf_output_put(&handle, task_event->event_id);
6202
6203         perf_event__output_id_sample(event, &handle, &sample);
6204
6205         perf_output_end(&handle);
6206 out:
6207         task_event->event_id.header.size = size;
6208 }
6209
6210 static void perf_event_task(struct task_struct *task,
6211                               struct perf_event_context *task_ctx,
6212                               int new)
6213 {
6214         struct perf_task_event task_event;
6215
6216         if (!atomic_read(&nr_comm_events) &&
6217             !atomic_read(&nr_mmap_events) &&
6218             !atomic_read(&nr_task_events))
6219                 return;
6220
6221         task_event = (struct perf_task_event){
6222                 .task     = task,
6223                 .task_ctx = task_ctx,
6224                 .event_id    = {
6225                         .header = {
6226                                 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
6227                                 .misc = 0,
6228                                 .size = sizeof(task_event.event_id),
6229                         },
6230                         /* .pid  */
6231                         /* .ppid */
6232                         /* .tid  */
6233                         /* .ptid */
6234                         /* .time */
6235                 },
6236         };
6237
6238         perf_iterate_sb(perf_event_task_output,
6239                        &task_event,
6240                        task_ctx);
6241 }
6242
6243 void perf_event_fork(struct task_struct *task)
6244 {
6245         perf_event_task(task, NULL, 1);
6246 }
6247
6248 /*
6249  * comm tracking
6250  */
6251
6252 struct perf_comm_event {
6253         struct task_struct      *task;
6254         char                    *comm;
6255         int                     comm_size;
6256
6257         struct {
6258                 struct perf_event_header        header;
6259
6260                 u32                             pid;
6261                 u32                             tid;
6262         } event_id;
6263 };
6264
6265 static int perf_event_comm_match(struct perf_event *event)
6266 {
6267         return event->attr.comm;
6268 }
6269
6270 static void perf_event_comm_output(struct perf_event *event,
6271                                    void *data)
6272 {
6273         struct perf_comm_event *comm_event = data;
6274         struct perf_output_handle handle;
6275         struct perf_sample_data sample;
6276         int size = comm_event->event_id.header.size;
6277         int ret;
6278
6279         if (!perf_event_comm_match(event))
6280                 return;
6281
6282         perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6283         ret = perf_output_begin(&handle, event,
6284                                 comm_event->event_id.header.size);
6285
6286         if (ret)
6287                 goto out;
6288
6289         comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6290         comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
6291
6292         perf_output_put(&handle, comm_event->event_id);
6293         __output_copy(&handle, comm_event->comm,
6294                                    comm_event->comm_size);
6295
6296         perf_event__output_id_sample(event, &handle, &sample);
6297
6298         perf_output_end(&handle);
6299 out:
6300         comm_event->event_id.header.size = size;
6301 }
6302
6303 static void perf_event_comm_event(struct perf_comm_event *comm_event)
6304 {
6305         char comm[TASK_COMM_LEN];
6306         unsigned int size;
6307
6308         memset(comm, 0, sizeof(comm));
6309         strlcpy(comm, comm_event->task->comm, sizeof(comm));
6310         size = ALIGN(strlen(comm)+1, sizeof(u64));
6311
6312         comm_event->comm = comm;
6313         comm_event->comm_size = size;
6314
6315         comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
6316
6317         perf_iterate_sb(perf_event_comm_output,
6318                        comm_event,
6319                        NULL);
6320 }
6321
6322 void perf_event_comm(struct task_struct *task, bool exec)
6323 {
6324         struct perf_comm_event comm_event;
6325
6326         if (!atomic_read(&nr_comm_events))
6327                 return;
6328
6329         comm_event = (struct perf_comm_event){
6330                 .task   = task,
6331                 /* .comm      */
6332                 /* .comm_size */
6333                 .event_id  = {
6334                         .header = {
6335                                 .type = PERF_RECORD_COMM,
6336                                 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
6337                                 /* .size */
6338                         },
6339                         /* .pid */
6340                         /* .tid */
6341                 },
6342         };
6343
6344         perf_event_comm_event(&comm_event);
6345 }
6346
6347 /*
6348  * mmap tracking
6349  */
6350
6351 struct perf_mmap_event {
6352         struct vm_area_struct   *vma;
6353
6354         const char              *file_name;
6355         int                     file_size;
6356         int                     maj, min;
6357         u64                     ino;
6358         u64                     ino_generation;
6359         u32                     prot, flags;
6360
6361         struct {
6362                 struct perf_event_header        header;
6363
6364                 u32                             pid;
6365                 u32                             tid;
6366                 u64                             start;
6367                 u64                             len;
6368                 u64                             pgoff;
6369         } event_id;
6370 };
6371
6372 static int perf_event_mmap_match(struct perf_event *event,
6373                                  void *data)
6374 {
6375         struct perf_mmap_event *mmap_event = data;
6376         struct vm_area_struct *vma = mmap_event->vma;
6377         int executable = vma->vm_flags & VM_EXEC;
6378
6379         return (!executable && event->attr.mmap_data) ||
6380                (executable && (event->attr.mmap || event->attr.mmap2));
6381 }
6382
6383 static void perf_event_mmap_output(struct perf_event *event,
6384                                    void *data)
6385 {
6386         struct perf_mmap_event *mmap_event = data;
6387         struct perf_output_handle handle;
6388         struct perf_sample_data sample;
6389         int size = mmap_event->event_id.header.size;
6390         int ret;
6391
6392         if (!perf_event_mmap_match(event, data))
6393                 return;
6394
6395         if (event->attr.mmap2) {
6396                 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6397                 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6398                 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6399                 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
6400                 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
6401                 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6402                 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
6403         }
6404
6405         perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6406         ret = perf_output_begin(&handle, event,
6407                                 mmap_event->event_id.header.size);
6408         if (ret)
6409                 goto out;
6410
6411         mmap_event->event_id.pid = perf_event_pid(event, current);
6412         mmap_event->event_id.tid = perf_event_tid(event, current);
6413
6414         perf_output_put(&handle, mmap_event->event_id);
6415
6416         if (event->attr.mmap2) {
6417                 perf_output_put(&handle, mmap_event->maj);
6418                 perf_output_put(&handle, mmap_event->min);
6419                 perf_output_put(&handle, mmap_event->ino);
6420                 perf_output_put(&handle, mmap_event->ino_generation);
6421                 perf_output_put(&handle, mmap_event->prot);
6422                 perf_output_put(&handle, mmap_event->flags);
6423         }
6424
6425         __output_copy(&handle, mmap_event->file_name,
6426                                    mmap_event->file_size);
6427
6428         perf_event__output_id_sample(event, &handle, &sample);
6429
6430         perf_output_end(&handle);
6431 out:
6432         mmap_event->event_id.header.size = size;
6433 }
6434
6435 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6436 {
6437         struct vm_area_struct *vma = mmap_event->vma;
6438         struct file *file = vma->vm_file;
6439         int maj = 0, min = 0;
6440         u64 ino = 0, gen = 0;
6441         u32 prot = 0, flags = 0;
6442         unsigned int size;
6443         char tmp[16];
6444         char *buf = NULL;
6445         char *name;
6446
6447         if (file) {
6448                 struct inode *inode;
6449                 dev_t dev;
6450
6451                 buf = kmalloc(PATH_MAX, GFP_KERNEL);
6452                 if (!buf) {
6453                         name = "//enomem";
6454                         goto cpy_name;
6455                 }
6456                 /*
6457                  * d_path() works from the end of the rb backwards, so we
6458                  * need to add enough zero bytes after the string to handle
6459                  * the 64bit alignment we do later.
6460                  */
6461                 name = file_path(file, buf, PATH_MAX - sizeof(u64));
6462                 if (IS_ERR(name)) {
6463                         name = "//toolong";
6464                         goto cpy_name;
6465                 }
6466                 inode = file_inode(vma->vm_file);
6467                 dev = inode->i_sb->s_dev;
6468                 ino = inode->i_ino;
6469                 gen = inode->i_generation;
6470                 maj = MAJOR(dev);
6471                 min = MINOR(dev);
6472
6473                 if (vma->vm_flags & VM_READ)
6474                         prot |= PROT_READ;
6475                 if (vma->vm_flags & VM_WRITE)
6476                         prot |= PROT_WRITE;
6477                 if (vma->vm_flags & VM_EXEC)
6478                         prot |= PROT_EXEC;
6479
6480                 if (vma->vm_flags & VM_MAYSHARE)
6481                         flags = MAP_SHARED;
6482                 else
6483                         flags = MAP_PRIVATE;
6484
6485                 if (vma->vm_flags & VM_DENYWRITE)
6486                         flags |= MAP_DENYWRITE;
6487                 if (vma->vm_flags & VM_MAYEXEC)
6488                         flags |= MAP_EXECUTABLE;
6489                 if (vma->vm_flags & VM_LOCKED)
6490                         flags |= MAP_LOCKED;
6491                 if (vma->vm_flags & VM_HUGETLB)
6492                         flags |= MAP_HUGETLB;
6493
6494                 goto got_name;
6495         } else {
6496                 if (vma->vm_ops && vma->vm_ops->name) {
6497                         name = (char *) vma->vm_ops->name(vma);
6498                         if (name)
6499                                 goto cpy_name;
6500                 }
6501
6502                 name = (char *)arch_vma_name(vma);
6503                 if (name)
6504                         goto cpy_name;
6505
6506                 if (vma->vm_start <= vma->vm_mm->start_brk &&
6507                                 vma->vm_end >= vma->vm_mm->brk) {
6508                         name = "[heap]";
6509                         goto cpy_name;
6510                 }
6511                 if (vma->vm_start <= vma->vm_mm->start_stack &&
6512                                 vma->vm_end >= vma->vm_mm->start_stack) {
6513                         name = "[stack]";
6514                         goto cpy_name;
6515                 }
6516
6517                 name = "//anon";
6518                 goto cpy_name;
6519         }
6520
6521 cpy_name:
6522         strlcpy(tmp, name, sizeof(tmp));
6523         name = tmp;
6524 got_name:
6525         /*
6526          * Since our buffer works in 8 byte units we need to align our string
6527          * size to a multiple of 8. However, we must guarantee the tail end is
6528          * zero'd out to avoid leaking random bits to userspace.
6529          */
6530         size = strlen(name)+1;
6531         while (!IS_ALIGNED(size, sizeof(u64)))
6532                 name[size++] = '\0';
6533
6534         mmap_event->file_name = name;
6535         mmap_event->file_size = size;
6536         mmap_event->maj = maj;
6537         mmap_event->min = min;
6538         mmap_event->ino = ino;
6539         mmap_event->ino_generation = gen;
6540         mmap_event->prot = prot;
6541         mmap_event->flags = flags;
6542
6543         if (!(vma->vm_flags & VM_EXEC))
6544                 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6545
6546         mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
6547
6548         perf_iterate_sb(perf_event_mmap_output,
6549                        mmap_event,
6550                        NULL);
6551
6552         kfree(buf);
6553 }
6554
6555 /*
6556  * Whether this @filter depends on a dynamic object which is not loaded
6557  * yet or its load addresses are not known.
6558  */
6559 static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
6560 {
6561         return filter->filter && filter->inode;
6562 }
6563
6564 /*
6565  * Check whether inode and address range match filter criteria.
6566  */
6567 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6568                                      struct file *file, unsigned long offset,
6569                                      unsigned long size)
6570 {
6571         if (filter->inode != file->f_inode)
6572                 return false;
6573
6574         if (filter->offset > offset + size)
6575                 return false;
6576
6577         if (filter->offset + filter->size < offset)
6578                 return false;
6579
6580         return true;
6581 }
6582
6583 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6584 {
6585         struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6586         struct vm_area_struct *vma = data;
6587         unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6588         struct file *file = vma->vm_file;
6589         struct perf_addr_filter *filter;
6590         unsigned int restart = 0, count = 0;
6591
6592         if (!has_addr_filter(event))
6593                 return;
6594
6595         if (!file)
6596                 return;
6597
6598         raw_spin_lock_irqsave(&ifh->lock, flags);
6599         list_for_each_entry(filter, &ifh->list, entry) {
6600                 if (perf_addr_filter_match(filter, file, off,
6601                                              vma->vm_end - vma->vm_start)) {
6602                         event->addr_filters_offs[count] = vma->vm_start;
6603                         restart++;
6604                 }
6605
6606                 count++;
6607         }
6608
6609         if (restart)
6610                 event->addr_filters_gen++;
6611         raw_spin_unlock_irqrestore(&ifh->lock, flags);
6612
6613         if (restart)
6614                 perf_event_restart(event);
6615 }
6616
6617 /*
6618  * Adjust all task's events' filters to the new vma
6619  */
6620 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
6621 {
6622         struct perf_event_context *ctx;
6623         int ctxn;
6624
6625         rcu_read_lock();
6626         for_each_task_context_nr(ctxn) {
6627                 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6628                 if (!ctx)
6629                         continue;
6630
6631                 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
6632         }
6633         rcu_read_unlock();
6634 }
6635
6636 void perf_event_mmap(struct vm_area_struct *vma)
6637 {
6638         struct perf_mmap_event mmap_event;
6639
6640         if (!atomic_read(&nr_mmap_events))
6641                 return;
6642
6643         mmap_event = (struct perf_mmap_event){
6644                 .vma    = vma,
6645                 /* .file_name */
6646                 /* .file_size */
6647                 .event_id  = {
6648                         .header = {
6649                                 .type = PERF_RECORD_MMAP,
6650                                 .misc = PERF_RECORD_MISC_USER,
6651                                 /* .size */
6652                         },
6653                         /* .pid */
6654                         /* .tid */
6655                         .start  = vma->vm_start,
6656                         .len    = vma->vm_end - vma->vm_start,
6657                         .pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
6658                 },
6659                 /* .maj (attr_mmap2 only) */
6660                 /* .min (attr_mmap2 only) */
6661                 /* .ino (attr_mmap2 only) */
6662                 /* .ino_generation (attr_mmap2 only) */
6663                 /* .prot (attr_mmap2 only) */
6664                 /* .flags (attr_mmap2 only) */
6665         };
6666
6667         perf_addr_filters_adjust(vma);
6668         perf_event_mmap_event(&mmap_event);
6669 }
6670
6671 void perf_event_aux_event(struct perf_event *event, unsigned long head,
6672                           unsigned long size, u64 flags)
6673 {
6674         struct perf_output_handle handle;
6675         struct perf_sample_data sample;
6676         struct perf_aux_event {
6677                 struct perf_event_header        header;
6678                 u64                             offset;
6679                 u64                             size;
6680                 u64                             flags;
6681         } rec = {
6682                 .header = {
6683                         .type = PERF_RECORD_AUX,
6684                         .misc = 0,
6685                         .size = sizeof(rec),
6686                 },
6687                 .offset         = head,
6688                 .size           = size,
6689                 .flags          = flags,
6690         };
6691         int ret;
6692
6693         perf_event_header__init_id(&rec.header, &sample, event);
6694         ret = perf_output_begin(&handle, event, rec.header.size);
6695
6696         if (ret)
6697                 return;
6698
6699         perf_output_put(&handle, rec);
6700         perf_event__output_id_sample(event, &handle, &sample);
6701
6702         perf_output_end(&handle);
6703 }
6704
6705 /*
6706  * Lost/dropped samples logging
6707  */
6708 void perf_log_lost_samples(struct perf_event *event, u64 lost)
6709 {
6710         struct perf_output_handle handle;
6711         struct perf_sample_data sample;
6712         int ret;
6713
6714         struct {
6715                 struct perf_event_header        header;
6716                 u64                             lost;
6717         } lost_samples_event = {
6718                 .header = {
6719                         .type = PERF_RECORD_LOST_SAMPLES,
6720                         .misc = 0,
6721                         .size = sizeof(lost_samples_event),
6722                 },
6723                 .lost           = lost,
6724         };
6725
6726         perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6727
6728         ret = perf_output_begin(&handle, event,
6729                                 lost_samples_event.header.size);
6730         if (ret)
6731                 return;
6732
6733         perf_output_put(&handle, lost_samples_event);
6734         perf_event__output_id_sample(event, &handle, &sample);
6735         perf_output_end(&handle);
6736 }
6737
6738 /*
6739  * context_switch tracking
6740  */
6741
6742 struct perf_switch_event {
6743         struct task_struct      *task;
6744         struct task_struct      *next_prev;
6745
6746         struct {
6747                 struct perf_event_header        header;
6748                 u32                             next_prev_pid;
6749                 u32                             next_prev_tid;
6750         } event_id;
6751 };
6752
6753 static int perf_event_switch_match(struct perf_event *event)
6754 {
6755         return event->attr.context_switch;
6756 }
6757
6758 static void perf_event_switch_output(struct perf_event *event, void *data)
6759 {
6760         struct perf_switch_event *se = data;
6761         struct perf_output_handle handle;
6762         struct perf_sample_data sample;
6763         int ret;
6764
6765         if (!perf_event_switch_match(event))
6766                 return;
6767
6768         /* Only CPU-wide events are allowed to see next/prev pid/tid */
6769         if (event->ctx->task) {
6770                 se->event_id.header.type = PERF_RECORD_SWITCH;
6771                 se->event_id.header.size = sizeof(se->event_id.header);
6772         } else {
6773                 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6774                 se->event_id.header.size = sizeof(se->event_id);
6775                 se->event_id.next_prev_pid =
6776                                         perf_event_pid(event, se->next_prev);
6777                 se->event_id.next_prev_tid =
6778                                         perf_event_tid(event, se->next_prev);
6779         }
6780
6781         perf_event_header__init_id(&se->event_id.header, &sample, event);
6782
6783         ret = perf_output_begin(&handle, event, se->event_id.header.size);
6784         if (ret)
6785                 return;
6786
6787         if (event->ctx->task)
6788                 perf_output_put(&handle, se->event_id.header);
6789         else
6790                 perf_output_put(&handle, se->event_id);
6791
6792         perf_event__output_id_sample(event, &handle, &sample);
6793
6794         perf_output_end(&handle);
6795 }
6796
6797 static void perf_event_switch(struct task_struct *task,
6798                               struct task_struct *next_prev, bool sched_in)
6799 {
6800         struct perf_switch_event switch_event;
6801
6802         /* N.B. caller checks nr_switch_events != 0 */
6803
6804         switch_event = (struct perf_switch_event){
6805                 .task           = task,
6806                 .next_prev      = next_prev,
6807                 .event_id       = {
6808                         .header = {
6809                                 /* .type */
6810                                 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6811                                 /* .size */
6812                         },
6813                         /* .next_prev_pid */
6814                         /* .next_prev_tid */
6815                 },
6816         };
6817
6818         perf_iterate_sb(perf_event_switch_output,
6819                        &switch_event,
6820                        NULL);
6821 }
6822
6823 /*
6824  * IRQ throttle logging
6825  */
6826
6827 static void perf_log_throttle(struct perf_event *event, int enable)
6828 {
6829         struct perf_output_handle handle;
6830         struct perf_sample_data sample;
6831         int ret;
6832
6833         struct {
6834                 struct perf_event_header        header;
6835                 u64                             time;
6836                 u64                             id;
6837                 u64                             stream_id;
6838         } throttle_event = {
6839                 .header = {
6840                         .type = PERF_RECORD_THROTTLE,
6841                         .misc = 0,
6842                         .size = sizeof(throttle_event),
6843                 },
6844                 .time           = perf_event_clock(event),
6845                 .id             = primary_event_id(event),
6846                 .stream_id      = event->id,
6847         };
6848
6849         if (enable)
6850                 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
6851
6852         perf_event_header__init_id(&throttle_event.header, &sample, event);
6853
6854         ret = perf_output_begin(&handle, event,
6855                                 throttle_event.header.size);
6856         if (ret)
6857                 return;
6858
6859         perf_output_put(&handle, throttle_event);
6860         perf_event__output_id_sample(event, &handle, &sample);
6861         perf_output_end(&handle);
6862 }
6863
6864 static void perf_log_itrace_start(struct perf_event *event)
6865 {
6866         struct perf_output_handle handle;
6867         struct perf_sample_data sample;
6868         struct perf_aux_event {
6869                 struct perf_event_header        header;
6870                 u32                             pid;
6871                 u32                             tid;
6872         } rec;
6873         int ret;
6874
6875         if (event->parent)
6876                 event = event->parent;
6877
6878         if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6879             event->hw.itrace_started)
6880                 return;
6881
6882         rec.header.type = PERF_RECORD_ITRACE_START;
6883         rec.header.misc = 0;
6884         rec.header.size = sizeof(rec);
6885         rec.pid = perf_event_pid(event, current);
6886         rec.tid = perf_event_tid(event, current);
6887
6888         perf_event_header__init_id(&rec.header, &sample, event);
6889         ret = perf_output_begin(&handle, event, rec.header.size);
6890
6891         if (ret)
6892                 return;
6893
6894         perf_output_put(&handle, rec);
6895         perf_event__output_id_sample(event, &handle, &sample);
6896
6897         perf_output_end(&handle);
6898 }
6899
6900 /*
6901  * Generic event overflow handling, sampling.
6902  */
6903
6904 static int __perf_event_overflow(struct perf_event *event,
6905                                    int throttle, struct perf_sample_data *data,
6906                                    struct pt_regs *regs)
6907 {
6908         int events = atomic_read(&event->event_limit);
6909         struct hw_perf_event *hwc = &event->hw;
6910         u64 seq;
6911         int ret = 0;
6912
6913         /*
6914          * Non-sampling counters might still use the PMI to fold short
6915          * hardware counters, ignore those.
6916          */
6917         if (unlikely(!is_sampling_event(event)))
6918                 return 0;
6919
6920         seq = __this_cpu_read(perf_throttled_seq);
6921         if (seq != hwc->interrupts_seq) {
6922                 hwc->interrupts_seq = seq;
6923                 hwc->interrupts = 1;
6924         } else {
6925                 hwc->interrupts++;
6926                 if (unlikely(throttle
6927                              && hwc->interrupts >= max_samples_per_tick)) {
6928                         __this_cpu_inc(perf_throttled_count);
6929                         tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
6930                         hwc->interrupts = MAX_INTERRUPTS;
6931                         perf_log_throttle(event, 0);
6932                         ret = 1;
6933                 }
6934         }
6935
6936         if (event->attr.freq) {
6937                 u64 now = perf_clock();
6938                 s64 delta = now - hwc->freq_time_stamp;
6939
6940                 hwc->freq_time_stamp = now;
6941
6942                 if (delta > 0 && delta < 2*TICK_NSEC)
6943                         perf_adjust_period(event, delta, hwc->last_period, true);
6944         }
6945
6946         /*
6947          * XXX event_limit might not quite work as expected on inherited
6948          * events
6949          */
6950
6951         event->pending_kill = POLL_IN;
6952         if (events && atomic_dec_and_test(&event->event_limit)) {
6953                 ret = 1;
6954                 event->pending_kill = POLL_HUP;
6955                 event->pending_disable = 1;
6956                 irq_work_queue(&event->pending);
6957         }
6958
6959         event->overflow_handler(event, data, regs);
6960
6961         if (*perf_event_fasync(event) && event->pending_kill) {
6962                 event->pending_wakeup = 1;
6963                 irq_work_queue(&event->pending);
6964         }
6965
6966         return ret;
6967 }
6968
6969 int perf_event_overflow(struct perf_event *event,
6970                           struct perf_sample_data *data,
6971                           struct pt_regs *regs)
6972 {
6973         return __perf_event_overflow(event, 1, data, regs);
6974 }
6975
6976 /*
6977  * Generic software event infrastructure
6978  */
6979
6980 struct swevent_htable {
6981         struct swevent_hlist            *swevent_hlist;
6982         struct mutex                    hlist_mutex;
6983         int                             hlist_refcount;
6984
6985         /* Recursion avoidance in each contexts */
6986         int                             recursion[PERF_NR_CONTEXTS];
6987 };
6988
6989 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
6990
6991 /*
6992  * We directly increment event->count and keep a second value in
6993  * event->hw.period_left to count intervals. This period event
6994  * is kept in the range [-sample_period, 0] so that we can use the
6995  * sign as trigger.
6996  */
6997
6998 u64 perf_swevent_set_period(struct perf_event *event)
6999 {
7000         struct hw_perf_event *hwc = &event->hw;
7001         u64 period = hwc->last_period;
7002         u64 nr, offset;
7003         s64 old, val;
7004
7005         hwc->last_period = hwc->sample_period;
7006
7007 again:
7008         old = val = local64_read(&hwc->period_left);
7009         if (val < 0)
7010                 return 0;
7011
7012         nr = div64_u64(period + val, period);
7013         offset = nr * period;
7014         val -= offset;
7015         if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7016                 goto again;
7017
7018         return nr;
7019 }
7020
7021 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
7022                                     struct perf_sample_data *data,
7023                                     struct pt_regs *regs)
7024 {
7025         struct hw_perf_event *hwc = &event->hw;
7026         int throttle = 0;
7027
7028         if (!overflow)
7029                 overflow = perf_swevent_set_period(event);
7030
7031         if (hwc->interrupts == MAX_INTERRUPTS)
7032                 return;
7033
7034         for (; overflow; overflow--) {
7035                 if (__perf_event_overflow(event, throttle,
7036                                             data, regs)) {
7037                         /*
7038                          * We inhibit the overflow from happening when
7039                          * hwc->interrupts == MAX_INTERRUPTS.
7040                          */
7041                         break;
7042                 }
7043                 throttle = 1;
7044         }
7045 }
7046
7047 static void perf_swevent_event(struct perf_event *event, u64 nr,
7048                                struct perf_sample_data *data,
7049                                struct pt_regs *regs)
7050 {
7051         struct hw_perf_event *hwc = &event->hw;
7052
7053         local64_add(nr, &event->count);
7054
7055         if (!regs)
7056                 return;
7057
7058         if (!is_sampling_event(event))
7059                 return;
7060
7061         if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7062                 data->period = nr;
7063                 return perf_swevent_overflow(event, 1, data, regs);
7064         } else
7065                 data->period = event->hw.last_period;
7066
7067         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
7068                 return perf_swevent_overflow(event, 1, data, regs);
7069
7070         if (local64_add_negative(nr, &hwc->period_left))
7071                 return;
7072
7073         perf_swevent_overflow(event, 0, data, regs);
7074 }
7075
7076 static int perf_exclude_event(struct perf_event *event,
7077                               struct pt_regs *regs)
7078 {
7079         if (event->hw.state & PERF_HES_STOPPED)
7080                 return 1;
7081
7082         if (regs) {
7083                 if (event->attr.exclude_user && user_mode(regs))
7084                         return 1;
7085
7086                 if (event->attr.exclude_kernel && !user_mode(regs))
7087                         return 1;
7088         }
7089
7090         return 0;
7091 }
7092
7093 static int perf_swevent_match(struct perf_event *event,
7094                                 enum perf_type_id type,
7095                                 u32 event_id,
7096                                 struct perf_sample_data *data,
7097                                 struct pt_regs *regs)
7098 {
7099         if (event->attr.type != type)
7100                 return 0;
7101
7102         if (event->attr.config != event_id)
7103                 return 0;
7104
7105         if (perf_exclude_event(event, regs))
7106                 return 0;
7107
7108         return 1;
7109 }
7110
7111 static inline u64 swevent_hash(u64 type, u32 event_id)
7112 {
7113         u64 val = event_id | (type << 32);
7114
7115         return hash_64(val, SWEVENT_HLIST_BITS);
7116 }
7117
7118 static inline struct hlist_head *
7119 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
7120 {
7121         u64 hash = swevent_hash(type, event_id);
7122
7123         return &hlist->heads[hash];
7124 }
7125
7126 /* For the read side: events when they trigger */
7127 static inline struct hlist_head *
7128 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
7129 {
7130         struct swevent_hlist *hlist;
7131
7132         hlist = rcu_dereference(swhash->swevent_hlist);
7133         if (!hlist)
7134                 return NULL;
7135
7136         return __find_swevent_head(hlist, type, event_id);
7137 }
7138
7139 /* For the event head insertion and removal in the hlist */
7140 static inline struct hlist_head *
7141 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
7142 {
7143         struct swevent_hlist *hlist;
7144         u32 event_id = event->attr.config;
7145         u64 type = event->attr.type;
7146
7147         /*
7148          * Event scheduling is always serialized against hlist allocation
7149          * and release. Which makes the protected version suitable here.
7150          * The context lock guarantees that.
7151          */
7152         hlist = rcu_dereference_protected(swhash->swevent_hlist,
7153                                           lockdep_is_held(&event->ctx->lock));
7154         if (!hlist)
7155                 return NULL;
7156
7157         return __find_swevent_head(hlist, type, event_id);
7158 }
7159
7160 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
7161                                     u64 nr,
7162                                     struct perf_sample_data *data,
7163                                     struct pt_regs *regs)
7164 {
7165         struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7166         struct perf_event *event;
7167         struct hlist_head *head;
7168
7169         rcu_read_lock();
7170         head = find_swevent_head_rcu(swhash, type, event_id);
7171         if (!head)
7172                 goto end;
7173
7174         hlist_for_each_entry_rcu(event, head, hlist_entry) {
7175                 if (perf_swevent_match(event, type, event_id, data, regs))
7176                         perf_swevent_event(event, nr, data, regs);
7177         }
7178 end:
7179         rcu_read_unlock();
7180 }
7181
7182 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7183
7184 int perf_swevent_get_recursion_context(void)
7185 {
7186         struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7187
7188         return get_recursion_context(swhash->recursion);
7189 }
7190 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
7191
7192 void perf_swevent_put_recursion_context(int rctx)
7193 {
7194         struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7195
7196         put_recursion_context(swhash->recursion, rctx);
7197 }
7198
7199 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7200 {
7201         struct perf_sample_data data;
7202
7203         if (WARN_ON_ONCE(!regs))
7204                 return;
7205
7206         perf_sample_data_init(&data, addr, 0);
7207         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
7208 }
7209
7210 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7211 {
7212         int rctx;
7213
7214         preempt_disable_notrace();
7215         rctx = perf_swevent_get_recursion_context();
7216         if (unlikely(rctx < 0))
7217                 goto fail;
7218
7219         ___perf_sw_event(event_id, nr, regs, addr);
7220
7221         perf_swevent_put_recursion_context(rctx);
7222 fail:
7223         preempt_enable_notrace();
7224 }
7225
7226 static void perf_swevent_read(struct perf_event *event)
7227 {
7228 }
7229
7230 static int perf_swevent_add(struct perf_event *event, int flags)
7231 {
7232         struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7233         struct hw_perf_event *hwc = &event->hw;
7234         struct hlist_head *head;
7235
7236         if (is_sampling_event(event)) {
7237                 hwc->last_period = hwc->sample_period;
7238                 perf_swevent_set_period(event);
7239         }
7240
7241         hwc->state = !(flags & PERF_EF_START);
7242
7243         head = find_swevent_head(swhash, event);
7244         if (WARN_ON_ONCE(!head))
7245                 return -EINVAL;
7246
7247         hlist_add_head_rcu(&event->hlist_entry, head);
7248         perf_event_update_userpage(event);
7249
7250         return 0;
7251 }
7252
7253 static void perf_swevent_del(struct perf_event *event, int flags)
7254 {
7255         hlist_del_rcu(&event->hlist_entry);
7256 }
7257
7258 static void perf_swevent_start(struct perf_event *event, int flags)
7259 {
7260         event->hw.state = 0;
7261 }
7262
7263 static void perf_swevent_stop(struct perf_event *event, int flags)
7264 {
7265         event->hw.state = PERF_HES_STOPPED;
7266 }
7267
7268 /* Deref the hlist from the update side */
7269 static inline struct swevent_hlist *
7270 swevent_hlist_deref(struct swevent_htable *swhash)
7271 {
7272         return rcu_dereference_protected(swhash->swevent_hlist,
7273                                          lockdep_is_held(&swhash->hlist_mutex));
7274 }
7275
7276 static void swevent_hlist_release(struct swevent_htable *swhash)
7277 {
7278         struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
7279
7280         if (!hlist)
7281                 return;
7282
7283         RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
7284         kfree_rcu(hlist, rcu_head);
7285 }
7286
7287 static void swevent_hlist_put_cpu(int cpu)
7288 {
7289         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7290
7291         mutex_lock(&swhash->hlist_mutex);
7292
7293         if (!--swhash->hlist_refcount)
7294                 swevent_hlist_release(swhash);
7295
7296         mutex_unlock(&swhash->hlist_mutex);
7297 }
7298
7299 static void swevent_hlist_put(void)
7300 {
7301         int cpu;
7302
7303         for_each_possible_cpu(cpu)
7304                 swevent_hlist_put_cpu(cpu);
7305 }
7306
7307 static int swevent_hlist_get_cpu(int cpu)
7308 {
7309         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7310         int err = 0;
7311
7312         mutex_lock(&swhash->hlist_mutex);
7313         if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
7314                 struct swevent_hlist *hlist;
7315
7316                 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7317                 if (!hlist) {
7318                         err = -ENOMEM;
7319                         goto exit;
7320                 }
7321                 rcu_assign_pointer(swhash->swevent_hlist, hlist);
7322         }
7323         swhash->hlist_refcount++;
7324 exit:
7325         mutex_unlock(&swhash->hlist_mutex);
7326
7327         return err;
7328 }
7329
7330 static int swevent_hlist_get(void)
7331 {
7332         int err, cpu, failed_cpu;
7333
7334         get_online_cpus();
7335         for_each_possible_cpu(cpu) {
7336                 err = swevent_hlist_get_cpu(cpu);
7337                 if (err) {
7338                         failed_cpu = cpu;
7339                         goto fail;
7340                 }
7341         }
7342         put_online_cpus();
7343
7344         return 0;
7345 fail:
7346         for_each_possible_cpu(cpu) {
7347                 if (cpu == failed_cpu)
7348                         break;
7349                 swevent_hlist_put_cpu(cpu);
7350         }
7351
7352         put_online_cpus();
7353         return err;
7354 }
7355
7356 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
7357
7358 static void sw_perf_event_destroy(struct perf_event *event)
7359 {
7360         u64 event_id = event->attr.config;
7361
7362         WARN_ON(event->parent);
7363
7364         static_key_slow_dec(&perf_swevent_enabled[event_id]);
7365         swevent_hlist_put();
7366 }
7367
7368 static int perf_swevent_init(struct perf_event *event)
7369 {
7370         u64 event_id = event->attr.config;
7371
7372         if (event->attr.type != PERF_TYPE_SOFTWARE)
7373                 return -ENOENT;
7374
7375         /*
7376          * no branch sampling for software events
7377          */
7378         if (has_branch_stack(event))
7379                 return -EOPNOTSUPP;
7380
7381         switch (event_id) {
7382         case PERF_COUNT_SW_CPU_CLOCK:
7383         case PERF_COUNT_SW_TASK_CLOCK:
7384                 return -ENOENT;
7385
7386         default:
7387                 break;
7388         }
7389
7390         if (event_id >= PERF_COUNT_SW_MAX)
7391                 return -ENOENT;
7392
7393         if (!event->parent) {
7394                 int err;
7395
7396                 err = swevent_hlist_get();
7397                 if (err)
7398                         return err;
7399
7400                 static_key_slow_inc(&perf_swevent_enabled[event_id]);
7401                 event->destroy = sw_perf_event_destroy;
7402         }
7403
7404         return 0;
7405 }
7406
7407 static struct pmu perf_swevent = {
7408         .task_ctx_nr    = perf_sw_context,
7409
7410         .capabilities   = PERF_PMU_CAP_NO_NMI,
7411
7412         .event_init     = perf_swevent_init,
7413         .add            = perf_swevent_add,
7414         .del            = perf_swevent_del,
7415         .start          = perf_swevent_start,
7416         .stop           = perf_swevent_stop,
7417         .read           = perf_swevent_read,
7418 };
7419
7420 #ifdef CONFIG_EVENT_TRACING
7421
7422 static int perf_tp_filter_match(struct perf_event *event,
7423                                 struct perf_sample_data *data)
7424 {
7425         void *record = data->raw->frag.data;
7426
7427         /* only top level events have filters set */
7428         if (event->parent)
7429                 event = event->parent;
7430
7431         if (likely(!event->filter) || filter_match_preds(event->filter, record))
7432                 return 1;
7433         return 0;
7434 }
7435
7436 static int perf_tp_event_match(struct perf_event *event,
7437                                 struct perf_sample_data *data,
7438                                 struct pt_regs *regs)
7439 {
7440         if (event->hw.state & PERF_HES_STOPPED)
7441                 return 0;
7442         /*
7443          * All tracepoints are from kernel-space.
7444          */
7445         if (event->attr.exclude_kernel)
7446                 return 0;
7447
7448         if (!perf_tp_filter_match(event, data))
7449                 return 0;
7450
7451         return 1;
7452 }
7453
7454 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7455                                struct trace_event_call *call, u64 count,
7456                                struct pt_regs *regs, struct hlist_head *head,
7457                                struct task_struct *task)
7458 {
7459         struct bpf_prog *prog = call->prog;
7460
7461         if (prog) {
7462                 *(struct pt_regs **)raw_data = regs;
7463                 if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7464                         perf_swevent_put_recursion_context(rctx);
7465                         return;
7466                 }
7467         }
7468         perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7469                       rctx, task);
7470 }
7471 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7472
7473 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
7474                    struct pt_regs *regs, struct hlist_head *head, int rctx,
7475                    struct task_struct *task)
7476 {
7477         struct perf_sample_data data;
7478         struct perf_event *event;
7479
7480         struct perf_raw_record raw = {
7481                 .frag = {
7482                         .size = entry_size,
7483                         .data = record,
7484                 },
7485         };
7486
7487         perf_sample_data_init(&data, 0, 0);
7488         data.raw = &raw;
7489
7490         perf_trace_buf_update(record, event_type);
7491
7492         hlist_for_each_entry_rcu(event, head, hlist_entry) {
7493                 if (perf_tp_event_match(event, &data, regs))
7494                         perf_swevent_event(event, count, &data, regs);
7495         }
7496
7497         /*
7498          * If we got specified a target task, also iterate its context and
7499          * deliver this event there too.
7500          */
7501         if (task && task != current) {
7502                 struct perf_event_context *ctx;
7503                 struct trace_entry *entry = record;
7504
7505                 rcu_read_lock();
7506                 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7507                 if (!ctx)
7508                         goto unlock;
7509
7510                 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7511                         if (event->attr.type != PERF_TYPE_TRACEPOINT)
7512                                 continue;
7513                         if (event->attr.config != entry->type)
7514                                 continue;
7515                         if (perf_tp_event_match(event, &data, regs))
7516                                 perf_swevent_event(event, count, &data, regs);
7517                 }
7518 unlock:
7519                 rcu_read_unlock();
7520         }
7521
7522         perf_swevent_put_recursion_context(rctx);
7523 }
7524 EXPORT_SYMBOL_GPL(perf_tp_event);
7525
7526 static void tp_perf_event_destroy(struct perf_event *event)
7527 {
7528         perf_trace_destroy(event);
7529 }
7530
7531 static int perf_tp_event_init(struct perf_event *event)
7532 {
7533         int err;
7534
7535         if (event->attr.type != PERF_TYPE_TRACEPOINT)
7536                 return -ENOENT;
7537
7538         /*
7539          * no branch sampling for tracepoint events
7540          */
7541         if (has_branch_stack(event))
7542                 return -EOPNOTSUPP;
7543
7544         err = perf_trace_init(event);
7545         if (err)
7546                 return err;
7547
7548         event->destroy = tp_perf_event_destroy;
7549
7550         return 0;
7551 }
7552
7553 static struct pmu perf_tracepoint = {
7554         .task_ctx_nr    = perf_sw_context,
7555
7556         .event_init     = perf_tp_event_init,
7557         .add            = perf_trace_add,
7558         .del            = perf_trace_del,
7559         .start          = perf_swevent_start,
7560         .stop           = perf_swevent_stop,
7561         .read           = perf_swevent_read,
7562 };
7563
7564 static inline void perf_tp_register(void)
7565 {
7566         perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
7567 }
7568
7569 static void perf_event_free_filter(struct perf_event *event)
7570 {
7571         ftrace_profile_free_filter(event);
7572 }
7573
7574 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7575 {
7576         bool is_kprobe, is_tracepoint;
7577         struct bpf_prog *prog;
7578
7579         if (event->attr.type != PERF_TYPE_TRACEPOINT)
7580                 return -EINVAL;
7581
7582         if (event->tp_event->prog)
7583                 return -EEXIST;
7584
7585         is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
7586         is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
7587         if (!is_kprobe && !is_tracepoint)
7588                 /* bpf programs can only be attached to u/kprobe or tracepoint */
7589                 return -EINVAL;
7590
7591         prog = bpf_prog_get(prog_fd);
7592         if (IS_ERR(prog))
7593                 return PTR_ERR(prog);
7594
7595         if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
7596             (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
7597                 /* valid fd, but invalid bpf program type */
7598                 bpf_prog_put(prog);
7599                 return -EINVAL;
7600         }
7601
7602         if (is_tracepoint) {
7603                 int off = trace_event_get_offsets(event->tp_event);
7604
7605                 if (prog->aux->max_ctx_offset > off) {
7606                         bpf_prog_put(prog);
7607                         return -EACCES;
7608                 }
7609         }
7610         event->tp_event->prog = prog;
7611
7612         return 0;
7613 }
7614
7615 static void perf_event_free_bpf_prog(struct perf_event *event)
7616 {
7617         struct bpf_prog *prog;
7618
7619         if (!event->tp_event)
7620                 return;
7621
7622         prog = event->tp_event->prog;
7623         if (prog) {
7624                 event->tp_event->prog = NULL;
7625                 bpf_prog_put(prog);
7626         }
7627 }
7628
7629 #else
7630
7631 static inline void perf_tp_register(void)
7632 {
7633 }
7634
7635 static void perf_event_free_filter(struct perf_event *event)
7636 {
7637 }
7638
7639 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7640 {
7641         return -ENOENT;
7642 }
7643
7644 static void perf_event_free_bpf_prog(struct perf_event *event)
7645 {
7646 }
7647 #endif /* CONFIG_EVENT_TRACING */
7648
7649 #ifdef CONFIG_HAVE_HW_BREAKPOINT
7650 void perf_bp_event(struct perf_event *bp, void *data)
7651 {
7652         struct perf_sample_data sample;
7653         struct pt_regs *regs = data;
7654
7655         perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
7656
7657         if (!bp->hw.state && !perf_exclude_event(bp, regs))
7658                 perf_swevent_event(bp, 1, &sample, regs);
7659 }
7660 #endif
7661
7662 /*
7663  * Allocate a new address filter
7664  */
7665 static struct perf_addr_filter *
7666 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
7667 {
7668         int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
7669         struct perf_addr_filter *filter;
7670
7671         filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
7672         if (!filter)
7673                 return NULL;
7674
7675         INIT_LIST_HEAD(&filter->entry);
7676         list_add_tail(&filter->entry, filters);
7677
7678         return filter;
7679 }
7680
7681 static void free_filters_list(struct list_head *filters)
7682 {
7683         struct perf_addr_filter *filter, *iter;
7684
7685         list_for_each_entry_safe(filter, iter, filters, entry) {
7686                 if (filter->inode)
7687                         iput(filter->inode);
7688                 list_del(&filter->entry);
7689                 kfree(filter);
7690         }
7691 }
7692
7693 /*
7694  * Free existing address filters and optionally install new ones
7695  */
7696 static void perf_addr_filters_splice(struct perf_event *event,
7697                                      struct list_head *head)
7698 {
7699         unsigned long flags;
7700         LIST_HEAD(list);
7701
7702         if (!has_addr_filter(event))
7703                 return;
7704
7705         /* don't bother with children, they don't have their own filters */
7706         if (event->parent)
7707                 return;
7708
7709         raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
7710
7711         list_splice_init(&event->addr_filters.list, &list);
7712         if (head)
7713                 list_splice(head, &event->addr_filters.list);
7714
7715         raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
7716
7717         free_filters_list(&list);
7718 }
7719
7720 /*
7721  * Scan through mm's vmas and see if one of them matches the
7722  * @filter; if so, adjust filter's address range.
7723  * Called with mm::mmap_sem down for reading.
7724  */
7725 static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
7726                                             struct mm_struct *mm)
7727 {
7728         struct vm_area_struct *vma;
7729
7730         for (vma = mm->mmap; vma; vma = vma->vm_next) {
7731                 struct file *file = vma->vm_file;
7732                 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
7733                 unsigned long vma_size = vma->vm_end - vma->vm_start;
7734
7735                 if (!file)
7736                         continue;
7737
7738                 if (!perf_addr_filter_match(filter, file, off, vma_size))
7739                         continue;
7740
7741                 return vma->vm_start;
7742         }
7743
7744         return 0;
7745 }
7746
7747 /*
7748  * Update event's address range filters based on the
7749  * task's existing mappings, if any.
7750  */
7751 static void perf_event_addr_filters_apply(struct perf_event *event)
7752 {
7753         struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7754         struct task_struct *task = READ_ONCE(event->ctx->task);
7755         struct perf_addr_filter *filter;
7756         struct mm_struct *mm = NULL;
7757         unsigned int count = 0;
7758         unsigned long flags;
7759
7760         /*
7761          * We may observe TASK_TOMBSTONE, which means that the event tear-down
7762          * will stop on the parent's child_mutex that our caller is also holding
7763          */
7764         if (task == TASK_TOMBSTONE)
7765                 return;
7766
7767         mm = get_task_mm(event->ctx->task);
7768         if (!mm)
7769                 goto restart;
7770
7771         down_read(&mm->mmap_sem);
7772
7773         raw_spin_lock_irqsave(&ifh->lock, flags);
7774         list_for_each_entry(filter, &ifh->list, entry) {
7775                 event->addr_filters_offs[count] = 0;
7776
7777                 if (perf_addr_filter_needs_mmap(filter))
7778                         event->addr_filters_offs[count] =
7779                                 perf_addr_filter_apply(filter, mm);
7780
7781                 count++;
7782         }
7783
7784         event->addr_filters_gen++;
7785         raw_spin_unlock_irqrestore(&ifh->lock, flags);
7786
7787         up_read(&mm->mmap_sem);
7788
7789         mmput(mm);
7790
7791 restart:
7792         perf_event_restart(event);
7793 }
7794
7795 /*
7796  * Address range filtering: limiting the data to certain
7797  * instruction address ranges. Filters are ioctl()ed to us from
7798  * userspace as ascii strings.
7799  *
7800  * Filter string format:
7801  *
7802  * ACTION RANGE_SPEC
7803  * where ACTION is one of the
7804  *  * "filter": limit the trace to this region
7805  *  * "start": start tracing from this address
7806  *  * "stop": stop tracing at this address/region;
7807  * RANGE_SPEC is
7808  *  * for kernel addresses: <start address>[/<size>]
7809  *  * for object files:     <start address>[/<size>]@</path/to/object/file>
7810  *
7811  * if <size> is not specified, the range is treated as a single address.
7812  */
7813 enum {
7814         IF_ACT_FILTER,
7815         IF_ACT_START,
7816         IF_ACT_STOP,
7817         IF_SRC_FILE,
7818         IF_SRC_KERNEL,
7819         IF_SRC_FILEADDR,
7820         IF_SRC_KERNELADDR,
7821 };
7822
7823 enum {
7824         IF_STATE_ACTION = 0,
7825         IF_STATE_SOURCE,
7826         IF_STATE_END,
7827 };
7828
7829 static const match_table_t if_tokens = {
7830         { IF_ACT_FILTER,        "filter" },
7831         { IF_ACT_START,         "start" },
7832         { IF_ACT_STOP,          "stop" },
7833         { IF_SRC_FILE,          "%u/%u@%s" },
7834         { IF_SRC_KERNEL,        "%u/%u" },
7835         { IF_SRC_FILEADDR,      "%u@%s" },
7836         { IF_SRC_KERNELADDR,    "%u" },
7837 };
7838
7839 /*
7840  * Address filter string parser
7841  */
7842 static int
7843 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
7844                              struct list_head *filters)
7845 {
7846         struct perf_addr_filter *filter = NULL;
7847         char *start, *orig, *filename = NULL;
7848         struct path path;
7849         substring_t args[MAX_OPT_ARGS];
7850         int state = IF_STATE_ACTION, token;
7851         unsigned int kernel = 0;
7852         int ret = -EINVAL;
7853
7854         orig = fstr = kstrdup(fstr, GFP_KERNEL);
7855         if (!fstr)
7856                 return -ENOMEM;
7857
7858         while ((start = strsep(&fstr, " ,\n")) != NULL) {
7859                 ret = -EINVAL;
7860
7861                 if (!*start)
7862                         continue;
7863
7864                 /* filter definition begins */
7865                 if (state == IF_STATE_ACTION) {
7866                         filter = perf_addr_filter_new(event, filters);
7867                         if (!filter)
7868                                 goto fail;
7869                 }
7870
7871                 token = match_token(start, if_tokens, args);
7872                 switch (token) {
7873                 case IF_ACT_FILTER:
7874                 case IF_ACT_START:
7875                         filter->filter = 1;
7876
7877                 case IF_ACT_STOP:
7878                         if (state != IF_STATE_ACTION)
7879                                 goto fail;
7880
7881                         state = IF_STATE_SOURCE;
7882                         break;
7883
7884                 case IF_SRC_KERNELADDR:
7885                 case IF_SRC_KERNEL:
7886                         kernel = 1;
7887
7888                 case IF_SRC_FILEADDR:
7889                 case IF_SRC_FILE:
7890                         if (state != IF_STATE_SOURCE)
7891                                 goto fail;
7892
7893                         if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
7894                                 filter->range = 1;
7895
7896                         *args[0].to = 0;
7897                         ret = kstrtoul(args[0].from, 0, &filter->offset);
7898                         if (ret)
7899                                 goto fail;
7900
7901                         if (filter->range) {
7902                                 *args[1].to = 0;
7903                                 ret = kstrtoul(args[1].from, 0, &filter->size);
7904                                 if (ret)
7905                                         goto fail;
7906                         }
7907
7908                         if (token == IF_SRC_FILE) {
7909                                 filename = match_strdup(&args[2]);
7910                                 if (!filename) {
7911                                         ret = -ENOMEM;
7912                                         goto fail;
7913                                 }
7914                         }
7915
7916                         state = IF_STATE_END;
7917                         break;
7918
7919                 default:
7920                         goto fail;
7921                 }
7922
7923                 /*
7924                  * Filter definition is fully parsed, validate and install it.
7925                  * Make sure that it doesn't contradict itself or the event's
7926                  * attribute.
7927                  */
7928                 if (state == IF_STATE_END) {
7929                         if (kernel && event->attr.exclude_kernel)
7930                                 goto fail;
7931
7932                         if (!kernel) {
7933                                 if (!filename)
7934                                         goto fail;
7935
7936                                 /* look up the path and grab its inode */
7937                                 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
7938                                 if (ret)
7939                                         goto fail_free_name;
7940
7941                                 filter->inode = igrab(d_inode(path.dentry));
7942                                 path_put(&path);
7943                                 kfree(filename);
7944                                 filename = NULL;
7945
7946                                 ret = -EINVAL;
7947                                 if (!filter->inode ||
7948                                     !S_ISREG(filter->inode->i_mode))
7949                                         /* free_filters_list() will iput() */
7950                                         goto fail;
7951                         }
7952
7953                         /* ready to consume more filters */
7954                         state = IF_STATE_ACTION;
7955                         filter = NULL;
7956                 }
7957         }
7958
7959         if (state != IF_STATE_ACTION)
7960                 goto fail;
7961
7962         kfree(orig);
7963
7964         return 0;
7965
7966 fail_free_name:
7967         kfree(filename);
7968 fail:
7969         free_filters_list(filters);
7970         kfree(orig);
7971
7972         return ret;
7973 }
7974
7975 static int
7976 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
7977 {
7978         LIST_HEAD(filters);
7979         int ret;
7980
7981         /*
7982          * Since this is called in perf_ioctl() path, we're already holding
7983          * ctx::mutex.
7984          */
7985         lockdep_assert_held(&event->ctx->mutex);
7986
7987         if (WARN_ON_ONCE(event->parent))
7988                 return -EINVAL;
7989
7990         /*
7991          * For now, we only support filtering in per-task events; doing so
7992          * for CPU-wide events requires additional context switching trickery,
7993          * since same object code will be mapped at different virtual
7994          * addresses in different processes.
7995          */
7996         if (!event->ctx->task)
7997                 return -EOPNOTSUPP;
7998
7999         ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8000         if (ret)
8001                 return ret;
8002
8003         ret = event->pmu->addr_filters_validate(&filters);
8004         if (ret) {
8005                 free_filters_list(&filters);
8006                 return ret;
8007         }
8008
8009         /* remove existing filters, if any */
8010         perf_addr_filters_splice(event, &filters);
8011
8012         /* install new filters */
8013         perf_event_for_each_child(event, perf_event_addr_filters_apply);
8014
8015         return ret;
8016 }
8017
8018 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8019 {
8020         char *filter_str;
8021         int ret = -EINVAL;
8022
8023         if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8024             !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8025             !has_addr_filter(event))
8026                 return -EINVAL;
8027
8028         filter_str = strndup_user(arg, PAGE_SIZE);
8029         if (IS_ERR(filter_str))
8030                 return PTR_ERR(filter_str);
8031
8032         if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8033             event->attr.type == PERF_TYPE_TRACEPOINT)
8034                 ret = ftrace_profile_set_filter(event, event->attr.config,
8035                                                 filter_str);
8036         else if (has_addr_filter(event))
8037                 ret = perf_event_set_addr_filter(event, filter_str);
8038
8039         kfree(filter_str);
8040         return ret;
8041 }
8042
8043 /*
8044  * hrtimer based swevent callback
8045  */
8046
8047 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
8048 {
8049         enum hrtimer_restart ret = HRTIMER_RESTART;
8050         struct perf_sample_data data;
8051         struct pt_regs *regs;
8052         struct perf_event *event;
8053         u64 period;
8054
8055         event = container_of(hrtimer, struct perf_event, hw.hrtimer);
8056
8057         if (event->state != PERF_EVENT_STATE_ACTIVE)
8058                 return HRTIMER_NORESTART;
8059
8060         event->pmu->read(event);
8061
8062         perf_sample_data_init(&data, 0, event->hw.last_period);
8063         regs = get_irq_regs();
8064
8065         if (regs && !perf_exclude_event(event, regs)) {
8066                 if (!(event->attr.exclude_idle && is_idle_task(current)))
8067                         if (__perf_event_overflow(event, 1, &data, regs))
8068                                 ret = HRTIMER_NORESTART;
8069         }
8070
8071         period = max_t(u64, 10000, event->hw.sample_period);
8072         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
8073
8074         return ret;
8075 }
8076
8077 static void perf_swevent_start_hrtimer(struct perf_event *event)
8078 {
8079         struct hw_perf_event *hwc = &event->hw;
8080         s64 period;
8081
8082         if (!is_sampling_event(event))
8083                 return;
8084
8085         period = local64_read(&hwc->period_left);
8086         if (period) {
8087                 if (period < 0)
8088                         period = 10000;
8089
8090                 local64_set(&hwc->period_left, 0);
8091         } else {
8092                 period = max_t(u64, 10000, hwc->sample_period);
8093         }
8094         hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8095                       HRTIMER_MODE_REL_PINNED);
8096 }
8097
8098 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
8099 {
8100         struct hw_perf_event *hwc = &event->hw;
8101
8102         if (is_sampling_event(event)) {
8103                 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
8104                 local64_set(&hwc->period_left, ktime_to_ns(remaining));
8105
8106                 hrtimer_cancel(&hwc->hrtimer);
8107         }
8108 }
8109
8110 static void perf_swevent_init_hrtimer(struct perf_event *event)
8111 {
8112         struct hw_perf_event *hwc = &event->hw;
8113
8114         if (!is_sampling_event(event))
8115                 return;
8116
8117         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8118         hwc->hrtimer.function = perf_swevent_hrtimer;
8119
8120         /*
8121          * Since hrtimers have a fixed rate, we can do a static freq->period
8122          * mapping and avoid the whole period adjust feedback stuff.
8123          */
8124         if (event->attr.freq) {
8125                 long freq = event->attr.sample_freq;
8126
8127                 event->attr.sample_period = NSEC_PER_SEC / freq;
8128                 hwc->sample_period = event->attr.sample_period;
8129                 local64_set(&hwc->period_left, hwc->sample_period);
8130                 hwc->last_period = hwc->sample_period;
8131                 event->attr.freq = 0;
8132         }
8133 }
8134
8135 /*
8136  * Software event: cpu wall time clock
8137  */
8138
8139 static void cpu_clock_event_update(struct perf_event *event)
8140 {
8141         s64 prev;
8142         u64 now;
8143
8144         now = local_clock();
8145         prev = local64_xchg(&event->hw.prev_count, now);
8146         local64_add(now - prev, &event->count);
8147 }
8148
8149 static void cpu_clock_event_start(struct perf_event *event, int flags)
8150 {
8151         local64_set(&event->hw.prev_count, local_clock());
8152         perf_swevent_start_hrtimer(event);
8153 }
8154
8155 static void cpu_clock_event_stop(struct perf_event *event, int flags)
8156 {
8157         perf_swevent_cancel_hrtimer(event);
8158         cpu_clock_event_update(event);
8159 }
8160
8161 static int cpu_clock_event_add(struct perf_event *event, int flags)
8162 {
8163         if (flags & PERF_EF_START)
8164                 cpu_clock_event_start(event, flags);
8165         perf_event_update_userpage(event);
8166
8167         return 0;
8168 }
8169
8170 static void cpu_clock_event_del(struct perf_event *event, int flags)
8171 {
8172         cpu_clock_event_stop(event, flags);
8173 }
8174
8175 static void cpu_clock_event_read(struct perf_event *event)
8176 {
8177         cpu_clock_event_update(event);
8178 }
8179
8180 static int cpu_clock_event_init(struct perf_event *event)
8181 {
8182         if (event->attr.type != PERF_TYPE_SOFTWARE)
8183                 return -ENOENT;
8184
8185         if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8186                 return -ENOENT;
8187
8188         /*
8189          * no branch sampling for software events
8190          */
8191         if (has_branch_stack(event))
8192                 return -EOPNOTSUPP;
8193
8194         perf_swevent_init_hrtimer(event);
8195
8196         return 0;
8197 }
8198
8199 static struct pmu perf_cpu_clock = {
8200         .task_ctx_nr    = perf_sw_context,
8201
8202         .capabilities   = PERF_PMU_CAP_NO_NMI,
8203
8204         .event_init     = cpu_clock_event_init,
8205         .add            = cpu_clock_event_add,
8206         .del            = cpu_clock_event_del,
8207         .start          = cpu_clock_event_start,
8208         .stop           = cpu_clock_event_stop,
8209         .read           = cpu_clock_event_read,
8210 };
8211
8212 /*
8213  * Software event: task time clock
8214  */
8215
8216 static void task_clock_event_update(struct perf_event *event, u64 now)
8217 {
8218         u64 prev;
8219         s64 delta;
8220
8221         prev = local64_xchg(&event->hw.prev_count, now);
8222         delta = now - prev;
8223         local64_add(delta, &event->count);
8224 }
8225
8226 static void task_clock_event_start(struct perf_event *event, int flags)
8227 {
8228         local64_set(&event->hw.prev_count, event->ctx->time);
8229         perf_swevent_start_hrtimer(event);
8230 }
8231
8232 static void task_clock_event_stop(struct perf_event *event, int flags)
8233 {
8234         perf_swevent_cancel_hrtimer(event);
8235         task_clock_event_update(event, event->ctx->time);
8236 }
8237
8238 static int task_clock_event_add(struct perf_event *event, int flags)
8239 {
8240         if (flags & PERF_EF_START)
8241                 task_clock_event_start(event, flags);
8242         perf_event_update_userpage(event);
8243
8244         return 0;
8245 }
8246
8247 static void task_clock_event_del(struct perf_event *event, int flags)
8248 {
8249         task_clock_event_stop(event, PERF_EF_UPDATE);
8250 }
8251
8252 static void task_clock_event_read(struct perf_event *event)
8253 {
8254         u64 now = perf_clock();
8255         u64 delta = now - event->ctx->timestamp;
8256         u64 time = event->ctx->time + delta;
8257
8258         task_clock_event_update(event, time);
8259 }
8260
8261 static int task_clock_event_init(struct perf_event *event)
8262 {
8263         if (event->attr.type != PERF_TYPE_SOFTWARE)
8264                 return -ENOENT;
8265
8266         if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8267                 return -ENOENT;
8268
8269         /*
8270          * no branch sampling for software events
8271          */
8272         if (has_branch_stack(event))
8273                 return -EOPNOTSUPP;
8274
8275         perf_swevent_init_hrtimer(event);
8276
8277         return 0;
8278 }
8279
8280 static struct pmu perf_task_clock = {
8281         .task_ctx_nr    = perf_sw_context,
8282
8283         .capabilities   = PERF_PMU_CAP_NO_NMI,
8284
8285         .event_init     = task_clock_event_init,
8286         .add            = task_clock_event_add,
8287         .del            = task_clock_event_del,
8288         .start          = task_clock_event_start,
8289         .stop           = task_clock_event_stop,
8290         .read           = task_clock_event_read,
8291 };
8292
8293 static void perf_pmu_nop_void(struct pmu *pmu)
8294 {
8295 }
8296
8297 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8298 {
8299 }
8300
8301 static int perf_pmu_nop_int(struct pmu *pmu)
8302 {
8303         return 0;
8304 }
8305
8306 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
8307
8308 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
8309 {
8310         __this_cpu_write(nop_txn_flags, flags);
8311
8312         if (flags & ~PERF_PMU_TXN_ADD)
8313                 return;
8314
8315         perf_pmu_disable(pmu);
8316 }
8317
8318 static int perf_pmu_commit_txn(struct pmu *pmu)
8319 {
8320         unsigned int flags = __this_cpu_read(nop_txn_flags);
8321
8322         __this_cpu_write(nop_txn_flags, 0);
8323
8324         if (flags & ~PERF_PMU_TXN_ADD)
8325                 return 0;
8326
8327         perf_pmu_enable(pmu);
8328         return 0;
8329 }
8330
8331 static void perf_pmu_cancel_txn(struct pmu *pmu)
8332 {
8333         unsigned int flags =  __this_cpu_read(nop_txn_flags);
8334
8335         __this_cpu_write(nop_txn_flags, 0);
8336
8337         if (flags & ~PERF_PMU_TXN_ADD)
8338                 return;
8339
8340         perf_pmu_enable(pmu);
8341 }
8342
8343 static int perf_event_idx_default(struct perf_event *event)
8344 {
8345         return 0;
8346 }
8347
8348 /*
8349  * Ensures all contexts with the same task_ctx_nr have the same
8350  * pmu_cpu_context too.
8351  */
8352 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
8353 {
8354         struct pmu *pmu;
8355
8356         if (ctxn < 0)
8357                 return NULL;
8358
8359         list_for_each_entry(pmu, &pmus, entry) {
8360                 if (pmu->task_ctx_nr == ctxn)
8361                         return pmu->pmu_cpu_context;
8362         }
8363
8364         return NULL;
8365 }
8366
8367 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
8368 {
8369         int cpu;
8370
8371         for_each_possible_cpu(cpu) {
8372                 struct perf_cpu_context *cpuctx;
8373
8374                 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8375
8376                 if (cpuctx->unique_pmu == old_pmu)
8377                         cpuctx->unique_pmu = pmu;
8378         }
8379 }
8380
8381 static void free_pmu_context(struct pmu *pmu)
8382 {
8383         struct pmu *i;
8384
8385         mutex_lock(&pmus_lock);
8386         /*
8387          * Like a real lame refcount.
8388          */
8389         list_for_each_entry(i, &pmus, entry) {
8390                 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
8391                         update_pmu_context(i, pmu);
8392                         goto out;
8393                 }
8394         }
8395
8396         free_percpu(pmu->pmu_cpu_context);
8397 out:
8398         mutex_unlock(&pmus_lock);
8399 }
8400
8401 /*
8402  * Let userspace know that this PMU supports address range filtering:
8403  */
8404 static ssize_t nr_addr_filters_show(struct device *dev,
8405                                     struct device_attribute *attr,
8406                                     char *page)
8407 {
8408         struct pmu *pmu = dev_get_drvdata(dev);
8409
8410         return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8411 }
8412 DEVICE_ATTR_RO(nr_addr_filters);
8413
8414 static struct idr pmu_idr;
8415
8416 static ssize_t
8417 type_show(struct device *dev, struct device_attribute *attr, char *page)
8418 {
8419         struct pmu *pmu = dev_get_drvdata(dev);
8420
8421         return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8422 }
8423 static DEVICE_ATTR_RO(type);
8424
8425 static ssize_t
8426 perf_event_mux_interval_ms_show(struct device *dev,
8427                                 struct device_attribute *attr,
8428                                 char *page)
8429 {
8430         struct pmu *pmu = dev_get_drvdata(dev);
8431
8432         return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8433 }
8434
8435 static DEFINE_MUTEX(mux_interval_mutex);
8436
8437 static ssize_t
8438 perf_event_mux_interval_ms_store(struct device *dev,
8439                                  struct device_attribute *attr,
8440                                  const char *buf, size_t count)
8441 {
8442         struct pmu *pmu = dev_get_drvdata(dev);
8443         int timer, cpu, ret;
8444
8445         ret = kstrtoint(buf, 0, &timer);
8446         if (ret)
8447                 return ret;
8448
8449         if (timer < 1)
8450                 return -EINVAL;
8451
8452         /* same value, noting to do */
8453         if (timer == pmu->hrtimer_interval_ms)
8454                 return count;
8455
8456         mutex_lock(&mux_interval_mutex);
8457         pmu->hrtimer_interval_ms = timer;
8458
8459         /* update all cpuctx for this PMU */
8460         get_online_cpus();
8461         for_each_online_cpu(cpu) {
8462                 struct perf_cpu_context *cpuctx;
8463                 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8464                 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8465
8466                 cpu_function_call(cpu,
8467                         (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
8468         }
8469         put_online_cpus();
8470         mutex_unlock(&mux_interval_mutex);
8471
8472         return count;
8473 }
8474 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
8475
8476 static struct attribute *pmu_dev_attrs[] = {
8477         &dev_attr_type.attr,
8478         &dev_attr_perf_event_mux_interval_ms.attr,
8479         NULL,
8480 };
8481 ATTRIBUTE_GROUPS(pmu_dev);
8482
8483 static int pmu_bus_running;
8484 static struct bus_type pmu_bus = {
8485         .name           = "event_source",
8486         .dev_groups     = pmu_dev_groups,
8487 };
8488
8489 static void pmu_dev_release(struct device *dev)
8490 {
8491         kfree(dev);
8492 }
8493
8494 static int pmu_dev_alloc(struct pmu *pmu)
8495 {
8496         int ret = -ENOMEM;
8497
8498         pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8499         if (!pmu->dev)
8500                 goto out;
8501
8502         pmu->dev->groups = pmu->attr_groups;
8503         device_initialize(pmu->dev);
8504         ret = dev_set_name(pmu->dev, "%s", pmu->name);
8505         if (ret)
8506                 goto free_dev;
8507
8508         dev_set_drvdata(pmu->dev, pmu);
8509         pmu->dev->bus = &pmu_bus;
8510         pmu->dev->release = pmu_dev_release;
8511         ret = device_add(pmu->dev);
8512         if (ret)
8513                 goto free_dev;
8514
8515         /* For PMUs with address filters, throw in an extra attribute: */
8516         if (pmu->nr_addr_filters)
8517                 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
8518
8519         if (ret)
8520                 goto del_dev;
8521
8522 out:
8523         return ret;
8524
8525 del_dev:
8526         device_del(pmu->dev);
8527
8528 free_dev:
8529         put_device(pmu->dev);
8530         goto out;
8531 }
8532
8533 static struct lock_class_key cpuctx_mutex;
8534 static struct lock_class_key cpuctx_lock;
8535
8536 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
8537 {
8538         int cpu, ret;
8539
8540         mutex_lock(&pmus_lock);
8541         ret = -ENOMEM;
8542         pmu->pmu_disable_count = alloc_percpu(int);
8543         if (!pmu->pmu_disable_count)
8544                 goto unlock;
8545
8546         pmu->type = -1;
8547         if (!name)
8548                 goto skip_type;
8549         pmu->name = name;
8550
8551         if (type < 0) {
8552                 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
8553                 if (type < 0) {
8554                         ret = type;
8555                         goto free_pdc;
8556                 }
8557         }
8558         pmu->type = type;
8559
8560         if (pmu_bus_running) {
8561                 ret = pmu_dev_alloc(pmu);
8562                 if (ret)
8563                         goto free_idr;
8564         }
8565
8566 skip_type:
8567         if (pmu->task_ctx_nr == perf_hw_context) {
8568                 static int hw_context_taken = 0;
8569
8570                 /*
8571                  * Other than systems with heterogeneous CPUs, it never makes
8572                  * sense for two PMUs to share perf_hw_context. PMUs which are
8573                  * uncore must use perf_invalid_context.
8574                  */
8575                 if (WARN_ON_ONCE(hw_context_taken &&
8576                     !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
8577                         pmu->task_ctx_nr = perf_invalid_context;
8578
8579                 hw_context_taken = 1;
8580         }
8581
8582         pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
8583         if (pmu->pmu_cpu_context)
8584                 goto got_cpu_context;
8585
8586         ret = -ENOMEM;
8587         pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
8588         if (!pmu->pmu_cpu_context)
8589                 goto free_dev;
8590
8591         for_each_possible_cpu(cpu) {
8592                 struct perf_cpu_context *cpuctx;
8593
8594                 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8595                 __perf_event_init_context(&cpuctx->ctx);
8596                 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
8597                 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
8598                 cpuctx->ctx.pmu = pmu;
8599
8600                 __perf_mux_hrtimer_init(cpuctx, cpu);
8601
8602                 cpuctx->unique_pmu = pmu;
8603         }
8604
8605 got_cpu_context:
8606         if (!pmu->start_txn) {
8607                 if (pmu->pmu_enable) {
8608                         /*
8609                          * If we have pmu_enable/pmu_disable calls, install
8610                          * transaction stubs that use that to try and batch
8611                          * hardware accesses.
8612                          */
8613                         pmu->start_txn  = perf_pmu_start_txn;
8614                         pmu->commit_txn = perf_pmu_commit_txn;
8615                         pmu->cancel_txn = perf_pmu_cancel_txn;
8616                 } else {
8617                         pmu->start_txn  = perf_pmu_nop_txn;
8618                         pmu->commit_txn = perf_pmu_nop_int;
8619                         pmu->cancel_txn = perf_pmu_nop_void;
8620                 }
8621         }
8622
8623         if (!pmu->pmu_enable) {
8624                 pmu->pmu_enable  = perf_pmu_nop_void;
8625                 pmu->pmu_disable = perf_pmu_nop_void;
8626         }
8627
8628         if (!pmu->event_idx)
8629                 pmu->event_idx = perf_event_idx_default;
8630
8631         list_add_rcu(&pmu->entry, &pmus);
8632         atomic_set(&pmu->exclusive_cnt, 0);
8633         ret = 0;
8634 unlock:
8635         mutex_unlock(&pmus_lock);
8636
8637         return ret;
8638
8639 free_dev:
8640         device_del(pmu->dev);
8641         put_device(pmu->dev);
8642
8643 free_idr:
8644         if (pmu->type >= PERF_TYPE_MAX)
8645                 idr_remove(&pmu_idr, pmu->type);
8646
8647 free_pdc:
8648         free_percpu(pmu->pmu_disable_count);
8649         goto unlock;
8650 }
8651 EXPORT_SYMBOL_GPL(perf_pmu_register);
8652
8653 void perf_pmu_unregister(struct pmu *pmu)
8654 {
8655         mutex_lock(&pmus_lock);
8656         list_del_rcu(&pmu->entry);
8657         mutex_unlock(&pmus_lock);
8658
8659         /*
8660          * We dereference the pmu list under both SRCU and regular RCU, so
8661          * synchronize against both of those.
8662          */
8663         synchronize_srcu(&pmus_srcu);
8664         synchronize_rcu();
8665
8666         free_percpu(pmu->pmu_disable_count);
8667         if (pmu->type >= PERF_TYPE_MAX)
8668                 idr_remove(&pmu_idr, pmu->type);
8669         if (pmu->nr_addr_filters)
8670                 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
8671         device_del(pmu->dev);
8672         put_device(pmu->dev);
8673         free_pmu_context(pmu);
8674 }
8675 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
8676
8677 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
8678 {
8679         struct perf_event_context *ctx = NULL;
8680         int ret;
8681
8682         if (!try_module_get(pmu->module))
8683                 return -ENODEV;
8684
8685         if (event->group_leader != event) {
8686                 /*
8687                  * This ctx->mutex can nest when we're called through
8688                  * inheritance. See the perf_event_ctx_lock_nested() comment.
8689                  */
8690                 ctx = perf_event_ctx_lock_nested(event->group_leader,
8691                                                  SINGLE_DEPTH_NESTING);
8692                 BUG_ON(!ctx);
8693         }
8694
8695         event->pmu = pmu;
8696         ret = pmu->event_init(event);
8697
8698         if (ctx)
8699                 perf_event_ctx_unlock(event->group_leader, ctx);
8700
8701         if (ret)
8702                 module_put(pmu->module);
8703
8704         return ret;
8705 }
8706
8707 static struct pmu *perf_init_event(struct perf_event *event)
8708 {
8709         struct pmu *pmu = NULL;
8710         int idx;
8711         int ret;
8712
8713         idx = srcu_read_lock(&pmus_srcu);
8714
8715         rcu_read_lock();
8716         pmu = idr_find(&pmu_idr, event->attr.type);
8717         rcu_read_unlock();
8718         if (pmu) {
8719                 ret = perf_try_init_event(pmu, event);
8720                 if (ret)
8721                         pmu = ERR_PTR(ret);
8722                 goto unlock;
8723         }
8724
8725         list_for_each_entry_rcu(pmu, &pmus, entry) {
8726                 ret = perf_try_init_event(pmu, event);
8727                 if (!ret)
8728                         goto unlock;
8729
8730                 if (ret != -ENOENT) {
8731                         pmu = ERR_PTR(ret);
8732                         goto unlock;
8733                 }
8734         }
8735         pmu = ERR_PTR(-ENOENT);
8736 unlock:
8737         srcu_read_unlock(&pmus_srcu, idx);
8738
8739         return pmu;
8740 }
8741
8742 static void attach_sb_event(struct perf_event *event)
8743 {
8744         struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
8745
8746         raw_spin_lock(&pel->lock);
8747         list_add_rcu(&event->sb_list, &pel->list);
8748         raw_spin_unlock(&pel->lock);
8749 }
8750
8751 /*
8752  * We keep a list of all !task (and therefore per-cpu) events
8753  * that need to receive side-band records.
8754  *
8755  * This avoids having to scan all the various PMU per-cpu contexts
8756  * looking for them.
8757  */
8758 static void account_pmu_sb_event(struct perf_event *event)
8759 {
8760         if (is_sb_event(event))
8761                 attach_sb_event(event);
8762 }
8763
8764 static void account_event_cpu(struct perf_event *event, int cpu)
8765 {
8766         if (event->parent)
8767                 return;
8768
8769         if (is_cgroup_event(event))
8770                 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
8771 }
8772
8773 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
8774 static void account_freq_event_nohz(void)
8775 {
8776 #ifdef CONFIG_NO_HZ_FULL
8777         /* Lock so we don't race with concurrent unaccount */
8778         spin_lock(&nr_freq_lock);
8779         if (atomic_inc_return(&nr_freq_events) == 1)
8780                 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
8781         spin_unlock(&nr_freq_lock);
8782 #endif
8783 }
8784
8785 static void account_freq_event(void)
8786 {
8787         if (tick_nohz_full_enabled())
8788                 account_freq_event_nohz();
8789         else
8790                 atomic_inc(&nr_freq_events);
8791 }
8792
8793
8794 static void account_event(struct perf_event *event)
8795 {
8796         bool inc = false;
8797
8798         if (event->parent)
8799                 return;
8800
8801         if (event->attach_state & PERF_ATTACH_TASK)
8802                 inc = true;
8803         if (event->attr.mmap || event->attr.mmap_data)
8804                 atomic_inc(&nr_mmap_events);
8805         if (event->attr.comm)
8806                 atomic_inc(&nr_comm_events);
8807         if (event->attr.task)
8808                 atomic_inc(&nr_task_events);
8809         if (event->attr.freq)
8810                 account_freq_event();
8811         if (event->attr.context_switch) {
8812                 atomic_inc(&nr_switch_events);
8813                 inc = true;
8814         }
8815         if (has_branch_stack(event))
8816                 inc = true;
8817         if (is_cgroup_event(event))
8818                 inc = true;
8819
8820         if (inc) {
8821                 if (atomic_inc_not_zero(&perf_sched_count))
8822                         goto enabled;
8823
8824                 mutex_lock(&perf_sched_mutex);
8825                 if (!atomic_read(&perf_sched_count)) {
8826                         static_branch_enable(&perf_sched_events);
8827                         /*
8828                          * Guarantee that all CPUs observe they key change and
8829                          * call the perf scheduling hooks before proceeding to
8830                          * install events that need them.
8831                          */
8832                         synchronize_sched();
8833                 }
8834                 /*
8835                  * Now that we have waited for the sync_sched(), allow further
8836                  * increments to by-pass the mutex.
8837                  */
8838                 atomic_inc(&perf_sched_count);
8839                 mutex_unlock(&perf_sched_mutex);
8840         }
8841 enabled:
8842
8843         account_event_cpu(event, event->cpu);
8844
8845         account_pmu_sb_event(event);
8846 }
8847
8848 /*
8849  * Allocate and initialize a event structure
8850  */
8851 static struct perf_event *
8852 perf_event_alloc(struct perf_event_attr *attr, int cpu,
8853                  struct task_struct *task,
8854                  struct perf_event *group_leader,
8855                  struct perf_event *parent_event,
8856                  perf_overflow_handler_t overflow_handler,
8857                  void *context, int cgroup_fd)
8858 {
8859         struct pmu *pmu;
8860         struct perf_event *event;
8861         struct hw_perf_event *hwc;
8862         long err = -EINVAL;
8863
8864         if ((unsigned)cpu >= nr_cpu_ids) {
8865                 if (!task || cpu != -1)
8866                         return ERR_PTR(-EINVAL);
8867         }
8868
8869         event = kzalloc(sizeof(*event), GFP_KERNEL);
8870         if (!event)
8871                 return ERR_PTR(-ENOMEM);
8872
8873         /*
8874          * Single events are their own group leaders, with an
8875          * empty sibling list:
8876          */
8877         if (!group_leader)
8878                 group_leader = event;
8879
8880         mutex_init(&event->child_mutex);
8881         INIT_LIST_HEAD(&event->child_list);
8882
8883         INIT_LIST_HEAD(&event->group_entry);
8884         INIT_LIST_HEAD(&event->event_entry);
8885         INIT_LIST_HEAD(&event->sibling_list);
8886         INIT_LIST_HEAD(&event->rb_entry);
8887         INIT_LIST_HEAD(&event->active_entry);
8888         INIT_LIST_HEAD(&event->addr_filters.list);
8889         INIT_HLIST_NODE(&event->hlist_entry);
8890
8891
8892         init_waitqueue_head(&event->waitq);
8893         init_irq_work(&event->pending, perf_pending_event);
8894
8895         mutex_init(&event->mmap_mutex);
8896         raw_spin_lock_init(&event->addr_filters.lock);
8897
8898         atomic_long_set(&event->refcount, 1);
8899         event->cpu              = cpu;
8900         event->attr             = *attr;
8901         event->group_leader     = group_leader;
8902         event->pmu              = NULL;
8903         event->oncpu            = -1;
8904
8905         event->parent           = parent_event;
8906
8907         event->ns               = get_pid_ns(task_active_pid_ns(current));
8908         event->id               = atomic64_inc_return(&perf_event_id);
8909
8910         event->state            = PERF_EVENT_STATE_INACTIVE;
8911
8912         if (task) {
8913                 event->attach_state = PERF_ATTACH_TASK;
8914                 /*
8915                  * XXX pmu::event_init needs to know what task to account to
8916                  * and we cannot use the ctx information because we need the
8917                  * pmu before we get a ctx.
8918                  */
8919                 event->hw.target = task;
8920         }
8921
8922         event->clock = &local_clock;
8923         if (parent_event)
8924                 event->clock = parent_event->clock;
8925
8926         if (!overflow_handler && parent_event) {
8927                 overflow_handler = parent_event->overflow_handler;
8928                 context = parent_event->overflow_handler_context;
8929         }
8930
8931         if (overflow_handler) {
8932                 event->overflow_handler = overflow_handler;
8933                 event->overflow_handler_context = context;
8934         } else if (is_write_backward(event)){
8935                 event->overflow_handler = perf_event_output_backward;
8936                 event->overflow_handler_context = NULL;
8937         } else {
8938                 event->overflow_handler = perf_event_output_forward;
8939                 event->overflow_handler_context = NULL;
8940         }
8941
8942         perf_event__state_init(event);
8943
8944         pmu = NULL;
8945
8946         hwc = &event->hw;
8947         hwc->sample_period = attr->sample_period;
8948         if (attr->freq && attr->sample_freq)
8949                 hwc->sample_period = 1;
8950         hwc->last_period = hwc->sample_period;
8951
8952         local64_set(&hwc->period_left, hwc->sample_period);
8953
8954         /*
8955          * we currently do not support PERF_FORMAT_GROUP on inherited events
8956          */
8957         if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
8958                 goto err_ns;
8959
8960         if (!has_branch_stack(event))
8961                 event->attr.branch_sample_type = 0;
8962
8963         if (cgroup_fd != -1) {
8964                 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
8965                 if (err)
8966                         goto err_ns;
8967         }
8968
8969         pmu = perf_init_event(event);
8970         if (!pmu)
8971                 goto err_ns;
8972         else if (IS_ERR(pmu)) {
8973                 err = PTR_ERR(pmu);
8974                 goto err_ns;
8975         }
8976
8977         err = exclusive_event_init(event);
8978         if (err)
8979                 goto err_pmu;
8980
8981         if (has_addr_filter(event)) {
8982                 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
8983                                                    sizeof(unsigned long),
8984                                                    GFP_KERNEL);
8985                 if (!event->addr_filters_offs)
8986                         goto err_per_task;
8987
8988                 /* force hw sync on the address filters */
8989                 event->addr_filters_gen = 1;
8990         }
8991
8992         if (!event->parent) {
8993                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
8994                         err = get_callchain_buffers(attr->sample_max_stack);
8995                         if (err)
8996                                 goto err_addr_filters;
8997                 }
8998         }
8999
9000         /* symmetric to unaccount_event() in _free_event() */
9001         account_event(event);
9002
9003         return event;
9004
9005 err_addr_filters:
9006         kfree(event->addr_filters_offs);
9007
9008 err_per_task:
9009         exclusive_event_destroy(event);
9010
9011 err_pmu:
9012         if (event->destroy)
9013                 event->destroy(event);
9014         module_put(pmu->module);
9015 err_ns:
9016         if (is_cgroup_event(event))
9017                 perf_detach_cgroup(event);
9018         if (event->ns)
9019                 put_pid_ns(event->ns);
9020         kfree(event);
9021
9022         return ERR_PTR(err);
9023 }
9024
9025 static int perf_copy_attr(struct perf_event_attr __user *uattr,
9026                           struct perf_event_attr *attr)
9027 {
9028         u32 size;
9029         int ret;
9030
9031         if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9032                 return -EFAULT;
9033
9034         /*
9035          * zero the full structure, so that a short copy will be nice.
9036          */
9037         memset(attr, 0, sizeof(*attr));
9038
9039         ret = get_user(size, &uattr->size);
9040         if (ret)
9041                 return ret;
9042
9043         if (size > PAGE_SIZE)   /* silly large */
9044                 goto err_size;
9045
9046         if (!size)              /* abi compat */
9047                 size = PERF_ATTR_SIZE_VER0;
9048
9049         if (size < PERF_ATTR_SIZE_VER0)
9050                 goto err_size;
9051
9052         /*
9053          * If we're handed a bigger struct than we know of,
9054          * ensure all the unknown bits are 0 - i.e. new
9055          * user-space does not rely on any kernel feature
9056          * extensions we dont know about yet.
9057          */
9058         if (size > sizeof(*attr)) {
9059                 unsigned char __user *addr;
9060                 unsigned char __user *end;
9061                 unsigned char val;
9062
9063                 addr = (void __user *)uattr + sizeof(*attr);
9064                 end  = (void __user *)uattr + size;
9065
9066                 for (; addr < end; addr++) {
9067                         ret = get_user(val, addr);
9068                         if (ret)
9069                                 return ret;
9070                         if (val)
9071                                 goto err_size;
9072                 }
9073                 size = sizeof(*attr);
9074         }
9075
9076         ret = copy_from_user(attr, uattr, size);
9077         if (ret)
9078                 return -EFAULT;
9079
9080         if (attr->__reserved_1)
9081                 return -EINVAL;
9082
9083         if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9084                 return -EINVAL;
9085
9086         if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9087                 return -EINVAL;
9088
9089         if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9090                 u64 mask = attr->branch_sample_type;
9091
9092                 /* only using defined bits */
9093                 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9094                         return -EINVAL;
9095
9096                 /* at least one branch bit must be set */
9097                 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9098                         return -EINVAL;
9099
9100                 /* propagate priv level, when not set for branch */
9101                 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9102
9103                         /* exclude_kernel checked on syscall entry */
9104                         if (!attr->exclude_kernel)
9105                                 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9106
9107                         if (!attr->exclude_user)
9108                                 mask |= PERF_SAMPLE_BRANCH_USER;
9109
9110                         if (!attr->exclude_hv)
9111                                 mask |= PERF_SAMPLE_BRANCH_HV;
9112                         /*
9113                          * adjust user setting (for HW filter setup)
9114                          */
9115                         attr->branch_sample_type = mask;
9116                 }
9117                 /* privileged levels capture (kernel, hv): check permissions */
9118                 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
9119                     && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9120                         return -EACCES;
9121         }
9122
9123         if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
9124                 ret = perf_reg_validate(attr->sample_regs_user);
9125                 if (ret)
9126                         return ret;
9127         }
9128
9129         if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9130                 if (!arch_perf_have_user_stack_dump())
9131                         return -ENOSYS;
9132
9133                 /*
9134                  * We have __u32 type for the size, but so far
9135                  * we can only use __u16 as maximum due to the
9136                  * __u16 sample size limit.
9137                  */
9138                 if (attr->sample_stack_user >= USHRT_MAX)
9139                         ret = -EINVAL;
9140                 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9141                         ret = -EINVAL;
9142         }
9143
9144         if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9145                 ret = perf_reg_validate(attr->sample_regs_intr);
9146 out:
9147         return ret;
9148
9149 err_size:
9150         put_user(sizeof(*attr), &uattr->size);
9151         ret = -E2BIG;
9152         goto out;
9153 }
9154
9155 static int
9156 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
9157 {
9158         struct ring_buffer *rb = NULL;
9159         int ret = -EINVAL;
9160
9161         if (!output_event)
9162                 goto set;
9163
9164         /* don't allow circular references */
9165         if (event == output_event)
9166                 goto out;
9167
9168         /*
9169          * Don't allow cross-cpu buffers
9170          */
9171         if (output_event->cpu != event->cpu)
9172                 goto out;
9173
9174         /*
9175          * If its not a per-cpu rb, it must be the same task.
9176          */
9177         if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9178                 goto out;
9179
9180         /*
9181          * Mixing clocks in the same buffer is trouble you don't need.
9182          */
9183         if (output_event->clock != event->clock)
9184                 goto out;
9185
9186         /*
9187          * Either writing ring buffer from beginning or from end.
9188          * Mixing is not allowed.
9189          */
9190         if (is_write_backward(output_event) != is_write_backward(event))
9191                 goto out;
9192
9193         /*
9194          * If both events generate aux data, they must be on the same PMU
9195          */
9196         if (has_aux(event) && has_aux(output_event) &&
9197             event->pmu != output_event->pmu)
9198                 goto out;
9199
9200 set:
9201         mutex_lock(&event->mmap_mutex);
9202         /* Can't redirect output if we've got an active mmap() */
9203         if (atomic_read(&event->mmap_count))
9204                 goto unlock;
9205
9206         if (output_event) {
9207                 /* get the rb we want to redirect to */
9208                 rb = ring_buffer_get(output_event);
9209                 if (!rb)
9210                         goto unlock;
9211         }
9212
9213         ring_buffer_attach(event, rb);
9214
9215         ret = 0;
9216 unlock:
9217         mutex_unlock(&event->mmap_mutex);
9218
9219 out:
9220         return ret;
9221 }
9222
9223 static void mutex_lock_double(struct mutex *a, struct mutex *b)
9224 {
9225         if (b < a)
9226                 swap(a, b);
9227
9228         mutex_lock(a);
9229         mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9230 }
9231
9232 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9233 {
9234         bool nmi_safe = false;
9235
9236         switch (clk_id) {
9237         case CLOCK_MONOTONIC:
9238                 event->clock = &ktime_get_mono_fast_ns;
9239                 nmi_safe = true;
9240                 break;
9241
9242         case CLOCK_MONOTONIC_RAW:
9243                 event->clock = &ktime_get_raw_fast_ns;
9244                 nmi_safe = true;
9245                 break;
9246
9247         case CLOCK_REALTIME:
9248                 event->clock = &ktime_get_real_ns;
9249                 break;
9250
9251         case CLOCK_BOOTTIME:
9252                 event->clock = &ktime_get_boot_ns;
9253                 break;
9254
9255         case CLOCK_TAI:
9256                 event->clock = &ktime_get_tai_ns;
9257                 break;
9258
9259         default:
9260                 return -EINVAL;
9261         }
9262
9263         if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9264                 return -EINVAL;
9265
9266         return 0;
9267 }
9268
9269 /**
9270  * sys_perf_event_open - open a performance event, associate it to a task/cpu
9271  *
9272  * @attr_uptr:  event_id type attributes for monitoring/sampling
9273  * @pid:                target pid
9274  * @cpu:                target cpu
9275  * @group_fd:           group leader event fd
9276  */
9277 SYSCALL_DEFINE5(perf_event_open,
9278                 struct perf_event_attr __user *, attr_uptr,
9279                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
9280 {
9281         struct perf_event *group_leader = NULL, *output_event = NULL;
9282         struct perf_event *event, *sibling;
9283         struct perf_event_attr attr;
9284         struct perf_event_context *ctx, *uninitialized_var(gctx);
9285         struct file *event_file = NULL;
9286         struct fd group = {NULL, 0};
9287         struct task_struct *task = NULL;
9288         struct pmu *pmu;
9289         int event_fd;
9290         int move_group = 0;
9291         int err;
9292         int f_flags = O_RDWR;
9293         int cgroup_fd = -1;
9294
9295         /* for future expandability... */
9296         if (flags & ~PERF_FLAG_ALL)
9297                 return -EINVAL;
9298
9299         err = perf_copy_attr(attr_uptr, &attr);
9300         if (err)
9301                 return err;
9302
9303         if (!attr.exclude_kernel) {
9304                 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9305                         return -EACCES;
9306         }
9307
9308         if (attr.freq) {
9309                 if (attr.sample_freq > sysctl_perf_event_sample_rate)
9310                         return -EINVAL;
9311         } else {
9312                 if (attr.sample_period & (1ULL << 63))
9313                         return -EINVAL;
9314         }
9315
9316         if (!attr.sample_max_stack)
9317                 attr.sample_max_stack = sysctl_perf_event_max_stack;
9318
9319         /*
9320          * In cgroup mode, the pid argument is used to pass the fd
9321          * opened to the cgroup directory in cgroupfs. The cpu argument
9322          * designates the cpu on which to monitor threads from that
9323          * cgroup.
9324          */
9325         if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9326                 return -EINVAL;
9327
9328         if (flags & PERF_FLAG_FD_CLOEXEC)
9329                 f_flags |= O_CLOEXEC;
9330
9331         event_fd = get_unused_fd_flags(f_flags);
9332         if (event_fd < 0)
9333                 return event_fd;
9334
9335         if (group_fd != -1) {
9336                 err = perf_fget_light(group_fd, &group);
9337                 if (err)
9338                         goto err_fd;
9339                 group_leader = group.file->private_data;
9340                 if (flags & PERF_FLAG_FD_OUTPUT)
9341                         output_event = group_leader;
9342                 if (flags & PERF_FLAG_FD_NO_GROUP)
9343                         group_leader = NULL;
9344         }
9345
9346         if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
9347                 task = find_lively_task_by_vpid(pid);
9348                 if (IS_ERR(task)) {
9349                         err = PTR_ERR(task);
9350                         goto err_group_fd;
9351                 }
9352         }
9353
9354         if (task && group_leader &&
9355             group_leader->attr.inherit != attr.inherit) {
9356                 err = -EINVAL;
9357                 goto err_task;
9358         }
9359
9360         get_online_cpus();
9361
9362         if (task) {
9363                 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9364                 if (err)
9365                         goto err_cpus;
9366
9367                 /*
9368                  * Reuse ptrace permission checks for now.
9369                  *
9370                  * We must hold cred_guard_mutex across this and any potential
9371                  * perf_install_in_context() call for this new event to
9372                  * serialize against exec() altering our credentials (and the
9373                  * perf_event_exit_task() that could imply).
9374                  */
9375                 err = -EACCES;
9376                 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9377                         goto err_cred;
9378         }
9379
9380         if (flags & PERF_FLAG_PID_CGROUP)
9381                 cgroup_fd = pid;
9382
9383         event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
9384                                  NULL, NULL, cgroup_fd);
9385         if (IS_ERR(event)) {
9386                 err = PTR_ERR(event);
9387                 goto err_cred;
9388         }
9389
9390         if (is_sampling_event(event)) {
9391                 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
9392                         err = -EOPNOTSUPP;
9393                         goto err_alloc;
9394                 }
9395         }
9396
9397         /*
9398          * Special case software events and allow them to be part of
9399          * any hardware group.
9400          */
9401         pmu = event->pmu;
9402
9403         if (attr.use_clockid) {
9404                 err = perf_event_set_clock(event, attr.clockid);
9405                 if (err)
9406                         goto err_alloc;
9407         }
9408
9409         if (group_leader &&
9410             (is_software_event(event) != is_software_event(group_leader))) {
9411                 if (is_software_event(event)) {
9412                         /*
9413                          * If event and group_leader are not both a software
9414                          * event, and event is, then group leader is not.
9415                          *
9416                          * Allow the addition of software events to !software
9417                          * groups, this is safe because software events never
9418                          * fail to schedule.
9419                          */
9420                         pmu = group_leader->pmu;
9421                 } else if (is_software_event(group_leader) &&
9422                            (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
9423                         /*
9424                          * In case the group is a pure software group, and we
9425                          * try to add a hardware event, move the whole group to
9426                          * the hardware context.
9427                          */
9428                         move_group = 1;
9429                 }
9430         }
9431
9432         /*
9433          * Get the target context (task or percpu):
9434          */
9435         ctx = find_get_context(pmu, task, event);
9436         if (IS_ERR(ctx)) {
9437                 err = PTR_ERR(ctx);
9438                 goto err_alloc;
9439         }
9440
9441         if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
9442                 err = -EBUSY;
9443                 goto err_context;
9444         }
9445
9446         /*
9447          * Look up the group leader (we will attach this event to it):
9448          */
9449         if (group_leader) {
9450                 err = -EINVAL;
9451
9452                 /*
9453                  * Do not allow a recursive hierarchy (this new sibling
9454                  * becoming part of another group-sibling):
9455                  */
9456                 if (group_leader->group_leader != group_leader)
9457                         goto err_context;
9458
9459                 /* All events in a group should have the same clock */
9460                 if (group_leader->clock != event->clock)
9461                         goto err_context;
9462
9463                 /*
9464                  * Do not allow to attach to a group in a different
9465                  * task or CPU context:
9466                  */
9467                 if (move_group) {
9468                         /*
9469                          * Make sure we're both on the same task, or both
9470                          * per-cpu events.
9471                          */
9472                         if (group_leader->ctx->task != ctx->task)
9473                                 goto err_context;
9474
9475                         /*
9476                          * Make sure we're both events for the same CPU;
9477                          * grouping events for different CPUs is broken; since
9478                          * you can never concurrently schedule them anyhow.
9479                          */
9480                         if (group_leader->cpu != event->cpu)
9481                                 goto err_context;
9482                 } else {
9483                         if (group_leader->ctx != ctx)
9484                                 goto err_context;
9485                 }
9486
9487                 /*
9488                  * Only a group leader can be exclusive or pinned
9489                  */
9490                 if (attr.exclusive || attr.pinned)
9491                         goto err_context;
9492         }
9493
9494         if (output_event) {
9495                 err = perf_event_set_output(event, output_event);
9496                 if (err)
9497                         goto err_context;
9498         }
9499
9500         event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
9501                                         f_flags);
9502         if (IS_ERR(event_file)) {
9503                 err = PTR_ERR(event_file);
9504                 event_file = NULL;
9505                 goto err_context;
9506         }
9507
9508         if (move_group) {
9509                 gctx = group_leader->ctx;
9510                 mutex_lock_double(&gctx->mutex, &ctx->mutex);
9511                 if (gctx->task == TASK_TOMBSTONE) {
9512                         err = -ESRCH;
9513                         goto err_locked;
9514                 }
9515         } else {
9516                 mutex_lock(&ctx->mutex);
9517         }
9518
9519         if (ctx->task == TASK_TOMBSTONE) {
9520                 err = -ESRCH;
9521                 goto err_locked;
9522         }
9523
9524         if (!perf_event_validate_size(event)) {
9525                 err = -E2BIG;
9526                 goto err_locked;
9527         }
9528
9529         /*
9530          * Must be under the same ctx::mutex as perf_install_in_context(),
9531          * because we need to serialize with concurrent event creation.
9532          */
9533         if (!exclusive_event_installable(event, ctx)) {
9534                 /* exclusive and group stuff are assumed mutually exclusive */
9535                 WARN_ON_ONCE(move_group);
9536
9537                 err = -EBUSY;
9538                 goto err_locked;
9539         }
9540
9541         WARN_ON_ONCE(ctx->parent_ctx);
9542
9543         /*
9544          * This is the point on no return; we cannot fail hereafter. This is
9545          * where we start modifying current state.
9546          */
9547
9548         if (move_group) {
9549                 /*
9550                  * See perf_event_ctx_lock() for comments on the details
9551                  * of swizzling perf_event::ctx.
9552                  */
9553                 perf_remove_from_context(group_leader, 0);
9554
9555                 list_for_each_entry(sibling, &group_leader->sibling_list,
9556                                     group_entry) {
9557                         perf_remove_from_context(sibling, 0);
9558                         put_ctx(gctx);
9559                 }
9560
9561                 /*
9562                  * Wait for everybody to stop referencing the events through
9563                  * the old lists, before installing it on new lists.
9564                  */
9565                 synchronize_rcu();
9566
9567                 /*
9568                  * Install the group siblings before the group leader.
9569                  *
9570                  * Because a group leader will try and install the entire group
9571                  * (through the sibling list, which is still in-tact), we can
9572                  * end up with siblings installed in the wrong context.
9573                  *
9574                  * By installing siblings first we NO-OP because they're not
9575                  * reachable through the group lists.
9576                  */
9577                 list_for_each_entry(sibling, &group_leader->sibling_list,
9578                                     group_entry) {
9579                         perf_event__state_init(sibling);
9580                         perf_install_in_context(ctx, sibling, sibling->cpu);
9581                         get_ctx(ctx);
9582                 }
9583
9584                 /*
9585                  * Removing from the context ends up with disabled
9586                  * event. What we want here is event in the initial
9587                  * startup state, ready to be add into new context.
9588                  */
9589                 perf_event__state_init(group_leader);
9590                 perf_install_in_context(ctx, group_leader, group_leader->cpu);
9591                 get_ctx(ctx);
9592
9593                 /*
9594                  * Now that all events are installed in @ctx, nothing
9595                  * references @gctx anymore, so drop the last reference we have
9596                  * on it.
9597                  */
9598                 put_ctx(gctx);
9599         }
9600
9601         /*
9602          * Precalculate sample_data sizes; do while holding ctx::mutex such
9603          * that we're serialized against further additions and before
9604          * perf_install_in_context() which is the point the event is active and
9605          * can use these values.
9606          */
9607         perf_event__header_size(event);
9608         perf_event__id_header_size(event);
9609
9610         event->owner = current;
9611
9612         perf_install_in_context(ctx, event, event->cpu);
9613         perf_unpin_context(ctx);
9614
9615         if (move_group)
9616                 mutex_unlock(&gctx->mutex);
9617         mutex_unlock(&ctx->mutex);
9618
9619         if (task) {
9620                 mutex_unlock(&task->signal->cred_guard_mutex);
9621                 put_task_struct(task);
9622         }
9623
9624         put_online_cpus();
9625
9626         mutex_lock(&current->perf_event_mutex);
9627         list_add_tail(&event->owner_entry, &current->perf_event_list);
9628         mutex_unlock(&current->perf_event_mutex);
9629
9630         /*
9631          * Drop the reference on the group_event after placing the
9632          * new event on the sibling_list. This ensures destruction
9633          * of the group leader will find the pointer to itself in
9634          * perf_group_detach().
9635          */
9636         fdput(group);
9637         fd_install(event_fd, event_file);
9638         return event_fd;
9639
9640 err_locked:
9641         if (move_group)
9642                 mutex_unlock(&gctx->mutex);
9643         mutex_unlock(&ctx->mutex);
9644 /* err_file: */
9645         fput(event_file);
9646 err_context:
9647         perf_unpin_context(ctx);
9648         put_ctx(ctx);
9649 err_alloc:
9650         /*
9651          * If event_file is set, the fput() above will have called ->release()
9652          * and that will take care of freeing the event.
9653          */
9654         if (!event_file)
9655                 free_event(event);
9656 err_cred:
9657         if (task)
9658                 mutex_unlock(&task->signal->cred_guard_mutex);
9659 err_cpus:
9660         put_online_cpus();
9661 err_task:
9662         if (task)
9663                 put_task_struct(task);
9664 err_group_fd:
9665         fdput(group);
9666 err_fd:
9667         put_unused_fd(event_fd);
9668         return err;
9669 }
9670
9671 /**
9672  * perf_event_create_kernel_counter
9673  *
9674  * @attr: attributes of the counter to create
9675  * @cpu: cpu in which the counter is bound
9676  * @task: task to profile (NULL for percpu)
9677  */
9678 struct perf_event *
9679 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
9680                                  struct task_struct *task,
9681                                  perf_overflow_handler_t overflow_handler,
9682                                  void *context)
9683 {
9684         struct perf_event_context *ctx;
9685         struct perf_event *event;
9686         int err;
9687
9688         /*
9689          * Get the target context (task or percpu):
9690          */
9691
9692         event = perf_event_alloc(attr, cpu, task, NULL, NULL,
9693                                  overflow_handler, context, -1);
9694         if (IS_ERR(event)) {
9695                 err = PTR_ERR(event);
9696                 goto err;
9697         }
9698
9699         /* Mark owner so we could distinguish it from user events. */
9700         event->owner = TASK_TOMBSTONE;
9701
9702         ctx = find_get_context(event->pmu, task, event);
9703         if (IS_ERR(ctx)) {
9704                 err = PTR_ERR(ctx);
9705                 goto err_free;
9706         }
9707
9708         WARN_ON_ONCE(ctx->parent_ctx);
9709         mutex_lock(&ctx->mutex);
9710         if (ctx->task == TASK_TOMBSTONE) {
9711                 err = -ESRCH;
9712                 goto err_unlock;
9713         }
9714
9715         if (!exclusive_event_installable(event, ctx)) {
9716                 err = -EBUSY;
9717                 goto err_unlock;
9718         }
9719
9720         perf_install_in_context(ctx, event, cpu);
9721         perf_unpin_context(ctx);
9722         mutex_unlock(&ctx->mutex);
9723
9724         return event;
9725
9726 err_unlock:
9727         mutex_unlock(&ctx->mutex);
9728         perf_unpin_context(ctx);
9729         put_ctx(ctx);
9730 err_free:
9731         free_event(event);
9732 err:
9733         return ERR_PTR(err);
9734 }
9735 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9736
9737 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
9738 {
9739         struct perf_event_context *src_ctx;
9740         struct perf_event_context *dst_ctx;
9741         struct perf_event *event, *tmp;
9742         LIST_HEAD(events);
9743
9744         src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
9745         dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
9746
9747         /*
9748          * See perf_event_ctx_lock() for comments on the details
9749          * of swizzling perf_event::ctx.
9750          */
9751         mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
9752         list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
9753                                  event_entry) {
9754                 perf_remove_from_context(event, 0);
9755                 unaccount_event_cpu(event, src_cpu);
9756                 put_ctx(src_ctx);
9757                 list_add(&event->migrate_entry, &events);
9758         }
9759
9760         /*
9761          * Wait for the events to quiesce before re-instating them.
9762          */
9763         synchronize_rcu();
9764
9765         /*
9766          * Re-instate events in 2 passes.
9767          *
9768          * Skip over group leaders and only install siblings on this first
9769          * pass, siblings will not get enabled without a leader, however a
9770          * leader will enable its siblings, even if those are still on the old
9771          * context.
9772          */
9773         list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
9774                 if (event->group_leader == event)
9775                         continue;
9776
9777                 list_del(&event->migrate_entry);
9778                 if (event->state >= PERF_EVENT_STATE_OFF)
9779                         event->state = PERF_EVENT_STATE_INACTIVE;
9780                 account_event_cpu(event, dst_cpu);
9781                 perf_install_in_context(dst_ctx, event, dst_cpu);
9782                 get_ctx(dst_ctx);
9783         }
9784
9785         /*
9786          * Once all the siblings are setup properly, install the group leaders
9787          * to make it go.
9788          */
9789         list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
9790                 list_del(&event->migrate_entry);
9791                 if (event->state >= PERF_EVENT_STATE_OFF)
9792                         event->state = PERF_EVENT_STATE_INACTIVE;
9793                 account_event_cpu(event, dst_cpu);
9794                 perf_install_in_context(dst_ctx, event, dst_cpu);
9795                 get_ctx(dst_ctx);
9796         }
9797         mutex_unlock(&dst_ctx->mutex);
9798         mutex_unlock(&src_ctx->mutex);
9799 }
9800 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
9801
9802 static void sync_child_event(struct perf_event *child_event,
9803                                struct task_struct *child)
9804 {
9805         struct perf_event *parent_event = child_event->parent;
9806         u64 child_val;
9807
9808         if (child_event->attr.inherit_stat)
9809                 perf_event_read_event(child_event, child);
9810
9811         child_val = perf_event_count(child_event);
9812
9813         /*
9814          * Add back the child's count to the parent's count:
9815          */
9816         atomic64_add(child_val, &parent_event->child_count);
9817         atomic64_add(child_event->total_time_enabled,
9818                      &parent_event->child_total_time_enabled);
9819         atomic64_add(child_event->total_time_running,
9820                      &parent_event->child_total_time_running);
9821 }
9822
9823 static void
9824 perf_event_exit_event(struct perf_event *child_event,
9825                       struct perf_event_context *child_ctx,
9826                       struct task_struct *child)
9827 {
9828         struct perf_event *parent_event = child_event->parent;
9829
9830         /*
9831          * Do not destroy the 'original' grouping; because of the context
9832          * switch optimization the original events could've ended up in a
9833          * random child task.
9834          *
9835          * If we were to destroy the original group, all group related
9836          * operations would cease to function properly after this random
9837          * child dies.
9838          *
9839          * Do destroy all inherited groups, we don't care about those
9840          * and being thorough is better.
9841          */
9842         raw_spin_lock_irq(&child_ctx->lock);
9843         WARN_ON_ONCE(child_ctx->is_active);
9844
9845         if (parent_event)
9846                 perf_group_detach(child_event);
9847         list_del_event(child_event, child_ctx);
9848         child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
9849         raw_spin_unlock_irq(&child_ctx->lock);
9850
9851         /*
9852          * Parent events are governed by their filedesc, retain them.
9853          */
9854         if (!parent_event) {
9855                 perf_event_wakeup(child_event);
9856                 return;
9857         }
9858         /*
9859          * Child events can be cleaned up.
9860          */
9861
9862         sync_child_event(child_event, child);
9863
9864         /*
9865          * Remove this event from the parent's list
9866          */
9867         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
9868         mutex_lock(&parent_event->child_mutex);
9869         list_del_init(&child_event->child_list);
9870         mutex_unlock(&parent_event->child_mutex);
9871
9872         /*
9873          * Kick perf_poll() for is_event_hup().
9874          */
9875         perf_event_wakeup(parent_event);
9876         free_event(child_event);
9877         put_event(parent_event);
9878 }
9879
9880 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9881 {
9882         struct perf_event_context *child_ctx, *clone_ctx = NULL;
9883         struct perf_event *child_event, *next;
9884
9885         WARN_ON_ONCE(child != current);
9886
9887         child_ctx = perf_pin_task_context(child, ctxn);
9888         if (!child_ctx)
9889                 return;
9890
9891         /*
9892          * In order to reduce the amount of tricky in ctx tear-down, we hold
9893          * ctx::mutex over the entire thing. This serializes against almost
9894          * everything that wants to access the ctx.
9895          *
9896          * The exception is sys_perf_event_open() /
9897          * perf_event_create_kernel_count() which does find_get_context()
9898          * without ctx::mutex (it cannot because of the move_group double mutex
9899          * lock thing). See the comments in perf_install_in_context().
9900          */
9901         mutex_lock(&child_ctx->mutex);
9902
9903         /*
9904          * In a single ctx::lock section, de-schedule the events and detach the
9905          * context from the task such that we cannot ever get it scheduled back
9906          * in.
9907          */
9908         raw_spin_lock_irq(&child_ctx->lock);
9909         task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
9910
9911         /*
9912          * Now that the context is inactive, destroy the task <-> ctx relation
9913          * and mark the context dead.
9914          */
9915         RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
9916         put_ctx(child_ctx); /* cannot be last */
9917         WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
9918         put_task_struct(current); /* cannot be last */
9919
9920         clone_ctx = unclone_ctx(child_ctx);
9921         raw_spin_unlock_irq(&child_ctx->lock);
9922
9923         if (clone_ctx)
9924                 put_ctx(clone_ctx);
9925
9926         /*
9927          * Report the task dead after unscheduling the events so that we
9928          * won't get any samples after PERF_RECORD_EXIT. We can however still
9929          * get a few PERF_RECORD_READ events.
9930          */
9931         perf_event_task(child, child_ctx, 0);
9932
9933         list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
9934                 perf_event_exit_event(child_event, child_ctx, child);
9935
9936         mutex_unlock(&child_ctx->mutex);
9937
9938         put_ctx(child_ctx);
9939 }
9940
9941 /*
9942  * When a child task exits, feed back event values to parent events.
9943  *
9944  * Can be called with cred_guard_mutex held when called from
9945  * install_exec_creds().
9946  */
9947 void perf_event_exit_task(struct task_struct *child)
9948 {
9949         struct perf_event *event, *tmp;
9950         int ctxn;
9951
9952         mutex_lock(&child->perf_event_mutex);
9953         list_for_each_entry_safe(event, tmp, &child->perf_event_list,
9954                                  owner_entry) {
9955                 list_del_init(&event->owner_entry);
9956
9957                 /*
9958                  * Ensure the list deletion is visible before we clear
9959                  * the owner, closes a race against perf_release() where
9960                  * we need to serialize on the owner->perf_event_mutex.
9961                  */
9962                 smp_store_release(&event->owner, NULL);
9963         }
9964         mutex_unlock(&child->perf_event_mutex);
9965
9966         for_each_task_context_nr(ctxn)
9967                 perf_event_exit_task_context(child, ctxn);
9968
9969         /*
9970          * The perf_event_exit_task_context calls perf_event_task
9971          * with child's task_ctx, which generates EXIT events for
9972          * child contexts and sets child->perf_event_ctxp[] to NULL.
9973          * At this point we need to send EXIT events to cpu contexts.
9974          */
9975         perf_event_task(child, NULL, 0);
9976 }
9977
9978 static void perf_free_event(struct perf_event *event,
9979                             struct perf_event_context *ctx)
9980 {
9981         struct perf_event *parent = event->parent;
9982
9983         if (WARN_ON_ONCE(!parent))
9984                 return;
9985
9986         mutex_lock(&parent->child_mutex);
9987         list_del_init(&event->child_list);
9988         mutex_unlock(&parent->child_mutex);
9989
9990         put_event(parent);
9991
9992         raw_spin_lock_irq(&ctx->lock);
9993         perf_group_detach(event);
9994         list_del_event(event, ctx);
9995         raw_spin_unlock_irq(&ctx->lock);
9996         free_event(event);
9997 }
9998
9999 /*
10000  * Free an unexposed, unused context as created by inheritance by
10001  * perf_event_init_task below, used by fork() in case of fail.
10002  *
10003  * Not all locks are strictly required, but take them anyway to be nice and
10004  * help out with the lockdep assertions.
10005  */
10006 void perf_event_free_task(struct task_struct *task)
10007 {
10008         struct perf_event_context *ctx;
10009         struct perf_event *event, *tmp;
10010         int ctxn;
10011
10012         for_each_task_context_nr(ctxn) {
10013                 ctx = task->perf_event_ctxp[ctxn];
10014                 if (!ctx)
10015                         continue;
10016
10017                 mutex_lock(&ctx->mutex);
10018 again:
10019                 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
10020                                 group_entry)
10021                         perf_free_event(event, ctx);
10022
10023                 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
10024                                 group_entry)
10025                         perf_free_event(event, ctx);
10026
10027                 if (!list_empty(&ctx->pinned_groups) ||
10028                                 !list_empty(&ctx->flexible_groups))
10029                         goto again;
10030
10031                 mutex_unlock(&ctx->mutex);
10032
10033                 put_ctx(ctx);
10034         }
10035 }
10036
10037 void perf_event_delayed_put(struct task_struct *task)
10038 {
10039         int ctxn;
10040
10041         for_each_task_context_nr(ctxn)
10042                 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10043 }
10044
10045 struct file *perf_event_get(unsigned int fd)
10046 {
10047         struct file *file;
10048
10049         file = fget_raw(fd);
10050         if (!file)
10051                 return ERR_PTR(-EBADF);
10052
10053         if (file->f_op != &perf_fops) {
10054                 fput(file);
10055                 return ERR_PTR(-EBADF);
10056         }
10057
10058         return file;
10059 }
10060
10061 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10062 {
10063         if (!event)
10064                 return ERR_PTR(-EINVAL);
10065
10066         return &event->attr;
10067 }
10068
10069 /*
10070  * inherit a event from parent task to child task:
10071  */
10072 static struct perf_event *
10073 inherit_event(struct perf_event *parent_event,
10074               struct task_struct *parent,
10075               struct perf_event_context *parent_ctx,
10076               struct task_struct *child,
10077               struct perf_event *group_leader,
10078               struct perf_event_context *child_ctx)
10079 {
10080         enum perf_event_active_state parent_state = parent_event->state;
10081         struct perf_event *child_event;
10082         unsigned long flags;
10083
10084         /*
10085          * Instead of creating recursive hierarchies of events,
10086          * we link inherited events back to the original parent,
10087          * which has a filp for sure, which we use as the reference
10088          * count:
10089          */
10090         if (parent_event->parent)
10091                 parent_event = parent_event->parent;
10092
10093         child_event = perf_event_alloc(&parent_event->attr,
10094                                            parent_event->cpu,
10095                                            child,
10096                                            group_leader, parent_event,
10097                                            NULL, NULL, -1);
10098         if (IS_ERR(child_event))
10099                 return child_event;
10100
10101         /*
10102          * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10103          * must be under the same lock in order to serialize against
10104          * perf_event_release_kernel(), such that either we must observe
10105          * is_orphaned_event() or they will observe us on the child_list.
10106          */
10107         mutex_lock(&parent_event->child_mutex);
10108         if (is_orphaned_event(parent_event) ||
10109             !atomic_long_inc_not_zero(&parent_event->refcount)) {
10110                 mutex_unlock(&parent_event->child_mutex);
10111                 free_event(child_event);
10112                 return NULL;
10113         }
10114
10115         get_ctx(child_ctx);
10116
10117         /*
10118          * Make the child state follow the state of the parent event,
10119          * not its attr.disabled bit.  We hold the parent's mutex,
10120          * so we won't race with perf_event_{en, dis}able_family.
10121          */
10122         if (parent_state >= PERF_EVENT_STATE_INACTIVE)
10123                 child_event->state = PERF_EVENT_STATE_INACTIVE;
10124         else
10125                 child_event->state = PERF_EVENT_STATE_OFF;
10126
10127         if (parent_event->attr.freq) {
10128                 u64 sample_period = parent_event->hw.sample_period;
10129                 struct hw_perf_event *hwc = &child_event->hw;
10130
10131                 hwc->sample_period = sample_period;
10132                 hwc->last_period   = sample_period;
10133
10134                 local64_set(&hwc->period_left, sample_period);
10135         }
10136
10137         child_event->ctx = child_ctx;
10138         child_event->overflow_handler = parent_event->overflow_handler;
10139         child_event->overflow_handler_context
10140                 = parent_event->overflow_handler_context;
10141
10142         /*
10143          * Precalculate sample_data sizes
10144          */
10145         perf_event__header_size(child_event);
10146         perf_event__id_header_size(child_event);
10147
10148         /*
10149          * Link it up in the child's context:
10150          */
10151         raw_spin_lock_irqsave(&child_ctx->lock, flags);
10152         add_event_to_ctx(child_event, child_ctx);
10153         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
10154
10155         /*
10156          * Link this into the parent event's child list
10157          */
10158         list_add_tail(&child_event->child_list, &parent_event->child_list);
10159         mutex_unlock(&parent_event->child_mutex);
10160
10161         return child_event;
10162 }
10163
10164 static int inherit_group(struct perf_event *parent_event,
10165               struct task_struct *parent,
10166               struct perf_event_context *parent_ctx,
10167               struct task_struct *child,
10168               struct perf_event_context *child_ctx)
10169 {
10170         struct perf_event *leader;
10171         struct perf_event *sub;
10172         struct perf_event *child_ctr;
10173
10174         leader = inherit_event(parent_event, parent, parent_ctx,
10175                                  child, NULL, child_ctx);
10176         if (IS_ERR(leader))
10177                 return PTR_ERR(leader);
10178         list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10179                 child_ctr = inherit_event(sub, parent, parent_ctx,
10180                                             child, leader, child_ctx);
10181                 if (IS_ERR(child_ctr))
10182                         return PTR_ERR(child_ctr);
10183         }
10184         return 0;
10185 }
10186
10187 static int
10188 inherit_task_group(struct perf_event *event, struct task_struct *parent,
10189                    struct perf_event_context *parent_ctx,
10190                    struct task_struct *child, int ctxn,
10191                    int *inherited_all)
10192 {
10193         int ret;
10194         struct perf_event_context *child_ctx;
10195
10196         if (!event->attr.inherit) {
10197                 *inherited_all = 0;
10198                 return 0;
10199         }
10200
10201         child_ctx = child->perf_event_ctxp[ctxn];
10202         if (!child_ctx) {
10203                 /*
10204                  * This is executed from the parent task context, so
10205                  * inherit events that have been marked for cloning.
10206                  * First allocate and initialize a context for the
10207                  * child.
10208                  */
10209
10210                 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
10211                 if (!child_ctx)
10212                         return -ENOMEM;
10213
10214                 child->perf_event_ctxp[ctxn] = child_ctx;
10215         }
10216
10217         ret = inherit_group(event, parent, parent_ctx,
10218                             child, child_ctx);
10219
10220         if (ret)
10221                 *inherited_all = 0;
10222
10223         return ret;
10224 }
10225
10226 /*
10227  * Initialize the perf_event context in task_struct
10228  */
10229 static int perf_event_init_context(struct task_struct *child, int ctxn)
10230 {
10231         struct perf_event_context *child_ctx, *parent_ctx;
10232         struct perf_event_context *cloned_ctx;
10233         struct perf_event *event;
10234         struct task_struct *parent = current;
10235         int inherited_all = 1;
10236         unsigned long flags;
10237         int ret = 0;
10238
10239         if (likely(!parent->perf_event_ctxp[ctxn]))
10240                 return 0;
10241
10242         /*
10243          * If the parent's context is a clone, pin it so it won't get
10244          * swapped under us.
10245          */
10246         parent_ctx = perf_pin_task_context(parent, ctxn);
10247         if (!parent_ctx)
10248                 return 0;
10249
10250         /*
10251          * No need to check if parent_ctx != NULL here; since we saw
10252          * it non-NULL earlier, the only reason for it to become NULL
10253          * is if we exit, and since we're currently in the middle of
10254          * a fork we can't be exiting at the same time.
10255          */
10256
10257         /*
10258          * Lock the parent list. No need to lock the child - not PID
10259          * hashed yet and not running, so nobody can access it.
10260          */
10261         mutex_lock(&parent_ctx->mutex);
10262
10263         /*
10264          * We dont have to disable NMIs - we are only looking at
10265          * the list, not manipulating it:
10266          */
10267         list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
10268                 ret = inherit_task_group(event, parent, parent_ctx,
10269                                          child, ctxn, &inherited_all);
10270                 if (ret)
10271                         break;
10272         }
10273
10274         /*
10275          * We can't hold ctx->lock when iterating the ->flexible_group list due
10276          * to allocations, but we need to prevent rotation because
10277          * rotate_ctx() will change the list from interrupt context.
10278          */
10279         raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10280         parent_ctx->rotate_disable = 1;
10281         raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10282
10283         list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
10284                 ret = inherit_task_group(event, parent, parent_ctx,
10285                                          child, ctxn, &inherited_all);
10286                 if (ret)
10287                         break;
10288         }
10289
10290         raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10291         parent_ctx->rotate_disable = 0;
10292
10293         child_ctx = child->perf_event_ctxp[ctxn];
10294
10295         if (child_ctx && inherited_all) {
10296                 /*
10297                  * Mark the child context as a clone of the parent
10298                  * context, or of whatever the parent is a clone of.
10299                  *
10300                  * Note that if the parent is a clone, the holding of
10301                  * parent_ctx->lock avoids it from being uncloned.
10302                  */
10303                 cloned_ctx = parent_ctx->parent_ctx;
10304                 if (cloned_ctx) {
10305                         child_ctx->parent_ctx = cloned_ctx;
10306                         child_ctx->parent_gen = parent_ctx->parent_gen;
10307                 } else {
10308                         child_ctx->parent_ctx = parent_ctx;
10309                         child_ctx->parent_gen = parent_ctx->generation;
10310                 }
10311                 get_ctx(child_ctx->parent_ctx);
10312         }
10313
10314         raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10315         mutex_unlock(&parent_ctx->mutex);
10316
10317         perf_unpin_context(parent_ctx);
10318         put_ctx(parent_ctx);
10319
10320         return ret;
10321 }
10322
10323 /*
10324  * Initialize the perf_event context in task_struct
10325  */
10326 int perf_event_init_task(struct task_struct *child)
10327 {
10328         int ctxn, ret;
10329
10330         memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
10331         mutex_init(&child->perf_event_mutex);
10332         INIT_LIST_HEAD(&child->perf_event_list);
10333
10334         for_each_task_context_nr(ctxn) {
10335                 ret = perf_event_init_context(child, ctxn);
10336                 if (ret) {
10337                         perf_event_free_task(child);
10338                         return ret;
10339                 }
10340         }
10341
10342         return 0;
10343 }
10344
10345 static void __init perf_event_init_all_cpus(void)
10346 {
10347         struct swevent_htable *swhash;
10348         int cpu;
10349
10350         for_each_possible_cpu(cpu) {
10351                 swhash = &per_cpu(swevent_htable, cpu);
10352                 mutex_init(&swhash->hlist_mutex);
10353                 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
10354
10355                 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
10356                 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
10357         }
10358 }
10359
10360 int perf_event_init_cpu(unsigned int cpu)
10361 {
10362         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
10363
10364         mutex_lock(&swhash->hlist_mutex);
10365         if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
10366                 struct swevent_hlist *hlist;
10367
10368                 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
10369                 WARN_ON(!hlist);
10370                 rcu_assign_pointer(swhash->swevent_hlist, hlist);
10371         }
10372         mutex_unlock(&swhash->hlist_mutex);
10373         return 0;
10374 }
10375
10376 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
10377 static void __perf_event_exit_context(void *__info)
10378 {
10379         struct perf_event_context *ctx = __info;
10380         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
10381         struct perf_event *event;
10382
10383         raw_spin_lock(&ctx->lock);
10384         list_for_each_entry(event, &ctx->event_list, event_entry)
10385                 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
10386         raw_spin_unlock(&ctx->lock);
10387 }
10388
10389 static void perf_event_exit_cpu_context(int cpu)
10390 {
10391         struct perf_event_context *ctx;
10392         struct pmu *pmu;
10393         int idx;
10394
10395         idx = srcu_read_lock(&pmus_srcu);
10396         list_for_each_entry_rcu(pmu, &pmus, entry) {
10397                 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
10398
10399                 mutex_lock(&ctx->mutex);
10400                 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
10401                 mutex_unlock(&ctx->mutex);
10402         }
10403         srcu_read_unlock(&pmus_srcu, idx);
10404 }
10405 #else
10406
10407 static void perf_event_exit_cpu_context(int cpu) { }
10408
10409 #endif
10410
10411 int perf_event_exit_cpu(unsigned int cpu)
10412 {
10413         perf_event_exit_cpu_context(cpu);
10414         return 0;
10415 }
10416
10417 static int
10418 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
10419 {
10420         int cpu;
10421
10422         for_each_online_cpu(cpu)
10423                 perf_event_exit_cpu(cpu);
10424
10425         return NOTIFY_OK;
10426 }
10427
10428 /*
10429  * Run the perf reboot notifier at the very last possible moment so that
10430  * the generic watchdog code runs as long as possible.
10431  */
10432 static struct notifier_block perf_reboot_notifier = {
10433         .notifier_call = perf_reboot,
10434         .priority = INT_MIN,
10435 };
10436
10437 void __init perf_event_init(void)
10438 {
10439         int ret;
10440
10441         idr_init(&pmu_idr);
10442
10443         perf_event_init_all_cpus();
10444         init_srcu_struct(&pmus_srcu);
10445         perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
10446         perf_pmu_register(&perf_cpu_clock, NULL, -1);
10447         perf_pmu_register(&perf_task_clock, NULL, -1);
10448         perf_tp_register();
10449         perf_event_init_cpu(smp_processor_id());
10450         register_reboot_notifier(&perf_reboot_notifier);
10451
10452         ret = init_hw_breakpoint();
10453         WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
10454
10455         /*
10456          * Build time assertion that we keep the data_head at the intended
10457          * location.  IOW, validation we got the __reserved[] size right.
10458          */
10459         BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
10460                      != 1024);
10461 }
10462
10463 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
10464                               char *page)
10465 {
10466         struct perf_pmu_events_attr *pmu_attr =
10467                 container_of(attr, struct perf_pmu_events_attr, attr);
10468
10469         if (pmu_attr->event_str)
10470                 return sprintf(page, "%s\n", pmu_attr->event_str);
10471
10472         return 0;
10473 }
10474 EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
10475
10476 static int __init perf_event_sysfs_init(void)
10477 {
10478         struct pmu *pmu;
10479         int ret;
10480
10481         mutex_lock(&pmus_lock);
10482
10483         ret = bus_register(&pmu_bus);
10484         if (ret)
10485                 goto unlock;
10486
10487         list_for_each_entry(pmu, &pmus, entry) {
10488                 if (!pmu->name || pmu->type < 0)
10489                         continue;
10490
10491                 ret = pmu_dev_alloc(pmu);
10492                 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
10493         }
10494         pmu_bus_running = 1;
10495         ret = 0;
10496
10497 unlock:
10498         mutex_unlock(&pmus_lock);
10499
10500         return ret;
10501 }
10502 device_initcall(perf_event_sysfs_init);
10503
10504 #ifdef CONFIG_CGROUP_PERF
10505 static struct cgroup_subsys_state *
10506 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
10507 {
10508         struct perf_cgroup *jc;
10509
10510         jc = kzalloc(sizeof(*jc), GFP_KERNEL);
10511         if (!jc)
10512                 return ERR_PTR(-ENOMEM);
10513
10514         jc->info = alloc_percpu(struct perf_cgroup_info);
10515         if (!jc->info) {
10516                 kfree(jc);
10517                 return ERR_PTR(-ENOMEM);
10518         }
10519
10520         return &jc->css;
10521 }
10522
10523 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
10524 {
10525         struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
10526
10527         free_percpu(jc->info);
10528         kfree(jc);
10529 }
10530
10531 static int __perf_cgroup_move(void *info)
10532 {
10533         struct task_struct *task = info;
10534         rcu_read_lock();
10535         perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
10536         rcu_read_unlock();
10537         return 0;
10538 }
10539
10540 static void perf_cgroup_attach(struct cgroup_taskset *tset)
10541 {
10542         struct task_struct *task;
10543         struct cgroup_subsys_state *css;
10544
10545         cgroup_taskset_for_each(task, css, tset)
10546                 task_function_call(task, __perf_cgroup_move, task);
10547 }
10548
10549 struct cgroup_subsys perf_event_cgrp_subsys = {
10550         .css_alloc      = perf_cgroup_css_alloc,
10551         .css_free       = perf_cgroup_css_free,
10552         .attach         = perf_cgroup_attach,
10553 };
10554 #endif /* CONFIG_CGROUP_PERF */