rcu_read_unlock();
}
-void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(struct perf_event *event,
+                                      struct perf_event_mmap_page *userpg, u64 now)
{
}
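
Passing the event into the weak hook lets an architecture tailor what it exports to user space per event rather than globally. As a rough illustration only, here is a minimal sketch of an arch override that advertises a user-readable cycle counter; my_read_cycles(), my_tc_mult and my_tc_shift are assumed arch-specific helpers/parameters, not existing kernel API (the userpg fields are the real struct perf_event_mmap_page members):

#include <linux/perf_event.h>
#include <linux/math64.h>

/*
 * Hypothetical arch override: publish clock parameters so user space
 * can convert raw cycle deltas read alongside the counter into ns.
 */
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	userpg->cap_user_time = 1;
	userpg->time_mult     = my_tc_mult;	/* assumed parameter */
	userpg->time_shift    = my_tc_shift;	/* assumed parameter */
	/* Anchor the scaled cycle count to 'now' (ns). */
	userpg->time_offset   = now - mul_u64_u32_shr(my_read_cycles(),
						      my_tc_mult, my_tc_shift);
}
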
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
- arch_perf_update_userpage(userpg, now);
+ arch_perf_update_userpage(event, userpg, now);
barrier();
++userpg->lock;
atomic_inc(&event->mmap_count);
atomic_inc(&event->rb->mmap_count);
+
+ if (event->pmu->event_mapped)
+ event->pmu->event_mapped(event);
}
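
The new event_mapped/event_unmapped callbacks give a PMU driver a notification every time an event's pages are mapped into or torn down from a user address space. As a minimal sketch of a consumer, assume a hypothetical driver that grants direct user-space counter access only while at least one mapping is live; my_enable_user_access()/my_disable_user_access() are assumed arch helpers, not existing kernel API:

#include <linux/atomic.h>
#include <linux/perf_event.h>

static atomic_t my_active_mmaps = ATOMIC_INIT(0);

static void my_pmu_event_mapped(struct perf_event *event)
{
	/* First live mapping: allow user space to read counters directly. */
	if (atomic_inc_return(&my_active_mmaps) == 1)
		my_enable_user_access();	/* assumed helper */
}

static void my_pmu_event_unmapped(struct perf_event *event)
{
	/* Last mapping gone: revoke direct user-space access. */
	if (atomic_dec_and_test(&my_active_mmaps))
		my_disable_user_access();	/* assumed helper */
}

Note that the callbacks are invoked from the mmap open/close paths above, once per mapping, which is why the sketch keeps its own reference count.
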
/*
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
+ if (event->pmu->event_unmapped)
+ event->pmu->event_unmapped(event);
+
atomic_dec(&rb->mmap_count);
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
+ if (event->pmu->event_mapped)
+ event->pmu->event_mapped(event);
+
return ret;
}
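
For completeness, hooking the callbacks up is just two new initializers in the driver's struct pmu; continuing the hypothetical sketch above (the other members shown are placeholders for the driver's usual ops):

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_hw_context,
	.event_init	= my_pmu_event_init,	/* assumed */
	.add		= my_pmu_add,		/* assumed */
	.del		= my_pmu_del,		/* assumed */
	.event_mapped	= my_pmu_event_mapped,
	.event_unmapped	= my_pmu_event_unmapped,
};

Both members are optional, as the NULL checks in the hunks above show, so existing PMU drivers need no changes.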