perf/x86/intel: Add Haswell PEBS record support
Author:     Andi Kleen <ak@linux.intel.com>
AuthorDate: Tue, 18 Jun 2013 00:36:47 +0000 (17:36 -0700)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 19 Jun 2013 12:43:32 +0000 (14:43 +0200)
Add support for the Haswell extended (fmt2) PEBS format.

It has a superset of the nhm (fmt1) PEBS fields, but the record is
longer, so the code paths that walk the PEBS buffer have to advance
by the per-format record size instead of a fixed struct size.
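As an aside (illustration only, not part of this patch), the layout
relationship between the two record formats amounts to:

    /*
     * Sketch: the fmt2 (Haswell) record is the fmt1 (nhm) record
     * plus two trailing u64 fields, real_ip and tsx_tuning. A check
     * like this could live near the struct definitions.
     */
    BUILD_BUG_ON(sizeof(struct pebs_record_hsw) !=
                 sizeof(struct pebs_record_nhm) + 2 * sizeof(u64));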

The main advantage is the new "EventingRip" field, which directly
reports the IP of the instruction that triggered the event, rather
than the usual off-by-one (next) instruction. So with precise == 2
we use it directly and no longer need to take the LBRs and walk the
basic blocks to correct the IP. This lowers the overhead of precise
sampling significantly.
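As a user-space usage sketch (not part of this patch; the event
choice and sample period are arbitrary), precise == 2 corresponds to
setting perf_event_attr.precise_ip = 2:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Open a cycles sampling event asking for the exact eventing IP. */
    static int open_precise_cycles(pid_t tid)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100003;
            attr.sample_type = PERF_SAMPLE_IP;
            /*
             * precise_ip == 2 requests the exact IP; on fmt2 PEBS
             * hardware this is now served from EventingRip instead
             * of the LBR fixup path.
             */
            attr.precise_ip = 2;

            return syscall(__NR_perf_event_open, &attr, tid, -1, -1, 0);
    }

This is the same request the perf tool makes for the ":pp" event
modifier (e.g. "perf record -e cycles:pp").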

Some other features are added in later patches.

Reviewed-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Andi Kleen <ak@linux.jf.intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: http://lkml.kernel.org/r/1371515812-9646-2-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel_ds.c

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e52a9e5..ab33952 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -403,7 +403,8 @@ int x86_pmu_hw_config(struct perf_event *event)
                 * check that PEBS LBR correction does not conflict with
                 * whatever the user is asking with attr->branch_sample_type
                 */
-               if (event->attr.precise_ip > 1) {
+               if (event->attr.precise_ip > 1 &&
+                   x86_pmu.intel_cap.pebs_format < 2) {
                        u64 *br_type = &event->attr.branch_sample_type;
 
                        if (has_branch_stack(event)) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 60250f6..2a63d13 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -165,6 +165,22 @@ struct pebs_record_nhm {
        u64 status, dla, dse, lat;
 };
 
+/*
+ * Same as pebs_record_nhm, with two additional fields.
+ */
+struct pebs_record_hsw {
+       struct pebs_record_nhm nhm;
+       /*
+        * Real IP of the event. In the Intel documentation this
+        * is called eventingrip.
+        */
+       u64 real_ip;
+       /*
+        * TSX tuning information field: abort cycles and abort flags.
+        */
+       u64 tsx_tuning;
+};
+
 void init_debug_store_on_cpu(int cpu)
 {
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -697,6 +713,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         */
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct pebs_record_nhm *pebs = __pebs;
+       struct pebs_record_hsw *pebs_hsw = __pebs;
        struct perf_sample_data data;
        struct pt_regs regs;
        u64 sample_type;
@@ -753,7 +770,10 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
        regs.bp = pebs->bp;
        regs.sp = pebs->sp;
 
-       if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
+       if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
+               regs.ip = pebs_hsw->real_ip;
+               regs.flags |= PERF_EFLAGS_EXACT;
+       } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
                regs.flags |= PERF_EFLAGS_EXACT;
        else
                regs.flags &= ~PERF_EFLAGS_EXACT;
@@ -806,35 +826,22 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
        __intel_pmu_pebs_event(event, iregs, at);
 }
 
-static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+static void __intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, void *at,
+                                       void *top)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
-       struct pebs_record_nhm *at, *top;
        struct perf_event *event = NULL;
        u64 status = 0;
-       int bit, n;
-
-       if (!x86_pmu.pebs_active)
-               return;
-
-       at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
-       top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+       int bit;
 
        ds->pebs_index = ds->pebs_buffer_base;
 
-       n = top - at;
-       if (n <= 0)
-               return;
-
-       /*
-        * Should not happen, we program the threshold at 1 and do not
-        * set a reset value.
-        */
-       WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
+       for (; at < top; at += x86_pmu.pebs_record_size) {
+               struct pebs_record_nhm *p = at;
 
-       for ( ; at < top; at++) {
-               for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
+               for_each_set_bit(bit, (unsigned long *)&p->status,
+                                x86_pmu.max_pebs_events) {
                        event = cpuc->events[bit];
                        if (!test_bit(bit, cpuc->active_mask))
                                continue;
@@ -857,6 +864,61 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
        }
 }
 
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct debug_store *ds = cpuc->ds;
+       struct pebs_record_nhm *at, *top;
+       int n;
+
+       if (!x86_pmu.pebs_active)
+               return;
+
+       at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+       top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
+       ds->pebs_index = ds->pebs_buffer_base;
+
+       n = top - at;
+       if (n <= 0)
+               return;
+
+       /*
+        * Should not happen, we program the threshold at 1 and do not
+        * set a reset value.
+        */
+       WARN_ONCE(n > x86_pmu.max_pebs_events,
+                 "Unexpected number of pebs records %d\n", n);
+
+       return __intel_pmu_drain_pebs_nhm(iregs, at, top);
+}
+
+static void intel_pmu_drain_pebs_hsw(struct pt_regs *iregs)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct debug_store *ds = cpuc->ds;
+       struct pebs_record_hsw *at, *top;
+       int n;
+
+       if (!x86_pmu.pebs_active)
+               return;
+
+       at  = (struct pebs_record_hsw *)(unsigned long)ds->pebs_buffer_base;
+       top = (struct pebs_record_hsw *)(unsigned long)ds->pebs_index;
+
+       n = top - at;
+       if (n <= 0)
+               return;
+       /*
+        * Should not happen, we program the threshold at 1 and do not
+        * set a reset value.
+        */
+       WARN_ONCE(n > x86_pmu.max_pebs_events,
+                 "Unexpected number of pebs records %d\n", n);
+
+       return __intel_pmu_drain_pebs_nhm(iregs, at, top);
+}
+
 /*
  * BTS, PEBS probe and setup
  */
@@ -888,6 +950,12 @@ void intel_ds_init(void)
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
                        break;
 
+               case 2:
+                       pr_cont("PEBS fmt2%c, ", pebs_type);
+                       x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
+                       x86_pmu.drain_pebs = intel_pmu_drain_pebs_hsw;
+                       break;
+
                default:
                        printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
                        x86_pmu.pebs = 0;