Merge branch 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[cascardo/linux.git] arch/x86/kernel/cpu/perf_event_intel.c
index 5073bf1..7a8b9d0 100644
  */
 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]           = 0x003c,
-  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x4f2e,
-  [PERF_COUNT_HW_CACHE_MISSES]         = 0x412e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
-  [PERF_COUNT_HW_BUS_CYCLES]           = 0x013c,
-  [PERF_COUNT_HW_REF_CPU_CYCLES]       = 0x0300, /* pseudo-encoding */
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
+       [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
+       [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
 };
 
 static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -749,7 +749,7 @@ static void intel_pmu_disable_all(void)
 
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
-       if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+       if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
 
        intel_pmu_pebs_disable_all();
@@ -765,9 +765,9 @@ static void intel_pmu_enable_all(int added)
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
-       if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+       if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
-                       cpuc->events[X86_PMC_IDX_FIXED_BTS];
+                       cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
 
                if (WARN_ON_ONCE(!event))
                        return;
@@ -873,7 +873,7 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = hwc->idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
 
        mask = 0xfULL << (idx * 4);
@@ -888,7 +888,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+       if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
@@ -917,7 +917,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = hwc->idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
 
        /*
@@ -951,7 +951,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+       if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                if (!__this_cpu_read(cpu_hw_events.enabled))
                        return;
 
@@ -1005,11 +1005,11 @@ static void intel_pmu_reset(void)
        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
-               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
+               wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+               wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
-               checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+               wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 
        if (ds)
                ds->bts_index = ds->bts_buffer_base;
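Note: checking_wrmsrl() above is converted to the renamed wrmsrl_safe() helper; behaviour is unchanged in that a faulting WRMSR returns an error instead of oopsing, which is why the reset path can blindly clear every counter MSR. A minimal usage sketch (illustration only, not part of the patch; the pr_debug() message is hypothetical):

	if (wrmsrl_safe(x86_pmu_config_addr(idx), 0ull))
		pr_debug("could not clear config MSR of counter %d\n", idx);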
@@ -1714,11 +1714,56 @@ static __init void intel_clovertown_quirk(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
+static int intel_snb_pebs_broken(int cpu)
+{
+       u32 rev = UINT_MAX; /* default to broken for unknown models */
+
+       switch (cpu_data(cpu).x86_model) {
+       case 42: /* SNB */
+               rev = 0x28;
+               break;
+
+       case 45: /* SNB-EP */
+               switch (cpu_data(cpu).x86_mask) {
+               case 6: rev = 0x618; break;
+               case 7: rev = 0x70c; break;
+               }
+       }
+
+       return (cpu_data(cpu).microcode < rev);
+}
+
+static void intel_snb_check_microcode(void)
+{
+       int pebs_broken = 0;
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if ((pebs_broken = intel_snb_pebs_broken(cpu)))
+                       break;
+       }
+       put_online_cpus();
+
+       if (pebs_broken == x86_pmu.pebs_broken)
+               return;
+
+       /*
+        * Serialized by the microcode lock..
+        */
+       if (x86_pmu.pebs_broken) {
+               pr_info("PEBS enabled due to microcode update\n");
+               x86_pmu.pebs_broken = 0;
+       } else {
+               pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
+               x86_pmu.pebs_broken = 1;
+       }
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
-       pr_warn("PEBS disabled due to CPU errata\n");
-       x86_pmu.pebs = 0;
-       x86_pmu.pebs_constraints = NULL;
+       x86_pmu.check_microcode = intel_snb_check_microcode;
+       intel_snb_check_microcode();
 }
 
 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
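With this hunk the SandyBridge quirk no longer disables PEBS outright: intel_snb_check_microcode() is installed as the x86_pmu.check_microcode callback and run once at init, so PEBS can be re-enabled after a fixed microcode revision is loaded. A standalone sketch of the revision gate implemented above (illustration only, not part of the patch; model 42 is client SNB, model 45 is SNB-EP, and an unknown model is treated as broken):

	#include <limits.h>	/* UINT_MAX */

	/* Mirrors the decision made by intel_snb_pebs_broken() above. */
	static int snb_pebs_ok(unsigned int model, unsigned int stepping,
			       unsigned int microcode)
	{
		unsigned int min_rev = UINT_MAX;	/* unknown model: assume broken */

		switch (model) {
		case 42:				/* SandyBridge */
			min_rev = 0x28;
			break;
		case 45:				/* SandyBridge-EP */
			if (stepping == 6)
				min_rev = 0x618;
			else if (stepping == 7)
				min_rev = 0x70c;
			break;
		}

		/* PEBS is usable once the running revision reaches the minimum. */
		return microcode >= min_rev;
	}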
@@ -1767,6 +1812,7 @@ __init int intel_pmu_init(void)
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
+       struct event_constraint *c;
        unsigned int unused;
        int version;
 
@@ -1802,6 +1848,8 @@ __init int intel_pmu_init(void)
        x86_pmu.events_maskl            = ebx.full;
        x86_pmu.events_mask_len         = eax.split.mask_length;
 
+       x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
@@ -1953,5 +2001,37 @@ __init int intel_pmu_init(void)
                }
        }
 
+       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+       }
+       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+       }
+
+       x86_pmu.intel_ctrl |=
+               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+       if (x86_pmu.event_constraints) {
+               /*
+                * event on fixed counter2 (REF_CYCLES) only works on this
+                * counter, so do not extend mask to generic counters
+                */
+               for_each_event_constraint(c, x86_pmu.event_constraints) {
+                       if (c->cmask != X86_RAW_EVENT_MASK
+                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+                               continue;
+                       }
+
+                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+                       c->weight += x86_pmu.num_counters;
+               }
+       }
+
        return 0;
 }
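For reference, the global-control mask built in the last hunk, worked out for the common SandyBridge case of 4 generic and 3 fixed counters, with INTEL_PMC_IDX_FIXED == 32 (illustration only, not part of the patch):

	/*
	 *	intel_ctrl  = (1 << 4) - 1;		   bits 0-3   -> 0xf
	 *	intel_ctrl |= ((1LL << 3) - 1) << 32;	   bits 32-34 -> 0x70000000f
	 *
	 * The constraint loop then ORs the generic-counter mask (0xf here) into
	 * each fixed-counter constraint's idxmsk64, except the REF_CYCLES one,
	 * so those events may also be scheduled on any generic counter.
	 */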