/*
* Performance counter x86 architecture code
*
- * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
- * Copyright(C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* For licencing details see kernel-base/COPYING
*/
#include <asm/stacktrace.h>
#include <asm/nmi.h>
-static bool perf_counters_initialized __read_mostly;
-
-/*
- * Number of (generic) HW counters:
- */
-static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
-static u64 counter_value_mask __read_mostly;
-static int counter_value_bits __read_mostly;
-
-static int nr_counters_fixed __read_mostly;
struct cpu_hw_counters {
struct perf_counter *counters[X86_PMC_IDX_MAX];
- unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long interrupts;
- u64 throttle_ctrl;
+ unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long interrupts;
int enabled;
};
/*
- * struct pmc_x86_ops - performance counter x86 ops
+ * struct x86_pmu - generic x86 pmu
*/
-struct pmc_x86_ops {
- u64 (*save_disable_all)(void);
- void (*restore_all)(u64);
- u64 (*get_status)(u64);
- void (*ack_status)(u64);
- void (*enable)(int, u64);
- void (*disable)(int, u64);
+struct x86_pmu {
+ const char *name;
+ int version;
+ int (*handle_irq)(struct pt_regs *);
+ void (*disable_all)(void);
+ void (*enable_all)(void);
+ void (*enable)(struct hw_perf_counter *, int);
+ void (*disable)(struct hw_perf_counter *, int);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
u64 (*raw_event)(u64);
int max_events;
+ int num_counters;
+ int num_counters_fixed;
+ int counter_bits;
+ u64 counter_mask;
+ u64 max_period;
+ u64 intel_ctrl;
};
-static struct pmc_x86_ops *pmc_ops __read_mostly;
+static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
.enabled = 1,
};
-static __read_mostly int intel_perfmon_version;
-
/*
* Intel PerfMon v3. Used on Core2 and later.
*/
[PERF_COUNT_BUS_CYCLES] = 0x013c,
};
-static u64 pmc_intel_event_map(int event)
+static u64 intel_pmu_event_map(int event)
{
return intel_perfmon_event_map[event];
}
-static u64 pmc_intel_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
+#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
+#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL
#define CORE_EVNTSEL_MASK \
(CORE_EVNTSEL_EVENT_MASK | \
CORE_EVNTSEL_UNIT_MASK | \
+ CORE_EVNTSEL_EDGE_MASK | \
+ CORE_EVNTSEL_INV_MASK | \
CORE_EVNTSEL_COUNTER_MASK)
return event & CORE_EVNTSEL_MASK;
[PERF_COUNT_BRANCH_MISSES] = 0x00c5,
};
-static u64 pmc_amd_event_map(int event)
+static u64 amd_pmu_event_map(int event)
{
return amd_perfmon_event_map[event];
}
-static u64 pmc_amd_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
+#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
+#define K7_EVNTSEL_INV_MASK 0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL
#define K7_EVNTSEL_MASK \
(K7_EVNTSEL_EVENT_MASK | \
K7_EVNTSEL_UNIT_MASK | \
+ K7_EVNTSEL_EDGE_MASK | \
+ K7_EVNTSEL_INV_MASK | \
K7_EVNTSEL_COUNTER_MASK)
return event & K7_EVNTSEL_MASK;
* Can only be executed on the CPU where the counter is active.
* Returns the delta events processed.
*/
-static void
+static u64
x86_perf_counter_update(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
- u64 prev_raw_count, new_raw_count, delta;
+ int shift = 64 - x86_pmu.counter_bits;
+ u64 prev_raw_count, new_raw_count;
+ s64 delta;
/*
* Careful: an NMI might modify the previous counter value.
* (counter-)time and add that to the generic counter.
*
* Careful, not all hw sign-extends above the physical width
- * of the count, so we do that by clipping the delta to 32 bits:
+ * of the count.
*/
- delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
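+	/*
+	 * For example, with 48-bit counters (shift == 16), prev_raw_count ==
+	 * 0xfffffffffffe and new_raw_count == 0x5 give delta == 7 after the
+	 * shifts, whether or not the hardware sign-extended the raw reads.
+	 */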
atomic64_add(delta, &counter->count);
atomic64_sub(delta, &hwc->period_left);
+
+ return new_raw_count;
}
-static atomic_t num_counters;
+static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
if (nmi_watchdog == NMI_LOCAL_APIC)
disable_lapic_nmi_watchdog();
- for (i = 0; i < nr_counters_generic; i++) {
- if (!reserve_perfctr_nmi(pmc_ops->perfctr + i))
+ for (i = 0; i < x86_pmu.num_counters; i++) {
+ if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
- for (i = 0; i < nr_counters_generic; i++) {
- if (!reserve_evntsel_nmi(pmc_ops->eventsel + i))
+ for (i = 0; i < x86_pmu.num_counters; i++) {
+ if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
eventsel_fail:
for (i--; i >= 0; i--)
- release_evntsel_nmi(pmc_ops->eventsel + i);
+ release_evntsel_nmi(x86_pmu.eventsel + i);
- i = nr_counters_generic;
+ i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
- release_perfctr_nmi(pmc_ops->perfctr + i);
+ release_perfctr_nmi(x86_pmu.perfctr + i);
if (nmi_watchdog == NMI_LOCAL_APIC)
enable_lapic_nmi_watchdog();
{
int i;
- for (i = 0; i < nr_counters_generic; i++) {
- release_perfctr_nmi(pmc_ops->perfctr + i);
- release_evntsel_nmi(pmc_ops->eventsel + i);
+ for (i = 0; i < x86_pmu.num_counters; i++) {
+ release_perfctr_nmi(x86_pmu.perfctr + i);
+ release_evntsel_nmi(x86_pmu.eventsel + i);
}
if (nmi_watchdog == NMI_LOCAL_APIC)
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
- if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
+ if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
+static inline int x86_pmu_initialized(void)
+{
+ return x86_pmu.handle_irq != NULL;
+}
+
/*
- * Setup the hardware configuration for a given hw_event_type
+ * Setup the hardware configuration for a given attr_type
*/
static int __hw_perf_counter_init(struct perf_counter *counter)
{
- struct perf_counter_hw_event *hw_event = &counter->hw_event;
+ struct perf_counter_attr *attr = &counter->attr;
struct hw_perf_counter *hwc = &counter->hw;
int err;
- if (unlikely(!perf_counters_initialized))
- return -EINVAL;
+ if (!x86_pmu_initialized())
+ return -ENODEV;
err = 0;
- if (atomic_inc_not_zero(&num_counters)) {
+ if (!atomic_inc_not_zero(&active_counters)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
+ if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
err = -EBUSY;
else
- atomic_inc(&num_counters);
+ atomic_inc(&active_counters);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
/*
* Count user and OS events unless requested not to.
*/
- if (!hw_event->exclude_user)
+ if (!attr->exclude_user)
hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!hw_event->exclude_kernel)
+ if (!attr->exclude_kernel)
hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
- /*
- * If privileged enough, allow NMI events:
- */
- hwc->nmi = 0;
- if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
- hwc->nmi = 1;
-
- hwc->irq_period = hw_event->irq_period;
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic counter period:
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
- hwc->irq_period = 0x7FFFFFFF;
+ if (!hwc->sample_period)
+ hwc->sample_period = x86_pmu.max_period;
- atomic64_set(&hwc->period_left, hwc->irq_period);
+ atomic64_set(&hwc->period_left, hwc->sample_period);
/*
* Raw event type provide the config in the event structure
*/
- if (perf_event_raw(hw_event)) {
- hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event));
+ if (perf_event_raw(attr)) {
+ hwc->config |= x86_pmu.raw_event(perf_event_config(attr));
} else {
- if (perf_event_id(hw_event) >= pmc_ops->max_events)
+ if (perf_event_id(attr) >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
- hwc->config |= pmc_ops->event_map(perf_event_id(hw_event));
+ hwc->config |= x86_pmu.event_map(perf_event_id(attr));
}
counter->destroy = hw_perf_counter_destroy;
return 0;
}
-static u64 pmc_intel_save_disable_all(void)
+static void intel_pmu_disable_all(void)
{
- u64 ctrl;
-
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-
- return ctrl;
}
-static u64 pmc_amd_save_disable_all(void)
+static void amd_pmu_disable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- int enabled, idx;
+ int idx;
+
+ if (!cpuc->enabled)
+ return;
- enabled = cpuc->enabled;
cpuc->enabled = 0;
/*
* ensure we write the disable before we start disabling the
- * counters proper, so that pcm_amd_enable() does the right thing.
+ * counters proper, so that amd_pmu_enable_counter() does the
+ * right thing.
*/
barrier();
- for (idx = 0; idx < nr_counters_generic; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
+ if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+ continue;
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
-
- return enabled;
}
-u64 hw_perf_save_disable(void)
+void hw_perf_disable(void)
{
- if (unlikely(!perf_counters_initialized))
- return 0;
-
- return pmc_ops->save_disable_all();
+ if (!x86_pmu_initialized())
+ return;
+ return x86_pmu.disable_all();
}
-/*
- * Exported because of ACPI idle
- */
-EXPORT_SYMBOL_GPL(hw_perf_save_disable);
-static void pmc_intel_restore_all(u64 ctrl)
+static void intel_pmu_enable_all(void)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}
-static void pmc_amd_restore_all(u64 ctrl)
+static void amd_pmu_enable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
int idx;
- cpuc->enabled = ctrl;
- barrier();
- if (!ctrl)
+ if (cpuc->enabled)
return;
- for (idx = 0; idx < nr_counters_generic; idx++) {
- if (test_bit(idx, cpuc->active_mask)) {
- u64 val;
+ cpuc->enabled = 1;
+ barrier();
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ u64 val;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+ if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+ continue;
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
}
-void hw_perf_restore(u64 ctrl)
+void hw_perf_enable(void)
{
- if (unlikely(!perf_counters_initialized))
+ if (!x86_pmu_initialized())
return;
-
- pmc_ops->restore_all(ctrl);
+ x86_pmu.enable_all();
}
-/*
- * Exported because of ACPI idle
- */
-EXPORT_SYMBOL_GPL(hw_perf_restore);
-static u64 pmc_intel_get_status(u64 mask)
+static inline u64 intel_pmu_get_status(void)
{
u64 status;
return status;
}
-static u64 pmc_amd_get_status(u64 mask)
-{
- u64 status = 0;
- int idx;
-
- for (idx = 0; idx < nr_counters_generic; idx++) {
- s64 val;
-
- if (!(mask & (1 << idx)))
- continue;
-
- rdmsrl(MSR_K7_PERFCTR0 + idx, val);
- val <<= (64 - counter_value_bits);
- if (val >= 0)
- status |= (1 << idx);
- }
-
- return status;
-}
-
-static u64 hw_perf_get_status(u64 mask)
-{
- if (unlikely(!perf_counters_initialized))
- return 0;
-
- return pmc_ops->get_status(mask);
-}
-
-static void pmc_intel_ack_status(u64 ack)
+static inline void intel_pmu_ack_status(u64 ack)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static void pmc_amd_ack_status(u64 ack)
-{
-}
-
-static void hw_perf_ack_status(u64 ack)
-{
- if (unlikely(!perf_counters_initialized))
- return;
-
- pmc_ops->ack_status(ack);
-}
-
-static void pmc_intel_enable(int idx, u64 config)
+static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
- config | ARCH_PERFMON_EVENTSEL0_ENABLE);
-}
-
-static void pmc_amd_enable(int idx, u64 config)
-{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-
- set_bit(idx, cpuc->active_mask);
- if (cpuc->enabled)
- config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-
- wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-}
-
-static void hw_perf_enable(int idx, u64 config)
-{
- if (unlikely(!perf_counters_initialized))
- return;
-
- pmc_ops->enable(idx, config);
-}
-
-static void pmc_intel_disable(int idx, u64 config)
-{
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
-}
-
-static void pmc_amd_disable(int idx, u64 config)
-{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-
- clear_bit(idx, cpuc->active_mask);
- wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-
+ int err;
+ err = checking_wrmsrl(hwc->config_base + idx,
+ hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
-static void hw_perf_disable(int idx, u64 config)
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
- if (unlikely(!perf_counters_initialized))
- return;
-
- pmc_ops->disable(idx, config);
+ int err;
+ err = checking_wrmsrl(hwc->config_base + idx,
+ hwc->config);
}
static inline void
-__pmc_fixed_disable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int __idx)
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
}
static inline void
-__pmc_generic_disable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int idx)
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- __pmc_fixed_disable(counter, hwc, idx);
- else
- hw_perf_disable(idx, hwc->config);
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ intel_pmu_disable_fixed(hwc, idx);
+ return;
+ }
+
+ x86_pmu_disable_counter(hwc, idx);
+}
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+ x86_pmu_disable_counter(hwc, idx);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
* Set the next IRQ period, based on the hwc->period_left value.
* To be called with the counter disabled in hw:
*/
-static void
-__hw_perf_counter_set_period(struct perf_counter *counter,
+static int
+x86_perf_counter_set_period(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
- s64 period = hwc->irq_period;
- int err;
+ s64 period = hwc->sample_period;
+ int err, ret = 0;
/*
 * If we are way outside a reasonable range then just skip forward:
if (unlikely(left <= -period)) {
left = period;
atomic64_set(&hwc->period_left, left);
+ ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
atomic64_set(&hwc->period_left, left);
+ ret = 1;
}
+ /*
+	 * Quirk: certain CPUs don't like it if just 1 event is left:
+ */
+ if (unlikely(left < 2))
+ left = 2;
+
+ if (left > x86_pmu.max_period)
+ left = x86_pmu.max_period;
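+
+	/*
+	 * The counter register is written with -left below, so the hardware
+	 * counts up and overflows after 'left' more events.
+	 */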
per_cpu(prev_left[idx], smp_processor_id()) = left;
atomic64_set(&hwc->prev_count, (u64)-left);
err = checking_wrmsrl(hwc->counter_base + idx,
- (u64)(-left) & counter_value_mask);
+ (u64)(-left) & x86_pmu.counter_mask);
+
+ return ret;
}
static inline void
-__pmc_fixed_enable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int __idx)
+intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
-static void
-__pmc_generic_enable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ intel_pmu_enable_fixed(hwc, idx);
+ return;
+ }
+
+ x86_pmu_enable_counter(hwc, idx);
+}
+
+static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- __pmc_fixed_enable(counter, hwc, idx);
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+ if (cpuc->enabled)
+ x86_pmu_enable_counter(hwc, idx);
else
- hw_perf_enable(idx, hwc->config);
+ x86_pmu_disable_counter(hwc, idx);
}
static int
{
unsigned int event;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- return -1;
-
- if (unlikely(hwc->nmi))
+ if (!x86_pmu.num_counters_fixed)
return -1;
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
return X86_PMC_IDX_FIXED_BUS_CYCLES;
return -1;
/*
* Find a PMC slot for the freshly enabled / scheduled in counter:
*/
-static int pmc_generic_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
* Try to get the fixed counter, if that is already taken
* then try to get a generic counter:
*/
- if (test_and_set_bit(idx, cpuc->used))
+ if (test_and_set_bit(idx, cpuc->used_mask))
goto try_generic;
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
} else {
idx = hwc->idx;
/* Try to get the previous generic counter again */
- if (test_and_set_bit(idx, cpuc->used)) {
+ if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
- idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
- if (idx == nr_counters_generic)
+ idx = find_first_zero_bit(cpuc->used_mask,
+ x86_pmu.num_counters);
+ if (idx == x86_pmu.num_counters)
return -EAGAIN;
- set_bit(idx, cpuc->used);
+ set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
- hwc->config_base = pmc_ops->eventsel;
- hwc->counter_base = pmc_ops->perfctr;
+ hwc->config_base = x86_pmu.eventsel;
+ hwc->counter_base = x86_pmu.perfctr;
}
- perf_counters_lapic_init(hwc->nmi);
+ perf_counters_lapic_init();
- __pmc_generic_disable(counter, hwc, idx);
+ x86_pmu.disable(hwc, idx);
cpuc->counters[idx] = counter;
- /*
- * Make it visible before enabling the hw:
- */
- smp_wmb();
+ set_bit(idx, cpuc->active_mask);
- __hw_perf_counter_set_period(counter, hwc, idx);
- __pmc_generic_enable(counter, hwc, idx);
+ x86_perf_counter_set_period(counter, hwc, idx);
+ x86_pmu.enable(hwc, idx);
return 0;
}
+static void x86_pmu_unthrottle(struct perf_counter *counter)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct hw_perf_counter *hwc = &counter->hw;
+
+ if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
+ cpuc->counters[hwc->idx] != counter))
+ return;
+
+ x86_pmu.enable(hwc, hwc->idx);
+}
+
void perf_counter_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
struct cpu_hw_counters *cpuc;
+ unsigned long flags;
int cpu, idx;
- if (!nr_counters_generic)
+ if (!x86_pmu.num_counters)
return;
- local_irq_disable();
+ local_irq_save(flags);
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_counters, cpu);
- if (intel_perfmon_version >= 2) {
+ if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
}
- pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
+ pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
- for (idx = 0; idx < nr_counters_generic; idx++) {
- rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
- rdmsrl(pmc_ops->perfctr + idx, pmc_count);
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
+ rdmsrl(x86_pmu.perfctr + idx, pmc_count);
prev_left = per_cpu(prev_left[idx], cpu);
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < nr_counters_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
- local_irq_enable();
+ local_irq_restore(flags);
}
-static void pmc_generic_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
- unsigned int idx = hwc->idx;
+ int idx = hwc->idx;
- __pmc_generic_disable(counter, hwc, idx);
+ /*
+ * Must be done before we disable, otherwise the nmi handler
+ * could reenable again:
+ */
+ clear_bit(idx, cpuc->active_mask);
+ x86_pmu.disable(hwc, idx);
- clear_bit(idx, cpuc->used);
- cpuc->counters[idx] = NULL;
/*
* Make sure the cleared pointer becomes visible before we
* (potentially) free the counter:
*/
- smp_wmb();
+ barrier();
/*
* Drain the remaining delta count out of a counter
* that we are disabling:
*/
x86_perf_counter_update(counter, hwc, idx);
+ cpuc->counters[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
}
/*
* Save and restart an expired counter. Called by NMI contexts,
* so it has to be careful about preempting normal counter ops:
*/
-static void perf_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
+ int ret;
x86_perf_counter_update(counter, hwc, idx);
- __hw_perf_counter_set_period(counter, hwc, idx);
+ ret = x86_perf_counter_set_period(counter, hwc, idx);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- __pmc_generic_enable(counter, hwc, idx);
+ intel_pmu_enable_counter(hwc, idx);
+
+ return ret;
+}
+
+static void intel_pmu_reset(void)
+{
+ unsigned long flags;
+ int idx;
+
+ if (!x86_pmu.num_counters)
+ return;
+
+ local_irq_save(flags);
+
+ printk("clearing PMU state on CPU#%d\n", smp_processor_id());
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
+ checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
+ }
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ }
+
+ local_irq_restore(flags);
}
-/*
- * Maximum interrupt frequency of 100KHz per CPU
- */
-#define PERFMON_MAX_INTERRUPTS (100000/HZ)
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
-static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs)
{
- int bit, cpu = smp_processor_id();
+ struct cpu_hw_counters *cpuc;
+ int bit, cpu, loops;
u64 ack, status;
- struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
- int ret = 0;
- cpuc->throttle_ctrl = hw_perf_save_disable();
+ cpu = smp_processor_id();
+ cpuc = &per_cpu(cpu_hw_counters, cpu);
- status = hw_perf_get_status(cpuc->throttle_ctrl);
- if (!status)
- goto out;
+ perf_disable();
+ status = intel_pmu_get_status();
+ if (!status) {
+ perf_enable();
+ return 0;
+ }
- ret = 1;
+ loops = 0;
again:
+ if (++loops > 100) {
+ WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+ perf_counter_print_debug();
+ intel_pmu_reset();
+ perf_enable();
+ return 1;
+ }
+
inc_irq_stat(apic_perf_irqs);
ack = status;
for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_counter *counter = cpuc->counters[bit];
clear_bit(bit, (unsigned long *) &status);
- if (!counter)
+ if (!test_bit(bit, cpuc->active_mask))
+ continue;
+
+ if (!intel_pmu_save_and_restart(counter))
continue;
- perf_save_and_restart(counter);
- if (perf_counter_overflow(counter, nmi, regs))
- __pmc_generic_disable(counter, &counter->hw, bit);
+ if (perf_counter_overflow(counter, 1, regs, 0))
+ intel_pmu_disable_counter(&counter->hw, bit);
}
- hw_perf_ack_status(ack);
+ intel_pmu_ack_status(ack);
/*
* Repeat if there is more work to be done:
*/
- status = hw_perf_get_status(cpuc->throttle_ctrl);
+ status = intel_pmu_get_status();
if (status)
goto again;
-out:
- /*
- * Restore - do not reenable when global enable is off or throttled:
- */
- if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
- hw_perf_restore(cpuc->throttle_ctrl);
- return ret;
+ perf_enable();
+
+ return 1;
}
-void perf_counter_unthrottle(void)
+static int amd_pmu_handle_irq(struct pt_regs *regs)
{
+ int cpu, idx, handled = 0;
struct cpu_hw_counters *cpuc;
+ struct perf_counter *counter;
+ struct hw_perf_counter *hwc;
+ u64 val;
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return;
+ cpu = smp_processor_id();
+ cpuc = &per_cpu(cpu_hw_counters, cpu);
- if (unlikely(!perf_counters_initialized))
- return;
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ counter = cpuc->counters[idx];
+ hwc = &counter->hw;
- cpuc = &__get_cpu_var(cpu_hw_counters);
- if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
- if (printk_ratelimit())
- printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
- hw_perf_restore(cpuc->throttle_ctrl);
+ val = x86_perf_counter_update(counter, hwc, idx);
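+		/*
+		 * The counter was programmed with -left, i.e. with its top
+		 * bit set; the top bit only clears once the counter wraps,
+		 * so a still-set top bit means no overflow has happened.
+		 */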
+ if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+ continue;
+
+ /* counter overflow */
+ handled = 1;
+ inc_irq_stat(apic_perf_irqs);
+ if (!x86_perf_counter_set_period(counter, hwc, idx))
+ continue;
+
+ if (perf_counter_overflow(counter, 1, regs, 0))
+ amd_pmu_disable_counter(hwc, idx);
}
- cpuc->interrupts = 0;
-}
-void smp_perf_counter_interrupt(struct pt_regs *regs)
-{
- irq_enter();
- apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
- ack_APIC_irq();
- __smp_perf_counter_interrupt(regs, 0);
- irq_exit();
+ return handled;
}
void smp_perf_pending_interrupt(struct pt_regs *regs)
apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}
-void perf_counters_lapic_init(int nmi)
+void perf_counters_lapic_init(void)
{
- u32 apic_val;
-
- if (!perf_counters_initialized)
+ if (!x86_pmu_initialized())
return;
+
/*
- * Enable the performance counter vector in the APIC LVT:
+ * Always use NMI for PMU
*/
- apic_val = apic_read(APIC_LVTERR);
-
- apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
- if (nmi)
- apic_write(APIC_LVTPC, APIC_DM_NMI);
- else
- apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
- apic_write(APIC_LVTERR, apic_val);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static int __kprobes
{
struct die_args *args = __args;
struct pt_regs *regs;
- int ret;
+
+ if (!atomic_read(&active_counters))
+ return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
regs = args->regs;
apic_write(APIC_LVTPC, APIC_DM_NMI);
- ret = __smp_perf_counter_interrupt(regs, 1);
+ /*
+	 * Can't rely on the handled return value to say it was our NMI, two
+	 * counters could trigger 'simultaneously' raising two back-to-back
+	 * NMIs.
+ *
+ * If the first NMI handles both, the latter will be empty and daze
+ * the CPU.
+ */
+ x86_pmu.handle_irq(regs);
- return ret ? NOTIFY_STOP : NOTIFY_OK;
+ return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
.priority = 1
};
-static struct pmc_x86_ops pmc_intel_ops = {
- .save_disable_all = pmc_intel_save_disable_all,
- .restore_all = pmc_intel_restore_all,
- .get_status = pmc_intel_get_status,
- .ack_status = pmc_intel_ack_status,
- .enable = pmc_intel_enable,
- .disable = pmc_intel_disable,
+static struct x86_pmu intel_pmu = {
+ .name = "Intel",
+ .handle_irq = intel_pmu_handle_irq,
+ .disable_all = intel_pmu_disable_all,
+ .enable_all = intel_pmu_enable_all,
+ .enable = intel_pmu_enable_counter,
+ .disable = intel_pmu_disable_counter,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = pmc_intel_event_map,
- .raw_event = pmc_intel_raw_event,
+ .event_map = intel_pmu_event_map,
+ .raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
+ /*
+ * Intel PMCs cannot be accessed sanely above 32 bit width,
+ * so we install an artificial 1<<31 period regardless of
+ * the generic counter period:
+ */
+ .max_period = (1ULL << 31) - 1,
};
-static struct pmc_x86_ops pmc_amd_ops = {
- .save_disable_all = pmc_amd_save_disable_all,
- .restore_all = pmc_amd_restore_all,
- .get_status = pmc_amd_get_status,
- .ack_status = pmc_amd_ack_status,
- .enable = pmc_amd_enable,
- .disable = pmc_amd_disable,
+static struct x86_pmu amd_pmu = {
+ .name = "AMD",
+ .handle_irq = amd_pmu_handle_irq,
+ .disable_all = amd_pmu_disable_all,
+ .enable_all = amd_pmu_enable_all,
+ .enable = amd_pmu_enable_counter,
+ .disable = amd_pmu_disable_counter,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
- .event_map = pmc_amd_event_map,
- .raw_event = pmc_amd_raw_event,
+ .event_map = amd_pmu_event_map,
+ .raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
+ .num_counters = 4,
+ .counter_bits = 48,
+ .counter_mask = (1ULL << 48) - 1,
+ /* use highest bit to detect overflow */
+ .max_period = (1ULL << 47) - 1,
};
-static struct pmc_x86_ops *pmc_intel_init(void)
+static int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
unsigned int unused;
unsigned int ebx;
+ int version;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return -ENODEV;
/*
 * Check whether the Architectural PerfMon supports
 * Branch Misses Retired Event or not.
 */
cpuid(10, &eax.full, &ebx, &unused, &edx.full);
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
- return NULL;
+ return -ENODEV;
- intel_perfmon_version = eax.split.version_id;
- if (intel_perfmon_version < 2)
- return NULL;
+ version = eax.split.version_id;
+ if (version < 2)
+ return -ENODEV;
- pr_info("Intel Performance Monitoring support detected.\n");
- pr_info("... version: %d\n", intel_perfmon_version);
- pr_info("... bit width: %d\n", eax.split.bit_width);
- pr_info("... mask length: %d\n", eax.split.mask_length);
+ x86_pmu = intel_pmu;
+ x86_pmu.version = version;
+ x86_pmu.num_counters = eax.split.num_counters;
- nr_counters_generic = eax.split.num_counters;
- nr_counters_fixed = edx.split.num_counters_fixed;
- counter_value_mask = (1ULL << eax.split.bit_width) - 1;
+ /*
+ * Quirk: v2 perfmon does not report fixed-purpose counters, so
+ * assume at least 3 counters:
+ */
+ x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
- return &pmc_intel_ops;
-}
+ x86_pmu.counter_bits = eax.split.bit_width;
+ x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
-static struct pmc_x86_ops *pmc_amd_init(void)
-{
- nr_counters_generic = 4;
- nr_counters_fixed = 0;
- counter_value_mask = 0x0000FFFFFFFFFFFFULL;
- counter_value_bits = 48;
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
- pr_info("AMD Performance Monitoring support detected.\n");
+ return 0;
+}
- return &pmc_amd_ops;
+static int amd_pmu_init(void)
+{
+ x86_pmu = amd_pmu;
+ return 0;
}
void __init init_hw_perf_counters(void)
{
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return;
+ int err;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
- pmc_ops = pmc_intel_init();
+ err = intel_pmu_init();
break;
case X86_VENDOR_AMD:
- pmc_ops = pmc_amd_init();
+ err = amd_pmu_init();
break;
+ default:
+ return;
}
- if (!pmc_ops)
+ if (err != 0)
return;
- pr_info("... num counters: %d\n", nr_counters_generic);
- if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
- nr_counters_generic = X86_PMC_MAX_GENERIC;
+ pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
+ pr_info("... version: %d\n", x86_pmu.version);
+ pr_info("... bit width: %d\n", x86_pmu.counter_bits);
+
+ pr_info("... num counters: %d\n", x86_pmu.num_counters);
+ if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+ x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
- nr_counters_generic, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
}
- perf_counter_mask = (1 << nr_counters_generic) - 1;
- perf_max_counters = nr_counters_generic;
+ perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
+ perf_max_counters = x86_pmu.num_counters;
- pr_info("... value mask: %016Lx\n", counter_value_mask);
+ pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
+ pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
- nr_counters_fixed = X86_PMC_MAX_FIXED;
+ if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+ x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
- nr_counters_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
}
- pr_info("... fixed counters: %d\n", nr_counters_fixed);
+ pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);
- perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+ perf_counter_mask |=
+ ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
pr_info("... counter mask: %016Lx\n", perf_counter_mask);
- perf_counters_initialized = true;
- perf_counters_lapic_init(0);
+ perf_counters_lapic_init();
register_die_notifier(&perf_counter_nmi_notifier);
}
-static void pmc_generic_read(struct perf_counter *counter)
+static inline void x86_pmu_read(struct perf_counter *counter)
{
x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}
-static const struct hw_perf_counter_ops x86_perf_counter_ops = {
- .enable = pmc_generic_enable,
- .disable = pmc_generic_disable,
- .read = pmc_generic_read,
+static const struct pmu pmu = {
+ .enable = x86_pmu_enable,
+ .disable = x86_pmu_disable,
+ .read = x86_pmu_read,
+ .unthrottle = x86_pmu_unthrottle,
};
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
int err;
if (err)
return ERR_PTR(err);
- return &x86_perf_counter_ops;
+ return &pmu;
}
/*