unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long interrupts;
- u64 throttle_ctrl;
int enabled;
};
struct x86_pmu {
const char *name;
int version;
- int (*handle_irq)(struct pt_regs *, int);
- u64 (*save_disable_all)(void);
- void (*restore_all)(u64);
+ int (*handle_irq)(struct pt_regs *);
+ void (*disable_all)(void);
+ void (*enable_all)(void);
void (*enable)(struct hw_perf_counter *, int);
void (*disable)(struct hw_perf_counter *, int);
unsigned eventsel;
int counter_bits;
u64 counter_mask;
u64 max_period;
+ u64 intel_ctrl;
};
static struct x86_pmu x86_pmu __read_mostly;
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
+#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
+#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL
#define CORE_EVNTSEL_MASK \
(CORE_EVNTSEL_EVENT_MASK | \
CORE_EVNTSEL_UNIT_MASK | \
+ CORE_EVNTSEL_EDGE_MASK | \
+ CORE_EVNTSEL_INV_MASK | \
CORE_EVNTSEL_COUNTER_MASK)
return event & CORE_EVNTSEL_MASK;
{
#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
+#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
+#define K7_EVNTSEL_INV_MASK 0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL
#define K7_EVNTSEL_MASK \
(K7_EVNTSEL_EVENT_MASK | \
K7_EVNTSEL_UNIT_MASK | \
+ K7_EVNTSEL_EDGE_MASK | \
+ K7_EVNTSEL_INV_MASK | \
K7_EVNTSEL_COUNTER_MASK)
return event & K7_EVNTSEL_MASK;
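
For illustration, a standalone userspace sketch (not part of this patch; the config value is hypothetical) of what filtering through these masks accomplishes: the event, umask, edge, inv and cmask fields of a raw config survive, while the USR, OS, INT and ENABLE control bits are stripped and stay kernel-owned.

/*
 * Standalone sketch, not from the patch. A raw config filtered through
 * the Core EVNTSEL mask loses the USR(16), OS(17), INT(20) and
 * ENABLE(22) control bits, which the kernel must set itself.
 */
#include <stdint.h>
#include <stdio.h>

#define EVNTSEL_MASK	0xFF84FFFFULL	/* event|umask|edge|inv|cmask */

int main(void)
{
	uint64_t raw = 0x005300C0ULL;	/* event 0xC0 plus USR/OS/INT/EN bits */

	printf("raw:      0x%08llx\n", (unsigned long long)raw);
	printf("filtered: 0x%08llx\n",
	       (unsigned long long)(raw & EVNTSEL_MASK));	/* 0x000000c0 */
	return 0;
}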
x86_perf_counter_update(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
- u64 prev_raw_count, new_raw_count, delta;
+ int shift = 64 - x86_pmu.counter_bits;
+ u64 prev_raw_count, new_raw_count;
+ s64 delta;
/*
* Careful: an NMI might modify the previous counter value.
* (counter-)time and add that to the generic counter.
*
* Careful, not all hw sign-extends above the physical width
- * of the count, so we do that by clipping the delta to 32 bits:
+ * of the count.
*/
- delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
atomic64_add(delta, &counter->count);
atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
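
A standalone sketch of the sign-extension trick above (the 40-bit counter width is an assumed example): shifting both raw values up to bit 63 before subtracting makes the arithmetic right shift sign-extend the delta correctly for any counter width, replacing the old hard-coded 32-bit clip.

/* Standalone sketch, not from the patch; assumes a 40-bit counter. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int shift = 64 - 40;			/* 64 - counter_bits */
	uint64_t prev_raw = 0xFFFFFFFFFFULL;	/* counter about to wrap */
	uint64_t new_raw  = 0x0000000005ULL;	/* read back after the wrap */
	int64_t delta;

	delta = ((int64_t)(new_raw << shift) -
		 (int64_t)(prev_raw << shift)) >> shift;

	printf("delta = %lld\n", (long long)delta);	/* 6, not ~2^64 */
	return 0;
}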
-static atomic_t num_counters;
+static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
- if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
+ if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
- * Setup the hardware configuration for a given hw_event_type
+ * Setup the hardware configuration for a given attr_type
*/
static int __hw_perf_counter_init(struct perf_counter *counter)
{
- struct perf_counter_hw_event *hw_event = &counter->hw_event;
+ struct perf_counter_attr *attr = &counter->attr;
struct hw_perf_counter *hwc = &counter->hw;
int err;
return -ENODEV;
err = 0;
- if (!atomic_inc_not_zero(&num_counters)) {
+ if (!atomic_inc_not_zero(&active_counters)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
+ if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
err = -EBUSY;
else
- atomic_inc(&num_counters);
+ atomic_inc(&active_counters);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
/*
* Count user and OS events unless requested not to.
*/
- if (!hw_event->exclude_user)
+ if (!attr->exclude_user)
hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!hw_event->exclude_kernel)
+ if (!attr->exclude_kernel)
hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
- /*
- * If privileged enough, allow NMI events:
- */
- hwc->nmi = 0;
- if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
- hwc->nmi = 1;
+ if (!hwc->sample_period)
+ hwc->sample_period = x86_pmu.max_period;
- hwc->irq_period = hw_event->irq_period;
- if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
- hwc->irq_period = x86_pmu.max_period;
-
- atomic64_set(&hwc->period_left, hwc->irq_period);
+ atomic64_set(&hwc->period_left, hwc->sample_period);
/*
* Raw event type provide the config in the event structure
*/
- if (perf_event_raw(hw_event)) {
- hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
+ if (perf_event_raw(attr)) {
+ hwc->config |= x86_pmu.raw_event(perf_event_config(attr));
} else {
- if (perf_event_id(hw_event) >= x86_pmu.max_events)
+ if (perf_event_id(attr) >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
- hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
+ hwc->config |= x86_pmu.event_map(perf_event_id(attr));
}
counter->destroy = hw_perf_counter_destroy;
return 0;
}
-static u64 intel_pmu_save_disable_all(void)
+static void intel_pmu_disable_all(void)
{
- u64 ctrl;
-
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-
- return ctrl;
}
-static u64 amd_pmu_save_disable_all(void)
+static void amd_pmu_disable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- int enabled, idx;
+ int idx;
+
+ if (!cpuc->enabled)
+ return;
- enabled = cpuc->enabled;
cpuc->enabled = 0;
/*
* ensure we write the disable before we start disabling the
val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
-
- return enabled;
}
-u64 hw_perf_save_disable(void)
+void hw_perf_disable(void)
{
if (!x86_pmu_initialized())
- return 0;
- return x86_pmu.save_disable_all();
+ return;
+ x86_pmu.disable_all();
}
-/*
- * Exported because of ACPI idle
- */
-EXPORT_SYMBOL_GPL(hw_perf_save_disable);
-static void intel_pmu_restore_all(u64 ctrl)
+static void intel_pmu_enable_all(void)
{
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}
-static void amd_pmu_restore_all(u64 ctrl)
+static void amd_pmu_enable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
int idx;
- cpuc->enabled = ctrl;
- barrier();
- if (!ctrl)
+ if (cpuc->enabled)
return;
+ cpuc->enabled = 1;
+ barrier();
+
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
}
}
-void hw_perf_restore(u64 ctrl)
+void hw_perf_enable(void)
{
if (!x86_pmu_initialized())
return;
- x86_pmu.restore_all(ctrl);
+ x86_pmu.enable_all();
}
-/*
- * Exported because of ACPI idle
- */
-EXPORT_SYMBOL_GPL(hw_perf_restore);
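
The hunks above are the heart of the interface change: the old hw_perf_save_disable()/hw_perf_restore(ctrl) pair forced every caller to carry a u64 of PMU state, while the new hw_perf_disable()/hw_perf_enable() pair keeps that state in the driver (cpuc->enabled on AMD, x86_pmu.intel_ctrl on Intel), which is also why the ACPI-idle exports can go. A standalone sketch of the two calling conventions (stub bodies, not the kernel functions):

/*
 * Standalone sketch with stub bodies (not the kernel functions);
 * only the calling convention is the point here.
 */
#include <stdio.h>

static unsigned long long ctrl_state = 0xf;	/* stands in for the PMU MSR */

static unsigned long long hw_perf_save_disable(void)	/* old interface */
{
	unsigned long long ctrl = ctrl_state;
	ctrl_state = 0;
	return ctrl;
}

static void hw_perf_restore(unsigned long long ctrl)	/* old interface */
{
	ctrl_state = ctrl;
}

static void hw_perf_disable(void)			/* new interface */
{
	ctrl_state = 0;
}

static void hw_perf_enable(void)	/* restores from driver-held state */
{
	ctrl_state = 0xf;	/* stands in for x86_pmu.intel_ctrl */
}

int main(void)
{
	/* old: every caller had to thread the saved ctrl through */
	unsigned long long flags = hw_perf_save_disable();
	hw_perf_restore(flags);

	/* new: the state lives in the PMU driver, callers carry nothing */
	hw_perf_disable();
	hw_perf_enable();

	printf("ctrl = %#llx\n", ctrl_state);
	return 0;
}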
static inline u64 intel_pmu_get_status(void)
{
* Set the next IRQ period, based on the hwc->period_left value.
* To be called with the counter disabled in hw:
*/
-static void
+static int
x86_perf_counter_set_period(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
- s64 period = hwc->irq_period;
- int err;
+ s64 period = hwc->sample_period;
+ int err, ret = 0;
/*
* If we are way outside a reasonable range then just skip forward:
if (unlikely(left <= -period)) {
left = period;
atomic64_set(&hwc->period_left, left);
+ ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
atomic64_set(&hwc->period_left, left);
+ ret = 1;
}
+ /*
+ * Quirk: certain CPUs don't like it if just 1 event is left:
+ */
+ if (unlikely(left < 2))
+ left = 2;
+
+ if (left > x86_pmu.max_period)
+ left = x86_pmu.max_period;
per_cpu(prev_left[idx], smp_processor_id()) = left;
err = checking_wrmsrl(hwc->counter_base + idx,
(u64)(-left) & x86_pmu.counter_mask);
+
+ return ret;
}
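
For illustration, a standalone sketch (the 40-bit counter width is assumed) of why (-left) masked to the counter width is written to the hardware: the counter counts upward from that value and overflows, raising the interrupt, after exactly 'left' increments.

/* Standalone sketch, assumed 40-bit counter width. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t counter_mask = (1ULL << 40) - 1;
	int64_t left = 100000;		/* events until the next PMI */
	uint64_t hw_val = (uint64_t)(-left) & counter_mask;

	printf("programmed: 0x%010llx\n", (unsigned long long)hw_val);
	/* after 'left' increments the counter wraps to 0 => overflow */
	printf("after %lld events: 0x%010llx\n", (long long)left,
	       (unsigned long long)((hw_val + left) & counter_mask));
	return 0;
}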
static inline void
if (!x86_pmu.num_counters_fixed)
return -1;
- if (unlikely(hwc->nmi))
- return -1;
-
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
hwc->counter_base = x86_pmu.perfctr;
}
- perf_counters_lapic_init(hwc->nmi);
+ perf_counters_lapic_init();
x86_pmu.disable(hwc, idx);
return 0;
}
+static void x86_pmu_unthrottle(struct perf_counter *counter)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct hw_perf_counter *hwc = &counter->hw;
+
+ if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
+ cpuc->counters[hwc->idx] != counter))
+ return;
+
+ x86_pmu.enable(hwc, hwc->idx);
+}
+
void perf_counter_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
struct cpu_hw_counters *cpuc;
+ unsigned long flags;
int cpu, idx;
if (!x86_pmu.num_counters)
return;
- local_irq_disable();
+ local_irq_save(flags);
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_counters, cpu);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
- local_irq_enable();
+ local_irq_restore(flags);
}
static void x86_pmu_disable(struct perf_counter *counter)
* Save and restart an expired counter. Called by NMI contexts,
* so it has to be careful about preempting normal counter ops:
*/
-static void intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
+ int ret;
x86_perf_counter_update(counter, hwc, idx);
- x86_perf_counter_set_period(counter, hwc, idx);
+ ret = x86_perf_counter_set_period(counter, hwc, idx);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
intel_pmu_enable_counter(hwc, idx);
+
+ return ret;
+}
+
+static void intel_pmu_reset(void)
+{
+ unsigned long flags;
+ int idx;
+
+ if (!x86_pmu.num_counters)
+ return;
+
+ local_irq_save(flags);
+
+ printk(KERN_INFO "clearing PMU state on CPU#%d\n", smp_processor_id());
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
+ checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
+ }
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ }
+
+ local_irq_restore(flags);
}
-/*
- * Maximum interrupt frequency of 100KHz per CPU
- */
-#define PERFMON_MAX_INTERRUPTS (100000/HZ)
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
-static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs)
{
- int bit, cpu = smp_processor_id();
+ struct cpu_hw_counters *cpuc;
+ int bit, cpu, loops;
u64 ack, status;
- struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
- int ret = 0;
- cpuc->throttle_ctrl = intel_pmu_save_disable_all();
+ cpu = smp_processor_id();
+ cpuc = &per_cpu(cpu_hw_counters, cpu);
+ perf_disable();
status = intel_pmu_get_status();
- if (!status)
- goto out;
+ if (!status) {
+ perf_enable();
+ return 0;
+ }
- ret = 1;
+ loops = 0;
again:
+ if (++loops > 100) {
+ WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+ perf_counter_print_debug();
+ intel_pmu_reset();
+ perf_enable();
+ return 1;
+ }
+
inc_irq_stat(apic_perf_irqs);
ack = status;
for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
if (!test_bit(bit, cpuc->active_mask))
continue;
- intel_pmu_save_and_restart(counter);
- if (perf_counter_overflow(counter, nmi, regs, 0))
+ if (!intel_pmu_save_and_restart(counter))
+ continue;
+
+ if (perf_counter_overflow(counter, 1, regs, 0))
intel_pmu_disable_counter(&counter->hw, bit);
}
status = intel_pmu_get_status();
if (status)
goto again;
-out:
- /*
- * Restore - do not reenable when global enable is off or throttled:
- */
- if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
- intel_pmu_restore_all(cpuc->throttle_ctrl);
- return ret;
+ perf_enable();
+
+ return 1;
}
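
The again: loop above is a bounded-retry pattern: keep consuming overflow bits while fresh ones appear, but cap the iterations and hard-reset the PMU rather than loop forever. A standalone sketch of the shape (fake status source, not the real MSR protocol):

/* Standalone sketch with a fake status source. */
#include <stdio.h>

static unsigned long long fake_status(void)
{
	static int reads;
	return reads++ < 3 ? 0x1 : 0;	/* pending work for 3 reads */
}

int main(void)
{
	unsigned long long status;
	int loops = 0;

	status = fake_status();
	while (status) {
		if (++loops > 100) {		/* give up: would reset PMU */
			fprintf(stderr, "irq loop stuck!\n");
			return 1;
		}
		/* ... handle and ack each set bit in 'status' ... */
		status = fake_status();		/* re-check for new overflows */
	}
	return 0;
}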
-static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+static int amd_pmu_handle_irq(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
- struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
- u64 val;
- int handled = 0;
+ int cpu, idx, handled = 0;
+ struct cpu_hw_counters *cpuc;
struct perf_counter *counter;
struct hw_perf_counter *hwc;
- int idx;
+ u64 val;
+
+ cpu = smp_processor_id();
+ cpuc = &per_cpu(cpu_hw_counters, cpu);
- ++cpuc->interrupts;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
+
counter = cpuc->counters[idx];
hwc = &counter->hw;
+
val = x86_perf_counter_update(counter, hwc, idx);
if (val & (1ULL << (x86_pmu.counter_bits - 1)))
continue;
+
/* counter overflow */
- x86_perf_counter_set_period(counter, hwc, idx);
handled = 1;
inc_irq_stat(apic_perf_irqs);
- if (perf_counter_overflow(counter, nmi, regs, 0))
- amd_pmu_disable_counter(hwc, idx);
- else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
- /*
- * do not reenable when throttled, but reload
- * the register
- */
- amd_pmu_disable_counter(hwc, idx);
- else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- amd_pmu_enable_counter(hwc, idx);
- }
- return handled;
-}
-
-void perf_counter_unthrottle(void)
-{
- struct cpu_hw_counters *cpuc;
-
- if (!x86_pmu_initialized())
- return;
+ if (!x86_perf_counter_set_period(counter, hwc, idx))
+ continue;
- cpuc = &__get_cpu_var(cpu_hw_counters);
- if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
- if (printk_ratelimit())
- printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
- hw_perf_restore(cpuc->throttle_ctrl);
+ if (perf_counter_overflow(counter, 1, regs, 0))
+ amd_pmu_disable_counter(hwc, idx);
}
- cpuc->interrupts = 0;
-}
-void smp_perf_counter_interrupt(struct pt_regs *regs)
-{
- irq_enter();
- apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
- ack_APIC_irq();
- x86_pmu.handle_irq(regs, 0);
- irq_exit();
+ return handled;
}
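
AMD has no global overflow status register, so the handler above polls every active counter and infers overflow from the top bit: a counter programmed to -left keeps its top bit set while counting toward the wrap, and reads back small (top bit clear) once it has overflowed. A standalone sketch (48-bit width assumed):

/* Standalone sketch, assumed 48-bit counter width. */
#include <stdint.h>
#include <stdio.h>

static int overflowed(uint64_t val, int counter_bits)
{
	/* top bit clear => the counter wrapped past zero */
	return !(val & (1ULL << (counter_bits - 1)));
}

int main(void)
{
	printf("%d\n", overflowed(0xFFFFFF000000ULL, 48));	/* 0: counting */
	printf("%d\n", overflowed(0x000000000003ULL, 48));	/* 1: wrapped */
	return 0;
}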
void smp_perf_pending_interrupt(struct pt_regs *regs)
apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}
-void perf_counters_lapic_init(int nmi)
+void perf_counters_lapic_init(void)
{
- u32 apic_val;
-
if (!x86_pmu_initialized())
return;
/*
- * Enable the performance counter vector in the APIC LVT:
+ * Always use NMI for PMU
*/
- apic_val = apic_read(APIC_LVTERR);
-
- apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
- if (nmi)
- apic_write(APIC_LVTPC, APIC_DM_NMI);
- else
- apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
- apic_write(APIC_LVTERR, apic_val);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static int __kprobes
{
struct die_args *args = __args;
struct pt_regs *regs;
- int ret;
+
+ if (!atomic_read(&active_counters))
+ return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
regs = args->regs;
apic_write(APIC_LVTPC, APIC_DM_NMI);
- ret = x86_pmu.handle_irq(regs, 1);
+ /*
+ * Can't rely on the handled return value to say it was our NMI: two
+ * counters could trigger 'simultaneously', raising two back-to-back NMIs.
+ *
+ * If the first NMI handles both, the latter will be empty and daze
+ * the CPU.
+ */
+ x86_pmu.handle_irq(regs);
- return ret ? NOTIFY_STOP : NOTIFY_OK;
+ return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
static struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
- .save_disable_all = intel_pmu_save_disable_all,
- .restore_all = intel_pmu_restore_all,
+ .disable_all = intel_pmu_disable_all,
+ .enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_counter,
.disable = intel_pmu_disable_counter,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
static struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = amd_pmu_handle_irq,
- .save_disable_all = amd_pmu_save_disable_all,
- .restore_all = amd_pmu_restore_all,
+ .disable_all = amd_pmu_disable_all,
+ .enable_all = amd_pmu_enable_all,
.enable = amd_pmu_enable_counter,
.disable = amd_pmu_disable_counter,
.eventsel = MSR_K7_EVNTSEL0,
x86_pmu = intel_pmu;
x86_pmu.version = version;
x86_pmu.num_counters = eax.split.num_counters;
- x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
+
+ /*
+ * Quirk: v2 perfmon does not report fixed-purpose counters, so
+ * assume at least 3 counters:
+ */
+ x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+
x86_pmu.counter_bits = eax.split.bit_width;
x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+
return 0;
}
pr_info("... counter mask: %016Lx\n", perf_counter_mask);
- perf_counters_lapic_init(0);
+ perf_counters_lapic_init();
register_die_notifier(&perf_counter_nmi_notifier);
}
.enable = x86_pmu_enable,
.disable = x86_pmu_disable,
.read = x86_pmu_read,
+ .unthrottle = x86_pmu_unthrottle,
};
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)