* Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
* Copyright(C) 2009 Jaswinder Singh Rajput
+ * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
+#include <linux/uaccess.h>
#include <asm/apic.h>
+#include <asm/stacktrace.h>
+#include <asm/nmi.h>
static bool perf_counters_initialized __read_mostly;
};
/*
- * struct pmc_x86_ops - performance counter x86 ops
+ * struct x86_pmu - generic x86 pmu
*/
-struct pmc_x86_ops {
+struct x86_pmu {
+ int (*handle_irq)(struct pt_regs *, int);
u64 (*save_disable_all)(void);
void (*restore_all)(u64);
- u64 (*get_status)(u64);
- void (*ack_status)(u64);
void (*enable)(int, u64);
void (*disable)(int, u64);
unsigned eventsel;
int max_events;
};
-static struct pmc_x86_ops *pmc_ops __read_mostly;
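+/*
+ * The active ops table; filled in from intel_pmu or amd_pmu by the
+ * vendor-specific init routine.
+ */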
+static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
.enabled = 1,
[PERF_COUNT_BUS_CYCLES] = 0x013c,
};
-static u64 pmc_intel_event_map(int event)
+static u64 intel_pmu_event_map(int event)
{
return intel_perfmon_event_map[event];
}
-static u64 pmc_intel_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
[PERF_COUNT_BRANCH_MISSES] = 0x00c5,
};
-static u64 pmc_amd_event_map(int event)
+static u64 amd_pmu_event_map(int event)
{
return amd_perfmon_event_map[event];
}
-static u64 pmc_amd_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
atomic64_sub(delta, &hwc->period_left);
}
+static atomic_t num_counters;
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
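+/*
+ * PMC hardware is claimed lazily: the first counter reserves every
+ * perfctr/eventsel MSR pair (and temporarily disables the lapic NMI
+ * watchdog); num_counters refcounts the users and pmc_reserve_mutex
+ * serializes reserve/release.
+ */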
+static bool reserve_pmc_hardware(void)
+{
+ int i;
+
+ if (nmi_watchdog == NMI_LOCAL_APIC)
+ disable_lapic_nmi_watchdog();
+
+ for (i = 0; i < nr_counters_generic; i++) {
+ if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+ goto perfctr_fail;
+ }
+
+ for (i = 0; i < nr_counters_generic; i++) {
+ if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+ goto eventsel_fail;
+ }
+
+ return true;
+
+eventsel_fail:
+ for (i--; i >= 0; i--)
+ release_evntsel_nmi(x86_pmu.eventsel + i);
+
+ i = nr_counters_generic;
+
+perfctr_fail:
+ for (i--; i >= 0; i--)
+ release_perfctr_nmi(x86_pmu.perfctr + i);
+
+ if (nmi_watchdog == NMI_LOCAL_APIC)
+ enable_lapic_nmi_watchdog();
+
+ return false;
+}
+
+static void release_pmc_hardware(void)
+{
+ int i;
+
+ for (i = 0; i < nr_counters_generic; i++) {
+ release_perfctr_nmi(x86_pmu.perfctr + i);
+ release_evntsel_nmi(x86_pmu.eventsel + i);
+ }
+
+ if (nmi_watchdog == NMI_LOCAL_APIC)
+ enable_lapic_nmi_watchdog();
+}
+
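+/*
+ * counter->destroy callback: releases the PMC hardware once the last
+ * counter is gone.
+ */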
+static void hw_perf_counter_destroy(struct perf_counter *counter)
+{
+ if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
/*
* Setup the hardware configuration for a given hw_event_type
*/
{
struct perf_counter_hw_event *hw_event = &counter->hw_event;
struct hw_perf_counter *hwc = &counter->hw;
+ int err;
+
+ /* disable temporarily */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return -ENOSYS;
if (unlikely(!perf_counters_initialized))
return -EINVAL;
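+ /*
+ * The first counter to be created must also reserve the PMC hardware;
+ * later counters just take a num_counters reference.
+ */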
+ err = 0;
+ if (!atomic_inc_not_zero(&num_counters)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_counters);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ if (err)
+ return err;
+
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
* Raw event type provide the config in the event structure
*/
if (perf_event_raw(hw_event)) {
- hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event));
+ hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
} else {
- if (perf_event_id(hw_event) >= pmc_ops->max_events)
+ if (perf_event_id(hw_event) >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
- hwc->config |= pmc_ops->event_map(perf_event_id(hw_event));
+ hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
}
+ counter->destroy = hw_perf_counter_destroy;
+
return 0;
}
-static u64 pmc_intel_save_disable_all(void)
+static u64 intel_pmu_save_disable_all(void)
{
u64 ctrl;
return ctrl;
}
-static u64 pmc_amd_save_disable_all(void)
+static u64 amd_pmu_save_disable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
int enabled, idx;
cpuc->enabled = 0;
/*
* ensure we write the disable before we start disabling the
- * counters proper, so that pcm_amd_enable() does the right thing.
+ * counters proper, so that amd_pmu_enable_counter() does the
+ * right thing.
*/
barrier();
for (idx = 0; idx < nr_counters_generic; idx++) {
u64 val;
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
+ if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+ continue;
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
return enabled;
if (unlikely(!perf_counters_initialized))
return 0;
- return pmc_ops->save_disable_all();
+ return x86_pmu.save_disable_all();
}
/*
* Exported because of ACPI idle
*/
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
-static void pmc_intel_restore_all(u64 ctrl)
+static void intel_pmu_restore_all(u64 ctrl)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
-static void pmc_amd_restore_all(u64 ctrl)
+static void amd_pmu_restore_all(u64 ctrl)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
int idx;
return;
for (idx = 0; idx < nr_counters_generic; idx++) {
- if (test_bit(idx, cpuc->active_mask)) {
- u64 val;
+ u64 val;
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+ if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+ continue;
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
}
if (unlikely(!perf_counters_initialized))
return;
- pmc_ops->restore_all(ctrl);
+ x86_pmu.restore_all(ctrl);
}
/*
* Exported because of ACPI idle
*/
EXPORT_SYMBOL_GPL(hw_perf_restore);
-static u64 pmc_intel_get_status(u64 mask)
+static inline u64 intel_pmu_get_status(u64 mask)
{
u64 status;
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-
- return status;
-}
-
-static u64 pmc_amd_get_status(u64 mask)
-{
- u64 status = 0;
- int idx;
-
- for (idx = 0; idx < nr_counters_generic; idx++) {
- s64 val;
-
- if (!(mask & (1 << idx)))
- continue;
-
- rdmsrl(MSR_K7_PERFCTR0 + idx, val);
- val <<= (64 - counter_value_bits);
- if (val >= 0)
- status |= (1 << idx);
- }
-
- return status;
-}
-
-static u64 hw_perf_get_status(u64 mask)
-{
if (unlikely(!perf_counters_initialized))
return 0;
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
- return pmc_ops->get_status(mask);
+ return status;
}
-static void pmc_intel_ack_status(u64 ack)
+static inline void intel_pmu_ack_status(u64 ack)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static void pmc_amd_ack_status(u64 ack)
-{
-}
-
-static void hw_perf_ack_status(u64 ack)
-{
- if (unlikely(!perf_counters_initialized))
- return;
-
- pmc_ops->ack_status(ack);
-}
-
-static void pmc_intel_enable(int idx, u64 config)
+static void intel_pmu_enable_counter(int idx, u64 config)
{
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
-static void pmc_amd_enable(int idx, u64 config)
+static void amd_pmu_enable_counter(int idx, u64 config)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
if (unlikely(!perf_counters_initialized))
return;
- pmc_ops->enable(idx, config);
+ x86_pmu.enable(idx, config);
}
-static void pmc_intel_disable(int idx, u64 config)
+static void intel_pmu_disable_counter(int idx, u64 config)
{
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
}
-static void pmc_amd_disable(int idx, u64 config)
+static void amd_pmu_disable_counter(int idx, u64 config)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
if (unlikely(!perf_counters_initialized))
return;
- pmc_ops->disable(idx, config);
+ x86_pmu.disable(idx, config);
}
static inline void
}
static inline void
-__pmc_generic_disable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int idx)
+__x86_pmu_disable(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, unsigned int idx)
{
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
__pmc_fixed_disable(counter, hwc, idx);
* To be called with the counter disabled in hw:
*/
static void
-__hw_perf_counter_set_period(struct perf_counter *counter,
+x86_perf_counter_set_period(struct perf_counter *counter,
struct hw_perf_counter *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
}
static void
-__pmc_generic_enable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+__x86_pmu_enable(struct perf_counter *counter,
+ struct hw_perf_counter *hwc, int idx)
{
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
__pmc_fixed_enable(counter, hwc, idx);
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
return X86_PMC_IDX_FIXED_BUS_CYCLES;
return -1;
/*
* Find a PMC slot for the freshly enabled / scheduled in counter:
*/
-static int pmc_generic_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
set_bit(idx, cpuc->used);
hwc->idx = idx;
}
- hwc->config_base = pmc_ops->eventsel;
- hwc->counter_base = pmc_ops->perfctr;
+ hwc->config_base = x86_pmu.eventsel;
+ hwc->counter_base = x86_pmu.perfctr;
}
perf_counters_lapic_init(hwc->nmi);
- __pmc_generic_disable(counter, hwc, idx);
+ __x86_pmu_disable(counter, hwc, idx);
cpuc->counters[idx] = counter;
/*
* Make it visible before enabling the hw:
*/
- smp_wmb();
+ barrier();
- __hw_perf_counter_set_period(counter, hwc, idx);
- __pmc_generic_enable(counter, hwc, idx);
+ x86_perf_counter_set_period(counter, hwc, idx);
+ __x86_pmu_enable(counter, hwc, idx);
return 0;
}
pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
for (idx = 0; idx < nr_counters_generic; idx++) {
- rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
- rdmsrl(pmc_ops->perfctr + idx, pmc_count);
+ rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
+ rdmsrl(x86_pmu.perfctr + idx, pmc_count);
prev_left = per_cpu(prev_left[idx], cpu);
local_irq_enable();
}
-static void pmc_generic_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
struct hw_perf_counter *hwc = &counter->hw;
unsigned int idx = hwc->idx;
- __pmc_generic_disable(counter, hwc, idx);
+ __x86_pmu_disable(counter, hwc, idx);
clear_bit(idx, cpuc->used);
cpuc->counters[idx] = NULL;
* Make sure the cleared pointer becomes visible before we
* (potentially) free the counter:
*/
- smp_wmb();
+ barrier();
/*
* Drain the remaining delta count out of a counter
* Save and restart an expired counter. Called by NMI contexts,
* so it has to be careful about preempting normal counter ops:
*/
-static void perf_save_and_restart(struct perf_counter *counter)
+static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
x86_perf_counter_update(counter, hwc, idx);
- __hw_perf_counter_set_period(counter, hwc, idx);
+ x86_perf_counter_set_period(counter, hwc, idx);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- __pmc_generic_enable(counter, hwc, idx);
+ __x86_pmu_enable(counter, hwc, idx);
}
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
-static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
int bit, cpu = smp_processor_id();
u64 ack, status;
struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
int ret = 0;
- cpuc->throttle_ctrl = hw_perf_save_disable();
+ cpuc->throttle_ctrl = intel_pmu_save_disable_all();
- status = hw_perf_get_status(cpuc->throttle_ctrl);
+ status = intel_pmu_get_status(cpuc->throttle_ctrl);
if (!status)
goto out;
if (!counter)
continue;
- perf_save_and_restart(counter);
- perf_counter_output(counter, nmi, regs);
+ intel_pmu_save_and_restart(counter);
+ if (perf_counter_overflow(counter, nmi, regs, 0))
+ __x86_pmu_disable(counter, &counter->hw, bit);
}
- hw_perf_ack_status(ack);
+ intel_pmu_ack_status(ack);
/*
* Repeat if there is more work to be done:
*/
- status = hw_perf_get_status(cpuc->throttle_ctrl);
+ status = intel_pmu_get_status(cpuc->throttle_ctrl);
if (status)
goto again;
out:
* Restore - do not reenable when global enable is off or throttled:
*/
if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
- hw_perf_restore(cpuc->throttle_ctrl);
+ intel_pmu_restore_all(cpuc->throttle_ctrl);
return ret;
}
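+/*
+ * AMD counter setup currently returns -ENOSYS from __hw_perf_counter_init,
+ * so this irq handler is only a stub for now.
+ */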
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+{
+ return 0;
+}
+
void perf_counter_unthrottle(void)
{
struct cpu_hw_counters *cpuc;
irq_enter();
apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
ack_APIC_irq();
- __smp_perf_counter_interrupt(regs, 0);
+ x86_pmu.handle_irq(regs, 0);
irq_exit();
}
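+/*
+ * Deferred (non-NMI) work: set_perf_counter_pending() raises a self-IPI
+ * on LOCAL_PENDING_VECTOR and the handler below runs
+ * perf_counter_do_pending() from normal interrupt context.
+ */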
+void smp_perf_pending_interrupt(struct pt_regs *regs)
+{
+ irq_enter();
+ ack_APIC_irq();
+ inc_irq_stat(apic_pending_irqs);
+ perf_counter_do_pending();
+ irq_exit();
+}
+
+void set_perf_counter_pending(void)
+{
+ apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+}
+
void perf_counters_lapic_init(int nmi)
{
u32 apic_val;
regs = args->regs;
apic_write(APIC_LVTPC, APIC_DM_NMI);
- ret = __smp_perf_counter_interrupt(regs, 1);
+ ret = x86_pmu.handle_irq(regs, 1);
return ret ? NOTIFY_STOP : NOTIFY_OK;
}
.priority = 1
};
-static struct pmc_x86_ops pmc_intel_ops = {
- .save_disable_all = pmc_intel_save_disable_all,
- .restore_all = pmc_intel_restore_all,
- .get_status = pmc_intel_get_status,
- .ack_status = pmc_intel_ack_status,
- .enable = pmc_intel_enable,
- .disable = pmc_intel_disable,
+static struct x86_pmu intel_pmu = {
+ .handle_irq = intel_pmu_handle_irq,
+ .save_disable_all = intel_pmu_save_disable_all,
+ .restore_all = intel_pmu_restore_all,
+ .enable = intel_pmu_enable_counter,
+ .disable = intel_pmu_disable_counter,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = pmc_intel_event_map,
- .raw_event = pmc_intel_raw_event,
+ .event_map = intel_pmu_event_map,
+ .raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
};
-static struct pmc_x86_ops pmc_amd_ops = {
- .save_disable_all = pmc_amd_save_disable_all,
- .restore_all = pmc_amd_restore_all,
- .get_status = pmc_amd_get_status,
- .ack_status = pmc_amd_ack_status,
- .enable = pmc_amd_enable,
- .disable = pmc_amd_disable,
+static struct x86_pmu amd_pmu = {
+ .handle_irq = amd_pmu_handle_irq,
+ .save_disable_all = amd_pmu_save_disable_all,
+ .restore_all = amd_pmu_restore_all,
+ .enable = amd_pmu_enable_counter,
+ .disable = amd_pmu_disable_counter,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
- .event_map = pmc_amd_event_map,
- .raw_event = pmc_amd_raw_event,
+ .event_map = amd_pmu_event_map,
+ .raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
};
-static struct pmc_x86_ops *pmc_intel_init(void)
+static int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
unsigned int unused;
unsigned int ebx;
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return -ENODEV;
+
/*
* Check whether the Architectural PerfMon supports
* Branch Misses Retired Event or not.
*/
cpuid(10, &eax.full, &ebx, &unused, &edx.full);
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
- return NULL;
+ return -ENODEV;
intel_perfmon_version = eax.split.version_id;
if (intel_perfmon_version < 2)
- return NULL;
+ return -ENODEV;
pr_info("Intel Performance Monitoring support detected.\n");
pr_info("... version: %d\n", intel_perfmon_version);
pr_info("... bit width: %d\n", eax.split.bit_width);
pr_info("... mask length: %d\n", eax.split.mask_length);
+ x86_pmu = intel_pmu;
+
nr_counters_generic = eax.split.num_counters;
nr_counters_fixed = edx.split.num_counters_fixed;
counter_value_mask = (1ULL << eax.split.bit_width) - 1;
- return &pmc_intel_ops;
+ return 0;
}
-static struct pmc_x86_ops *pmc_amd_init(void)
+static int amd_pmu_init(void)
{
+ x86_pmu = amd_pmu;
+
nr_counters_generic = 4;
nr_counters_fixed = 0;
counter_value_mask = 0x0000FFFFFFFFFFFFULL;
counter_value_bits = 48;
pr_info("AMD Performance Monitoring support detected.\n");
-
- return &pmc_amd_ops;
+ return 0;
}
void __init init_hw_perf_counters(void)
{
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return;
+ int err;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
- pmc_ops = pmc_intel_init();
+ err = intel_pmu_init();
break;
case X86_VENDOR_AMD:
- pmc_ops = pmc_amd_init();
+ err = amd_pmu_init();
break;
+ default:
+ return;
}
- if (!pmc_ops)
+ if (err != 0)
return;
pr_info("... num counters: %d\n", nr_counters_generic);
register_die_notifier(&perf_counter_nmi_notifier);
}
-static void pmc_generic_read(struct perf_counter *counter)
+static void x86_pmu_read(struct perf_counter *counter)
{
x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}
-static const struct hw_perf_counter_ops x86_perf_counter_ops = {
- .enable = pmc_generic_enable,
- .disable = pmc_generic_disable,
- .read = pmc_generic_read,
+static const struct pmu pmu = {
+ .enable = x86_pmu_enable,
+ .disable = x86_pmu_disable,
+ .read = x86_pmu_read,
};
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
int err;
if (err)
return ERR_PTR(err);
- return &x86_perf_counter_ops;
+ return &pmu;
+}
+
+/*
+ * callchain support
+ */
+
+static inline
+void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
+{
+ if (entry->nr < MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+
+static void
+backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+ /* Ignore warnings */
+}
+
+static void backtrace_warning(void *data, char *msg)
+{
+ /* Ignore warnings */
+}
+
+static int backtrace_stack(void *data, char *name)
+{
+ /* Don't bother with IRQ stacks for now */
+ return -1;
+}
+
+static void backtrace_address(void *data, unsigned long addr, int reliable)
+{
+ struct perf_callchain_entry *entry = data;
+
+ if (reliable)
+ callchain_store(entry, addr);
+}
+
+static const struct stacktrace_ops backtrace_ops = {
+ .warning = backtrace_warning,
+ .warning_symbol = backtrace_warning_symbol,
+ .stack = backtrace_stack,
+ .address = backtrace_address,
+};
+
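+/*
+ * Kernel callchain: record the trapping IP, then let dump_trace() walk
+ * the stack and feed reliable return addresses to backtrace_address().
+ */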
+static void
+perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ unsigned long bp;
+ char *stack;
+ int nr = entry->nr;
+
+ callchain_store(entry, instruction_pointer(regs));
+
+ stack = ((char *)regs + sizeof(struct pt_regs));
+#ifdef CONFIG_FRAME_POINTER
+ bp = frame_pointer(regs);
+#else
+ bp = 0;
+#endif
+
+ dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);
+
+ entry->kernel = entry->nr - nr;
+}
+
+struct stack_frame {
+ const void __user *next_fp;
+ unsigned long return_address;
+};
+
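+/*
+ * Best-effort copy of one user stack frame; page faults are disabled
+ * because this can be called from NMI context.
+ */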
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+ int ret;
+
+ if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+ return 0;
+
+ ret = 1;
+ pagefault_disable();
+ if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+ ret = 0;
+ pagefault_enable();
+
+ return ret;
+}
+
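+/*
+ * User callchain: follow the saved frame pointers until a frame cannot
+ * be read, points below the stack pointer, or MAX_STACK_DEPTH entries
+ * have been recorded.
+ */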
+static void
+perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ struct stack_frame frame;
+ const void __user *fp;
+ int nr = entry->nr;
+
+ regs = (struct pt_regs *)current->thread.sp0 - 1;
+ fp = (void __user *)regs->bp;
+
+ callchain_store(entry, regs->ip);
+
+ while (entry->nr < MAX_STACK_DEPTH) {
+ frame.next_fp = NULL;
+ frame.return_address = 0;
+
+ if (!copy_stack_frame(fp, &frame))
+ break;
+
+ if ((unsigned long)fp < user_stack_pointer(regs))
+ break;
+
+ callchain_store(entry, frame.return_address);
+ fp = frame.next_fp;
+ }
+
+ entry->user = entry->nr - nr;
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || current->pid == 0)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user)
+ perf_callchain_kernel(regs, entry);
+
+ if (current->mm)
+ perf_callchain_user(regs, entry);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry;
+
+ if (in_nmi())
+ entry = &__get_cpu_var(nmi_entry);
+ else
+ entry = &__get_cpu_var(irq_entry);
+
+ entry->nr = 0;
+ entry->hv = 0;
+ entry->kernel = 0;
+ entry->user = 0;
+
+ perf_do_callchain(regs, entry);
+
+ return entry;
}