{
struct perf_event *event = per_cpu(watchdog_ev, cpu);
- if (event) {
+ if (event)
perf_event_disable(event);
- per_cpu(watchdog_ev, cpu) = NULL;
- /* should be in cleanup, but blocks oprofile */
- perf_event_release_kernel(event);
- }
return;
}
#else
/*
* Create/destroy watchdog threads as CPUs come and go:
*/
-static int __cpuinit
+static int
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
+void lockup_detector_bootcpu_resume(void)
+{
+ void *cpu = (void *)(long)smp_processor_id();
+
+ /*
+ * On the suspend/resume path the boot CPU does not go through the
+ * offline->online transition. This breaks the NMI detector post
+ * resume. Force an offline->online transition as a workaround.
+ */
+ cpu_callback(&cpu_nfb, CPU_DEAD, cpu);
+ cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+
+ return;
+}
+
void __init lockup_detector_init(void)
{
void *cpu = (void *)(long)smp_processor_id();