@@ -78,22 +78,22 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
-int __init check_nmi_watchdog (void)
+int __init check_nmi_watchdog(void)
 {
-	int *counts;
+	int *prev_nmi_count;
 	int cpu;
 
-	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
+	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
 		return 0;
 
 	if (!atomic_read(&nmi_active))
 		return 0;
 
-	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
-	if (!counts)
+	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
+	if (!prev_nmi_count)
 		return -1;
 
-	printk(KERN_INFO "testing NMI watchdog ... ");
+	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -101,30 +101,29 @@ int __init check_nmi_watchdog (void)
 #endif
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		counts[cpu] = cpu_pda(cpu)->__nmi_count;
+		prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
 	local_irq_enable();
 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_online_cpu(cpu) {
 		if (!per_cpu(wd_enabled, cpu))
 			continue;
-		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
+		if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
 			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
 				"appears to be stuck (%d->%d)!\n",
-				cpu,
-				counts[cpu],
-				cpu_pda(cpu)->__nmi_count);
+				cpu,
+				prev_nmi_count[cpu],
+				cpu_pda(cpu)->__nmi_count);
 			per_cpu(wd_enabled, cpu) = 0;
 			atomic_dec(&nmi_active);
 		}
 	}
+	endflag = 1;
 	if (!atomic_read(&nmi_active)) {
-		kfree(counts);
+		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
-		endflag = 1;
 		return -1;
 	}
-	endflag = 1;
 	printk("OK.\n");
 
 	/* now that we know it works we can reduce NMI frequency to
@@ -132,7 +131,7 @@ int __init check_nmi_watchdog (void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		nmi_hz = lapic_adjust_nmi_hz(1);
 
-	kfree(counts);
+	kfree(prev_nmi_count);
 	return 0;
 }
 
@@ -159,34 +158,6 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-
-static void __acpi_nmi_disable(void *__unused)
-{
-	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
-/*
- * Disable timer based NMIs on all CPUs:
- */
-void acpi_nmi_disable(void)
-{
-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
-}
-
-static void __acpi_nmi_enable(void *__unused)
-{
-	apic_write(APIC_LVT0, APIC_DM_NMI);
-}
-
-/*
- * Enable timer based NMIs on all CPUs:
- */
-void acpi_nmi_enable(void)
-{
-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
-}
-
 #ifdef CONFIG_PM
 
 static int nmi_pm_active; /* nmi_active before suspend */
@@ -217,7 +188,7 @@ static struct sysdev_class nmi_sysclass = {
 };
 
 static struct sys_device device_lapic_nmi = {
-	.id		= 0,
+	.id	= 0,
 	.cls	= &nmi_sysclass,
 };
 
@@ -231,7 +202,7 @@ static int __init init_lapic_nmi_sysfs(void)
 	if (nmi_watchdog != NMI_LOCAL_APIC)
 		return 0;
 
-	if ( atomic_read(&nmi_active) < 0 )
+	if (atomic_read(&nmi_active) < 0)
 		return 0;
 
 	error = sysdev_class_register(&nmi_sysclass);
@@ -244,9 +215,37 @@ late_initcall(init_lapic_nmi_sysfs);
 
 #endif /* CONFIG_PM */
 
+static void __acpi_nmi_enable(void *__unused)
+{
+	apic_write(APIC_LVT0, APIC_DM_NMI);
+}
+
+/*
+ * Enable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_enable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+}
+
+static void __acpi_nmi_disable(void *__unused)
+{
+	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
+/*
+ * Disable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_disable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+}
+
 void setup_apic_nmi_watchdog(void *unused)
 {
-	if (__get_cpu_var(wd_enabled) == 1)
+	if (__get_cpu_var(wd_enabled))
 		return;
 
 	/* cheap hack to support suspend/resume */
@@ -311,8 +310,9 @@ void touch_nmi_watchdog(void)
 		}
 	}
 
- 	touch_softlockup_watchdog();
+	touch_softlockup_watchdog();
 }
+EXPORT_SYMBOL(touch_nmi_watchdog);
 
 int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
@@ -479,4 +479,3 @@ void __trigger_all_cpu_backtrace(void)
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
-EXPORT_SYMBOL(touch_nmi_watchdog);