@@ -22,9 +22,9 @@
 
 static DEFINE_SPINLOCK(print_lock);
 
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
-	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+	__raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(touch_timestamp) = 0;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
 
 	/* Cause each CPU to re-update its timestamp rather than complain */
 	for_each_online_cpu(cpu)
-		per_cpu(touch_timestamp, cpu) = 0;
+		per_cpu(softlockup_touch_ts, cpu) = 0;
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 void softlockup_tick(void)
 {
 	int this_cpu = smp_processor_id();
-	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-	unsigned long print_timestamp;
+	unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+	unsigned long print_ts;
 	struct pt_regs *regs = get_irq_regs();
 	unsigned long now;
 
 	/* Is detection switched off? */
-	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+	if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
 		/* Be sure we don't false trigger if switched back on */
-		if (touch_timestamp)
-			per_cpu(touch_timestamp, this_cpu) = 0;
+		if (touch_ts)
+			per_cpu(softlockup_touch_ts, this_cpu) = 0;
 		return;
 	}
 
-	if (touch_timestamp == 0) {
+	if (touch_ts == 0) {
 		__touch_softlockup_watchdog();
 		return;
 	}
 
-	print_timestamp = per_cpu(print_timestamp, this_cpu);
+	print_ts = per_cpu(softlockup_print_ts, this_cpu);
 
 	/* report at most once a second */
-	if (print_timestamp == touch_timestamp || did_panic)
+	if (print_ts == touch_ts || did_panic)
 		return;
 
 	/* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
 	 * Wake up the high-prio watchdog task twice per
 	 * threshold timespan.
 	 */
-	if (now > touch_timestamp + softlockup_thresh/2)
-		wake_up_process(per_cpu(watchdog_task, this_cpu));
+	if (now > touch_ts + softlockup_thresh/2)
+		wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
 	/* Warn about unreasonable delays: */
-	if (now <= (touch_timestamp + softlockup_thresh))
+	if (now <= (touch_ts + softlockup_thresh))
 		return;
 
-	per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+	per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
 
 	spin_lock(&print_lock);
 	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
-			this_cpu, now - touch_timestamp,
+			this_cpu, now - touch_ts,
 			current->comm, task_pid_nr(current));
 	print_modules();
 	print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		BUG_ON(per_cpu(watchdog_task, hotcpu));
+		BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
 		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-		per_cpu(touch_timestamp, hotcpu) = 0;
-		per_cpu(watchdog_task, hotcpu) = p;
+		per_cpu(softlockup_touch_ts, hotcpu) = 0;
+		per_cpu(softlockup_watchdog, hotcpu) = p;
 		kthread_bind(p, hotcpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		wake_up_process(per_cpu(watchdog_task, hotcpu));
+		wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		if (!per_cpu(watchdog_task, hotcpu))
+		if (!per_cpu(softlockup_watchdog, hotcpu))
 			break;
 		/* Unbind so it can run. Fall thru. */
-		kthread_bind(per_cpu(watchdog_task, hotcpu),
+		kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
 			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		p = per_cpu(watchdog_task, hotcpu);
-		per_cpu(watchdog_task, hotcpu) = NULL;
+		p = per_cpu(softlockup_watchdog, hotcpu);
+		per_cpu(softlockup_watchdog, hotcpu) = NULL;
 		kthread_stop(p);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
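
For reference, a minimal sketch of the per-CPU idiom this patch renames,
written as a stand-alone kernel module. The module and the example_touch_ts
name are illustrative assumptions, not part of the patch; only
DEFINE_PER_CPU(), per_cpu(), get_cpu_var()/put_cpu_var() and
for_each_online_cpu() are the actual kernel APIs being demonstrated.

/*
 * Illustrative only: the pattern used above -- a uniquely prefixed
 * per-CPU variable, a reset loop over all online CPUs (as in
 * touch_all_softlockup_watchdogs()), and a preemption-safe update
 * of the local CPU's slot.
 */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/percpu.h>

/* Prefixed name, following the convention this patch introduces. */
static DEFINE_PER_CPU(unsigned long, example_touch_ts);

static int __init example_init(void)
{
	int cpu;

	/* Zero every CPU's slot so each CPU re-touches its timestamp. */
	for_each_online_cpu(cpu)
		per_cpu(example_touch_ts, cpu) = 0;

	/* get_cpu_var() disables preemption while this CPU's slot is written. */
	get_cpu_var(example_touch_ts) = jiffies;
	put_cpu_var(example_touch_ts);

	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");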