@@ -23,7 +23,7 @@ static DEFINE_PER_CPU(unsigned long, touch_timestamp);
 static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
-static int did_panic = 0;
+static int did_panic;
 
 static int
 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -121,7 +121,7 @@ void softlockup_tick(void)
 /*
  * The watchdog thread - runs every second and touches the timestamp.
  */
-static int watchdog(void * __bind_cpu)
+static int watchdog(void *__bind_cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
@@ -159,13 +159,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(per_cpu(watchdog_task, hotcpu));
 		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
 		if (IS_ERR(p)) {
-			printk("watchdog for %i failed\n", hotcpu);
+			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-  		per_cpu(touch_timestamp, hotcpu) = 0;
-  		per_cpu(watchdog_task, hotcpu) = p;
+		per_cpu(touch_timestamp, hotcpu) = 0;
+		per_cpu(watchdog_task, hotcpu) = p;
 		kthread_bind(p, hotcpu);
- 		break;
+		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		wake_up_process(per_cpu(watchdog_task, hotcpu));
@@ -185,7 +185,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_stop(p);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
- 	}
+	}
 	return NOTIFY_OK;
 }
 
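
A note on the create/bind/wake split visible in the cpu_callback hunks: the per-CPU watchdog thread is created and bound while the CPU is still in CPU_UP_PREPARE, and only woken at CPU_ONLINE, so it never runs before its CPU is actually up. The fragment below is a minimal illustrative sketch of that pattern, not part of the patch; the example_* helpers are hypothetical and error handling is abbreviated.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/percpu.h>

/* Create and bind a per-CPU watchdog thread during CPU_UP_PREPARE. */
static int example_up_prepare(int cpu)
{
	struct task_struct *p;

	p = kthread_create(watchdog, (void *)(long)cpu, "watchdog/%d", cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	kthread_bind(p, cpu);			/* bind before it ever runs */
	per_cpu(watchdog_task, cpu) = p;
	return 0;
}

/* Wake the bound thread once the CPU has reached CPU_ONLINE. */
static void example_online(int cpu)
{
	wake_up_process(per_cpu(watchdog_task, cpu));
}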