@@ -6,15 +6,58 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+
+static void __dump_stack(void)
+{
+	dump_stack_print_info(KERN_DEFAULT);
+	show_stack(NULL, NULL);
+}
 
 /**
  * dump_stack - dump the current task information and its stack trace
  *
  * Architectures can override this implementation by implementing its own.
  */
+#ifdef CONFIG_SMP
+static atomic_t dump_lock = ATOMIC_INIT(-1);
+
 void dump_stack(void)
 {
-	dump_stack_print_info(KERN_DEFAULT);
-	show_stack(NULL, NULL);
+	int was_locked;
+	int old;
+	int cpu;
+
+	/*
+	 * Permit this cpu to perform nested stack dumps while serialising
+	 * against other CPUs
+	 */
+	preempt_disable();
+
+retry:
+	cpu = smp_processor_id();
+	old = atomic_cmpxchg(&dump_lock, -1, cpu);
+	if (old == -1) {
+		was_locked = 0;
+	} else if (old == cpu) {
+		was_locked = 1;
+	} else {
+		cpu_relax();
+		goto retry;
+	}
+
+	__dump_stack();
+
+	if (!was_locked)
+		atomic_set(&dump_lock, -1);
+
+	preempt_enable();
+}
+#else
+void dump_stack(void)
+{
+	__dump_stack();
 }
+#endif
 EXPORT_SYMBOL(dump_stack);
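
Note on the locking pattern above: dump_lock acts as a recursion-aware spinlock keyed on the CPU id. A CPU that finds the lock free takes it with atomic_cmpxchg(); a CPU that already owns it (a nested dump_stack() call, e.g. from a fault raised while dumping) is let through and does not release the lock on exit; any other CPU spins with cpu_relax() until the owner resets dump_lock to -1. The sketch below is an illustrative userspace analogue of that pattern using C11 atomics, not kernel code; serialized_dump, my_id and the printf() stand-in for __dump_stack() are hypothetical names used only for this sketch, and it omits the preempt_disable()/preempt_enable() pair that keeps the CPU id stable in the kernel version.

/*
 * Userspace sketch of the dump_lock scheme, assuming C11 <stdatomic.h>.
 * All names here are illustrative, not kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_lock = -1;	/* -1 means "no owner" */

static void serialized_dump(int my_id)
{
	int expected;
	int was_locked;

	for (;;) {
		expected = -1;
		/* try to take the lock: -1 -> my_id */
		if (atomic_compare_exchange_strong(&dump_lock, &expected, my_id)) {
			was_locked = 0;		/* outermost call: we took the lock */
			break;
		}
		if (expected == my_id) {
			was_locked = 1;		/* nested call by the current owner */
			break;
		}
		/* another caller owns the lock; spin and retry */
	}

	printf("dump from %d\n", my_id);	/* stand-in for __dump_stack() */

	if (!was_locked)
		atomic_store(&dump_lock, -1);	/* release only at the outermost level */
}

int main(void)
{
	serialized_dump(0);			/* takes the lock, dumps, releases it */
	return 0;
}

The key property, in both the sketch and the patch, is that only the outermost, lock-taking call resets dump_lock to -1, so a nested dump on the owning CPU never releases the lock out from under the dump already in progress.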