@@ -2287,7 +2287,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-static DEFINE_PER_CPU(int, in_nmi_frame);
+static DEFINE_PER_CPU(int, in_ignored_frame);
 
 static void
 backtrace_warning_symbol(void *data, char *msg,
@@ -2303,8 +2303,9 @@ static void backtrace_warning(void *data, char *msg)
 
 static int backtrace_stack(void *data, char *name)
 {
-	per_cpu(in_nmi_frame, smp_processor_id()) =
-			x86_is_stack_id(NMI_STACK, name);
+	per_cpu(in_ignored_frame, smp_processor_id()) =
+			x86_is_stack_id(NMI_STACK, name) ||
+			x86_is_stack_id(DEBUG_STACK, name);
 
 	return 0;
 }
@@ -2313,7 +2314,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	struct perf_callchain_entry *entry = data;
 
-	if (per_cpu(in_nmi_frame, smp_processor_id()))
+	if (per_cpu(in_ignored_frame, smp_processor_id()))
		return;
 
 	if (reliable)