@@ -152,6 +152,42 @@ static struct irq_chip cpu_interrupt_type = {
 	.irq_retrigger	= NULL,
 };
 
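+/* per-CPU interrupt statistics, printed via arch_show_interrupts() below */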
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x)	(&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	seq_printf(p, "%*s: ", prec, "STK");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+	seq_printf(p, " Kernel stack usage\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "%*s: ", prec, "RES");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, " Rescheduling interrupts\n");
+	seq_printf(p, "%*s: ", prec, "CAL");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
+					irq_stats(j)->irq_tlb_count);
+	seq_printf(p, " Function call interrupts\n");
+	seq_printf(p, "%*s: ", prec, "TLB");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, " TLB shootdowns\n");
+#endif
+	return 0;
+}
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -219,6 +255,9 @@ int show_interrupts(struct seq_file *p, void *v)
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
+	if (i == NR_IRQS)
+		arch_show_interrupts(p, 3);
+
 	return 0;
 }
 
@@ -340,13 +379,23 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	/* Our stack starts directly behind the thread_info struct. */
 	unsigned long stack_start = (unsigned long) current_thread_info();
 	unsigned long sp = regs->gr[30];
+	unsigned long stack_usage;
+	unsigned int *last_usage;
 
 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
 	if (regs->sr[7])
 		return;
 
-	if (likely((sp - stack_start) < (THREAD_SIZE - STACK_MARGIN)))
+	/* calculate kernel stack usage */
+	stack_usage = sp - stack_start;
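+	/* per-CPU high-water mark, shown as "STK" in /proc/interrupts */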
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
 		return;
 
 	pr_emerg("stackcheck: %s will most likely overflow kernel stack "