@@ -61,6 +61,26 @@ static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif

+static void stack_overflow(void)
+{
+        printk("low stack detected by irq handler\n");
+        dump_stack();
+}
+
+static inline void call_on_stack2(void *func, void *stack,
+                unsigned long arg1, unsigned long arg2)
+{
+        unsigned long bx;
+        asm volatile(
+                "       xchgl   %%ebx,%%esp     \n"
+                "       call    *%%edi          \n"
+                "       movl    %%ebx,%%esp     \n"
+                : "=a" (arg1), "=d" (arg2), "=b" (bx)
+                : "0" (arg1), "1" (arg2), "2" (stack),
+                  "D" (func)
+                : "memory", "cc", "ecx");
+}
+
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
@@ -76,6 +96,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
         union irq_ctx *curctx, *irqctx;
         u32 *isp;
 #endif
+        int overflow = 0;

         if (unlikely((unsigned)irq >= NR_IRQS)) {
                 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
@@ -92,11 +113,8 @@ unsigned int do_IRQ(struct pt_regs *regs)

                 __asm__ __volatile__("andl %%esp,%0" :
                                 "=r" (sp) : "0" (THREAD_SIZE - 1));
-                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-                        printk("do_IRQ: stack overflow: %ld\n",
-                                sp - sizeof(struct thread_info));
-                        dump_stack();
-                }
+                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN)))
+                        overflow = 1;
         }
 #endif

@@ -112,8 +130,6 @@ unsigned int do_IRQ(struct pt_regs *regs)
          * current stack (which is the irq stack already after all)
          */
         if (curctx != irqctx) {
-                int arg1, arg2, bx;
-
                 /* build the stack frame on the IRQ stack */
                 isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
                 irqctx->tinfo.task = curctx->tinfo.task;
@@ -127,18 +143,19 @@ unsigned int do_IRQ(struct pt_regs *regs)
                         (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                         (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

-                asm volatile(
-                        "       xchgl   %%ebx,%%esp     \n"
-                        "       call    *%%edi          \n"
-                        "       movl    %%ebx,%%esp     \n"
-                        : "=a" (arg1), "=d" (arg2), "=b" (bx)
-                        : "0" (irq), "1" (desc), "2" (isp),
-                          "D" (desc->handle_irq)
-                        : "memory", "cc", "ecx"
-                );
+                /* Execute warning on interrupt stack */
+                if (unlikely(overflow))
+                        call_on_stack2(stack_overflow, isp, 0, 0);
+
+                call_on_stack2(desc->handle_irq, isp, irq, (unsigned long)desc);
         } else
 #endif
+        {
+                /* AK: Slightly bogus here */
+                if (overflow)
+                        stack_overflow();
                 desc->handle_irq(irq, desc);
+        }

         irq_exit();
         set_irq_regs(old_regs);