@@ -100,13 +100,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	irqctx->tinfo.task = curctx->tinfo.task;
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
-	/*
-	 * Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context.
-	 */
-	irqctx->tinfo.preempt_count =
-		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -196,7 +191,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	if (unlikely(!desc))
 		return false;
 
-	if (!execute_on_irq_stack(overflow, desc, irq)) {
+	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		desc->handle_irq(irq, desc);