@@ -29,7 +29,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
-#include <asm/irq.h>
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -134,7 +133,6 @@ EXPORT_SYMBOL(local_bh_disable);
 
 static void __local_bh_enable(unsigned int cnt)
 {
-	WARN_ON_ONCE(in_irq());
 	WARN_ON_ONCE(!irqs_disabled());
 
 	if (softirq_count() == cnt)
@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
  */
 void _local_bh_enable(void)
 {
+	WARN_ON_ONCE(in_irq());
 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
@@ -171,8 +170,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 */
 	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
 
-	if (unlikely(!in_interrupt() && local_softirq_pending()))
+	if (unlikely(!in_interrupt() && local_softirq_pending())) {
+		/*
+		 * Run softirq if any is pending, and do it on its own stack,
+		 * as this may be called deep in a task call stack already.
+		 */
 		do_softirq();
+	}
 
 	dec_preempt_count();
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -280,10 +284,11 @@ restart:
 
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
+	WARN_ON_ONCE(in_interrupt());
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-#ifndef __ARCH_HAS_DO_SOFTIRQ
+
 
 asmlinkage void do_softirq(void)
 {
@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void)
 	pending = local_softirq_pending();
 
 	if (pending)
-		__do_softirq();
+		do_softirq_own_stack();
 
 	local_irq_restore(flags);
 }
 
-#endif
-
 /*
  * Enter an interrupt context.
  */
@@ -329,15 +332,21 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
 		 * it is the irq stack, because it should be near empty
-		 * at this stage. But we have no way to know if the arch
-		 * calls irq_exit() on the irq stack. So call softirq
-		 * in its own stack to prevent from any overrun on top
-		 * of a potentially deep task stack.
+		 * at this stage.
 		 */
-		do_softirq();
+		__do_softirq();
+#else
+		/*
+		 * Otherwise, irq_exit() is called on the task stack, which
+		 * may already be deep. So run softirq on its own stack to
+		 * prevent any overrun.
+		 */
+		do_softirq_own_stack();
+#endif
 	} else {
 		wakeup_softirqd();
 	}
@@ -771,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu)
 {
 	local_irq_disable();
 	if (local_softirq_pending()) {
+		/*
+		 * We can safely run softirq inline on the current stack, as we
+		 * are not deep in the task stack here.
+		 */
 		__do_softirq();
		rcu_note_context_switch(cpu);
 		local_irq_enable();
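
The hunks above route softirq processing through do_softirq_own_stack(), which is declared outside this file. Below is a minimal sketch of how that helper can be expected to be wired up; it is illustrative only, not part of this patch, and the exact header location and fallback form are assumptions:

#ifdef __ARCH_HAS_DO_SOFTIRQ
/* The architecture switches to a dedicated softirq/irq stack. */
void do_softirq_own_stack(void);
#else
/* No dedicated stack: run softirq inline on the current stack. */
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

With that split, do_softirq() and the !CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK branch of invoke_softirq() always run softirq on a stack that is known to be shallow, regardless of how deep the interrupted task stack already is.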