@@ -131,23 +131,17 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-void local_bh_enable(void)
+static inline void _local_bh_enable_ip(unsigned long ip)
 {
+	WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned long flags;
-
-	WARN_ON_ONCE(in_irq());
-#endif
-	WARN_ON_ONCE(irqs_disabled());
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_save(flags);
+	local_irq_disable();
 #endif
 	/*
 	 * Are softirqs going to be turned on now:
 	 */
 	if (softirq_count() == SOFTIRQ_OFFSET)
-		trace_softirqs_on((unsigned long)__builtin_return_address(0));
+		trace_softirqs_on(ip);
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
@@ -159,40 +153,20 @@ void local_bh_enable(void)
 
 	dec_preempt_count();
 #ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_restore(flags);
+	local_irq_enable();
 #endif
 	preempt_check_resched();
 }
+
+void local_bh_enable(void)
+{
+	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
 EXPORT_SYMBOL(local_bh_enable);
 
 void local_bh_enable_ip(unsigned long ip)
 {
-#ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned long flags;
-
-	WARN_ON_ONCE(in_irq());
-
-	local_irq_save(flags);
-#endif
-	/*
-	 * Are softirqs going to be turned on now:
-	 */
-	if (softirq_count() == SOFTIRQ_OFFSET)
-		trace_softirqs_on(ip);
-	/*
-	 * Keep preemption disabled until we are done with
-	 * softirq processing:
-	 */
-	sub_preempt_count(SOFTIRQ_OFFSET - 1);
-
-	if (unlikely(!in_interrupt() && local_softirq_pending()))
-		do_softirq();
-
-	dec_preempt_count();
-#ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_restore(flags);
-#endif
-	preempt_check_resched();
+	_local_bh_enable_ip(ip);
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
@@ -347,9 +321,8 @@ void raise_softirq(unsigned int nr)
 	local_irq_restore(flags);
 }
 
-void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
+void open_softirq(int nr, void (*action)(struct softirq_action *))
 {
-	softirq_vec[nr].data = data;
 	softirq_vec[nr].action = action;
 }
 
@@ -360,10 +333,8 @@ struct tasklet_head
 	struct tasklet_struct **tail;
 };
 
-/* Some compilers disobey section attribute on statics when not
-   initialized -- RR */
-static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
-static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
+static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
 void __tasklet_schedule(struct tasklet_struct *t)
 {
@@ -503,8 +474,8 @@ void __init softirq_init(void)
 			&per_cpu(tasklet_hi_vec, cpu).head;
 	}
 
-	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
-	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
 static int ksoftirqd(void * __bind_cpu)
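
Illustration (not part of the patch): a minimal sketch of how a softirq user registers its handler after this change, assuming the usual kernel build context. The handler keeps its struct softirq_action * parameter; only the always-NULL data argument disappears from open_softirq(). MY_SOFTIRQ and my_softirq_action are hypothetical names used purely for the example.

	#include <linux/init.h>
	#include <linux/interrupt.h>	/* open_softirq(), struct softirq_action */

	/*
	 * Runs in softirq context; any state the handler needs comes from
	 * per-CPU or global data, not from a registration-time data pointer.
	 */
	static void my_softirq_action(struct softirq_action *a)
	{
		/* e.g. drain a per-CPU work list, as tasklet_action() does */
	}

	static int __init my_subsys_init(void)
	{
		/* before this patch: open_softirq(MY_SOFTIRQ, my_softirq_action, NULL); */
		open_softirq(MY_SOFTIRQ, my_softirq_action);
		return 0;
	}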