@@ -356,7 +356,8 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
 /* Tasklets */
 struct tasklet_head
 {
-        struct tasklet_struct *list;
+        struct tasklet_struct *head;
+        struct tasklet_struct **tail;
 };
 
 /* Some compilers disobey section attribute on statics when not
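The structural change above is the heart of the patch: the old tasklet_head held only a head pointer, so __tasklet_schedule() pushed new tasklets onto the front and a burst of tasklets ran in the reverse of the order they were queued. The new layout adds a tail pointer that always addresses the terminating NULL link of the list, either &last->next or &head when the queue is empty, which makes an in-order O(1) append possible. A rough user-space sketch of the same shape (the names here are illustrative, not the kernel's):

struct tnode {                          /* stands in for struct tasklet_struct */
        struct tnode *next;
};

struct tqueue {                         /* stands in for struct tasklet_head */
        struct tnode *head;             /* first element, NULL when empty */
        struct tnode **tail;            /* &last->next, or &head when empty */
};

The invariant is that *tail is always the NULL that terminates the list, so every operation below only has to keep that one promise.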
@@ -369,8 +370,9 @@ void __tasklet_schedule(struct tasklet_struct *t)
         unsigned long flags;
 
         local_irq_save(flags);
-        t->next = __get_cpu_var(tasklet_vec).list;
-        __get_cpu_var(tasklet_vec).list = t;
+        t->next = NULL;
+        *__get_cpu_var(tasklet_vec).tail = t;
+        __get_cpu_var(tasklet_vec).tail = &(t->next);
         raise_softirq_irqoff(TASKLET_SOFTIRQ);
         local_irq_restore(flags);
 }
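Both scheduling paths (and the re-queue paths in the softirq handlers further down) now use the same three-store append in place of the old two-store push. Sketched against the illustrative tqueue types above, repeated here so the fragment stands alone:

struct tnode  { struct tnode *next; };
struct tqueue { struct tnode *head; struct tnode **tail; };

static void tqueue_append(struct tqueue *q, struct tnode *n)
{
        n->next = NULL;                 /* new element terminates the list */
        *q->tail = n;                   /* hook it onto the current last link */
        q->tail = &n->next;             /* the next append goes after it */
}

Setting n->next to NULL first is what keeps the invariant: after the append, *q->tail is again the NULL at the end of the list.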
@@ -382,8 +384,9 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
         unsigned long flags;
 
         local_irq_save(flags);
-        t->next = __get_cpu_var(tasklet_hi_vec).list;
-        __get_cpu_var(tasklet_hi_vec).list = t;
+        t->next = NULL;
+        *__get_cpu_var(tasklet_hi_vec).tail = t;
+        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
         raise_softirq_irqoff(HI_SOFTIRQ);
         local_irq_restore(flags);
 }
@@ -395,8 +398,9 @@ static void tasklet_action(struct softirq_action *a)
         struct tasklet_struct *list;
 
         local_irq_disable();
-        list = __get_cpu_var(tasklet_vec).list;
-        __get_cpu_var(tasklet_vec).list = NULL;
+        list = __get_cpu_var(tasklet_vec).head;
+        __get_cpu_var(tasklet_vec).head = NULL;
+        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
         local_irq_enable();
 
         while (list) {
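tasklet_action() (and tasklet_hi_action() below) still detaches the whole per-CPU list with interrupts disabled, but it must now also reset the tail pointer; otherwise later appends would keep writing through a link that is no longer part of the queue. The detach step, roughly, in the sketch's terms:

struct tnode  { struct tnode *next; };
struct tqueue { struct tnode *head; struct tnode **tail; };

static struct tnode *tqueue_take_all(struct tqueue *q)
{
        struct tnode *list = q->head;

        q->head = NULL;                 /* queue becomes empty again... */
        q->tail = &q->head;             /* ...so tail must point back at head */
        return list;                    /* caller walks the detached chain */
}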
@@ -416,8 +420,9 @@ static void tasklet_action(struct softirq_action *a)
                 }
 
                 local_irq_disable();
-                t->next = __get_cpu_var(tasklet_vec).list;
-                __get_cpu_var(tasklet_vec).list = t;
+                t->next = NULL;
+                *__get_cpu_var(tasklet_vec).tail = t;
+                __get_cpu_var(tasklet_vec).tail = &(t->next);
                 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                 local_irq_enable();
         }
@@ -428,8 +433,9 @@ static void tasklet_hi_action(struct softirq_action *a)
         struct tasklet_struct *list;
 
         local_irq_disable();
-        list = __get_cpu_var(tasklet_hi_vec).list;
-        __get_cpu_var(tasklet_hi_vec).list = NULL;
+        list = __get_cpu_var(tasklet_hi_vec).head;
+        __get_cpu_var(tasklet_hi_vec).head = NULL;
+        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
         local_irq_enable();
 
         while (list) {
@@ -449,8 +455,9 @@ static void tasklet_hi_action(struct softirq_action *a)
                 }
 
                 local_irq_disable();
-                t->next = __get_cpu_var(tasklet_hi_vec).list;
-                __get_cpu_var(tasklet_hi_vec).list = t;
+                t->next = NULL;
+                *__get_cpu_var(tasklet_hi_vec).tail = t;
+                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                 __raise_softirq_irqoff(HI_SOFTIRQ);
                 local_irq_enable();
         }
@@ -487,6 +494,15 @@ EXPORT_SYMBOL(tasklet_kill);
 
 void __init softirq_init(void)
 {
+        int cpu;
+
+        for_each_possible_cpu(cpu) {
+                per_cpu(tasklet_vec, cpu).tail =
+                        &per_cpu(tasklet_vec, cpu).head;
+                per_cpu(tasklet_hi_vec, cpu).tail =
+                        &per_cpu(tasklet_hi_vec, cpu).head;
+        }
+
         open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
         open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
 }
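Because the empty state is now head == NULL plus tail == &head, every per-CPU queue needs its tail pointer set up before the first tasklet is ever scheduled on that CPU; that is what the new loop over all possible CPUs in softirq_init() does. The equivalent initialisation in the sketch's terms:

struct tnode  { struct tnode *next; };
struct tqueue { struct tnode *head; struct tnode **tail; };

static void tqueue_init(struct tqueue *q)
{
        q->head = NULL;
        q->tail = &q->head;             /* an uninitialised tail would make the
                                           very first append write through garbage */
}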
@@ -555,9 +571,12 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
                 return;
 
         /* CPU is dead, so no lock needed. */
-        for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
+        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                 if (*i == t) {
                         *i = t->next;
+                        /* If this was the tail element, move the tail ptr */
+                        if (*i == NULL)
+                                per_cpu(tasklet_vec, cpu).tail = i;
                         return;
                 }
         }
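tasklet_kill_immediate() walks the dead CPU's list through the link pointers themselves, so unlinking is a single store; the new twist is that removing the last element must also pull the tail pointer back to the link that now holds the terminating NULL. As a hedged sketch of the same removal:

struct tnode  { struct tnode *next; };
struct tqueue { struct tnode *head; struct tnode **tail; };

static void tqueue_remove(struct tqueue *q, struct tnode *n)
{
        struct tnode **i;

        /* Walk the links, not the nodes, so unlinking is one store. */
        for (i = &q->head; *i; i = &(*i)->next) {
                if (*i != n)
                        continue;
                *i = n->next;           /* unlink n */
                if (*i == NULL)         /* n was the last element, so the tail */
                        q->tail = i;    /* must back up to this link */
                return;
        }
}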
@@ -566,20 +585,20 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 
 static void takeover_tasklets(unsigned int cpu)
 {
-        struct tasklet_struct **i;
-
         /* CPU is dead, so no lock needed. */
         local_irq_disable();
 
         /* Find end, append list for that CPU. */
-        for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
-        *i = per_cpu(tasklet_vec, cpu).list;
-        per_cpu(tasklet_vec, cpu).list = NULL;
+        *__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
+        __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+        per_cpu(tasklet_vec, cpu).head = NULL;
+        per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
         raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
-        for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
-        *i = per_cpu(tasklet_hi_vec, cpu).list;
-        per_cpu(tasklet_hi_vec, cpu).list = NULL;
+        *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+        __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+        per_cpu(tasklet_hi_vec, cpu).head = NULL;
+        per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
         raise_softirq_irqoff(HI_SOFTIRQ);
 
         local_irq_enable();
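Finally, takeover_tasklets() no longer has to walk to the end of the live CPU's list: with a tail pointer, the dead CPU's queue can be spliced on in O(1) and then reset to the empty state. A rough sketch of the same splice; the empty-source check here is a defensive choice of this sketch (the patch itself splices the dead CPU's lists unconditionally, with interrupts disabled):

struct tnode  { struct tnode *next; };
struct tqueue { struct tnode *head; struct tnode **tail; };

static void tqueue_splice(struct tqueue *dst, struct tqueue *src)
{
        if (src->head == NULL)          /* empty source: nothing to move, and */
                return;                 /* dst->tail must not be redirected */
        *dst->tail = src->head;         /* chain src's first node after dst */
        dst->tail = src->tail;          /* dst now ends where src ended */
        src->head = NULL;               /* leave src in the empty state */
        src->tail = &src->head;
}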