@@ -6,8 +6,6 @@
  * Distribute under GPLv2.
  *
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- * Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/export.h>
@@ -627,146 +625,17 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
 }
 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
 
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
-	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
-	list_add_tail(&cp->list, head);
-
-	/* Trigger the softirq only if the list was previously empty. */
-	if (head->next == &cp->list)
-		raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
-	struct call_single_data *cp = data;
-	unsigned long flags;
-	int softirq;
-
-	softirq = *(int *)cp->info;
-	local_irq_save(flags);
-	__local_trigger(cp, softirq);
-	local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	if (cpu_online(cpu)) {
-		cp->func = remote_softirq_receive;
-		cp->info = &softirq;
-		cp->flags = 0;
-
-		__smp_call_function_single(cpu, cp, 0);
-		return 0;
-	}
-	return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu. If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
-	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
-		__local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	unsigned long flags;
-	int this_cpu;
-
-	local_irq_save(flags);
-	this_cpu = smp_processor_id();
-	__send_remote_softirq(cp, cpu, this_cpu, softirq);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int remote_softirq_cpu_notify(struct notifier_block *self,
-				     unsigned long action, void *hcpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-		int i;
-
-		local_irq_disable();
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
-			struct list_head *local_head;
-
-			if (list_empty(head))
-				continue;
-
-			local_head = &__get_cpu_var(softirq_work_list[i]);
-			list_splice_init(head, local_head);
-			raise_softirq_irqoff(i);
-		}
-		local_irq_enable();
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block remote_softirq_cpu_notifier = {
-	.notifier_call = remote_softirq_cpu_notify,
-};
-
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		int i;
-
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
-		for (i = 0; i < NR_SOFTIRQS; i++)
-			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
-	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }