- /*
-  * Generic helpers for smp ipi calls
-  *
-  * (C) Jens Axboe <jens.axboe@oracle.com> 2008
-  */
- #include <linux/init.h>
- #include <linux/module.h>
- #include <linux/percpu.h>
- #include <linux/rcupdate.h>
- #include <linux/rculist.h>
- #include <linux/smp.h>
- static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
- static LIST_HEAD(call_function_queue);
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
- enum {
-         CSD_FLAG_WAIT   = 0x01,
-         CSD_FLAG_ALLOC  = 0x02,
- };
- struct call_function_data {
-         struct call_single_data csd;
-         spinlock_t lock;
-         unsigned int refs;
-         cpumask_t cpumask;
-         struct rcu_head rcu_head;
- };
- struct call_single_queue {
-         struct list_head list;
-         spinlock_t lock;
- };
- static int __cpuinit init_call_single_data(void)
- {
-         int i;
-         for_each_possible_cpu(i) {
-                 struct call_single_queue *q = &per_cpu(call_single_queue, i);
-                 spin_lock_init(&q->lock);
-                 INIT_LIST_HEAD(&q->list);
-         }
-         return 0;
- }
- early_initcall(init_call_single_data);
- static void csd_flag_wait(struct call_single_data *data)
- {
-         /* Wait for response */
-         do {
-                 if (!(data->flags & CSD_FLAG_WAIT))
-                         break;
-                 cpu_relax();
-         } while (1);
- }
- /*
-  * Insert a previously allocated call_single_data element for execution
-  * on the given CPU. data must already have ->func, ->info, and ->flags set.
-  */
- static void generic_exec_single(int cpu, struct call_single_data *data)
- {
-         struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-         int wait = data->flags & CSD_FLAG_WAIT, ipi;
-         unsigned long flags;
-         spin_lock_irqsave(&dst->lock, flags);
-         /*
-          * Only raise the IPI if the queue was empty: otherwise an earlier
-          * sender's IPI is still pending and its handler will drain the
-          * whole list, including this new entry.
-          */
-         ipi = list_empty(&dst->list);
-         list_add_tail(&data->list, &dst->list);
-         spin_unlock_irqrestore(&dst->lock, flags);
-         /*
-          * Make the list addition visible before sending the ipi.
-          */
-         smp_mb();
-         if (ipi)
-                 arch_send_call_function_single_ipi(cpu);
-         if (wait)
-                 csd_flag_wait(data);
- }
- static void rcu_free_call_data(struct rcu_head *head)
- {
-         struct call_function_data *data;
-         data = container_of(head, struct call_function_data, rcu_head);
-         kfree(data);
- }
- /*
-  * Invoked by arch to handle an IPI for call function. Must be called with
-  * interrupts disabled.
-  */
- void generic_smp_call_function_interrupt(void)
- {
-         struct call_function_data *data;
-         int cpu = get_cpu();
-         /*
-          * It's ok to use list_for_each_entry_rcu() here even though we may
-          * delete 'data', since list_del_rcu() doesn't clear ->next.
-          */
-         rcu_read_lock();
-         list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
-                 int refs;
-                 if (!cpu_isset(cpu, data->cpumask))
-                         continue;
-                 data->csd.func(data->csd.info);
-                 spin_lock(&data->lock);
-                 cpu_clear(cpu, data->cpumask);
-                 WARN_ON(data->refs == 0);
-                 data->refs--;
-                 refs = data->refs;
-                 spin_unlock(&data->lock);
-                 if (refs)
-                         continue;
-                 /* We were the last CPU to respond: unlink and signal/free */
-                 spin_lock(&call_function_lock);
-                 list_del_rcu(&data->csd.list);
-                 spin_unlock(&call_function_lock);
-                 if (data->csd.flags & CSD_FLAG_WAIT) {
-                         /*
-                          * Serialize stores to data with the flag clear
-                          * and wakeup.
-                          */
-                         smp_wmb();
-                         data->csd.flags &= ~CSD_FLAG_WAIT;
-                 }
-                 if (data->csd.flags & CSD_FLAG_ALLOC)
-                         call_rcu(&data->rcu_head, rcu_free_call_data);
-         }
-         rcu_read_unlock();
-         put_cpu();
- }
- /*
-  * Invoked by arch to handle an IPI for call function single. Must be called
-  * from the arch with interrupts disabled.
-  */
- void generic_smp_call_function_single_interrupt(void)
- {
-         struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-         LIST_HEAD(list);
-         /*
-          * Need to see other stores to list head for checking whether
-          * list is empty without holding q->lock
-          */
-         smp_read_barrier_depends();
-         while (!list_empty(&q->list)) {
-                 unsigned int data_flags;
-                 spin_lock(&q->lock);
-                 list_replace_init(&q->list, &list);
-                 spin_unlock(&q->lock);
-                 while (!list_empty(&list)) {
-                         struct call_single_data *data;
-                         data = list_entry(list.next, struct call_single_data,
-                                           list);
-                         list_del(&data->list);
-                         /*
-                          * 'data' can be invalid after this call if
-                          * flags == 0 (when called through
-                          * generic_exec_single()), so save the flags away
-                          * before making the call.
-                          */
-                         data_flags = data->flags;
-                         data->func(data->info);
-                         if (data_flags & CSD_FLAG_WAIT) {
-                                 smp_wmb();
-                                 data->flags &= ~CSD_FLAG_WAIT;
-                         } else if (data_flags & CSD_FLAG_ALLOC)
-                                 kfree(data);
-                 }
-                 /*
-                  * See comment on outer loop
-                  */
-                 smp_read_barrier_depends();
-         }
- }
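- /*
-  * Editorial sketch, not part of the original file: an architecture using
-  * these generic helpers hooks the two handlers above into its IPI vectors
-  * and supplies arch_send_call_function_ipi() and
-  * arch_send_call_function_single_ipi(). The vector entry points below are
-  * hypothetical names; the generic_*_interrupt() calls and
-  * irq_enter()/irq_exit() are real:
-  *
-  *	void handle_IPI_call_func(void)
-  *	{
-  *		irq_enter();
-  *		generic_smp_call_function_interrupt();
-  *		irq_exit();
-  *	}
-  *
-  *	void handle_IPI_call_func_single(void)
-  *	{
-  *		irq_enter();
-  *		generic_smp_call_function_single_interrupt();
-  *		irq_exit();
-  *	}
-  */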
- /**
-  * smp_call_function_single - Run a function on a specific CPU
-  * @cpu: The CPU to run @func on.
-  * @func: The function to run. This must be fast and non-blocking.
-  * @info: An arbitrary pointer to pass to the function.
-  * @wait: If true, wait until the function has completed on the other CPU.
-  *
-  * Returns 0 on success, else a negative status code. Note that @wait
-  * will be implicitly turned on in case of allocation failures, since
-  * we fall back to on-stack allocation.
-  */
- int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
-                              int wait)
- {
-         struct call_single_data d;
-         unsigned long flags;
-         /*
-          * Prevent preemption and reschedule on another processor,
-          * as well as CPU removal.
-          */
-         int me = get_cpu();
-         int err = 0;
-         /* Can deadlock when called with interrupts disabled */
-         WARN_ON(irqs_disabled());
-         if (cpu == me) {
-                 /* The local CPU just runs the function directly */
-                 local_irq_save(flags);
-                 func(info);
-                 local_irq_restore(flags);
-         } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
-                 struct call_single_data *data = NULL;
-                 if (!wait) {
-                         data = kmalloc(sizeof(*data), GFP_ATOMIC);
-                         if (data)
-                                 data->flags = CSD_FLAG_ALLOC;
-                 }
-                 if (!data) {
-                         /* Allocation failed: fall back to on-stack data and wait */
-                         data = &d;
-                         data->flags = CSD_FLAG_WAIT;
-                 }
-                 data->func = func;
-                 data->info = info;
-                 generic_exec_single(cpu, data);
-         } else {
-                 err = -ENXIO;   /* CPU not online */
-         }
-         put_cpu();
-         return err;
- }
- EXPORT_SYMBOL(smp_call_function_single);
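- /*
-  * Example usage (editorial sketch; bump_counter() is a made-up helper,
-  * not part of this file): run a fast, non-blocking function on CPU 2
-  * and wait for it to complete. @func runs with interrupts disabled on
-  * the target CPU, so it must not sleep or take sleeping locks.
-  *
-  *	static void bump_counter(void *info)
-  *	{
-  *		atomic_inc(info);
-  *	}
-  *
-  *	atomic_t hits = ATOMIC_INIT(0);
-  *	int err = smp_call_function_single(2, bump_counter, &hits, 1);
-  *	if (err)
-  *		printk(KERN_WARNING "CPU 2 not online\n");
-  */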
- /**
-  * __smp_call_function_single(): Run a function on another CPU
-  * @cpu: The CPU to run on.
-  * @data: Pre-allocated and setup data structure
-  *
-  * Like smp_call_function_single(), but allow caller to pass in a
-  * pre-allocated data structure. Useful for embedding @data inside
-  * other structures, for instance.
-  */
- void __smp_call_function_single(int cpu, struct call_single_data *data)
- {
-         /* Can deadlock when called with interrupts disabled */
-         WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
-         generic_exec_single(cpu, data);
- }
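- /*
-  * Example (editorial sketch; struct remote_request and remote_handler()
-  * are hypothetical): embedding the call_single_data in a caller-owned
-  * structure, which is the use case named above. With neither WAIT nor
-  * ALLOC set, the caller gets no completion signal and no kfree(), so the
-  * request must stay allocated until remote_handler() has run:
-  *
-  *	struct remote_request {
-  *		struct call_single_data csd;
-  *		int cpu_seen;
-  *	};
-  *
-  *	static void remote_handler(void *info)
-  *	{
-  *		struct remote_request *req = info;
-  *		req->cpu_seen = smp_processor_id();
-  *	}
-  *
-  *	req->csd.flags = 0;
-  *	req->csd.func = remote_handler;
-  *	req->csd.info = req;
-  *	__smp_call_function_single(cpu, &req->csd);
-  */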
- /* Dummy function */
- static void quiesce_dummy(void *unused)
- {
- }
- /*
-  * Ensure stack based data used in call function mask is safe to free.
-  *
-  * This is needed by smp_call_function_mask when using on-stack data, because
-  * a single call function queue is shared by all CPUs, and any CPU may pick up
-  * the data item on the queue at any time before it is deleted. So we need to
-  * ensure that all CPUs have transitioned through a quiescent state after
-  * this call.
-  *
-  * This is a very slow function, implemented by sending synchronous IPIs to
-  * all possible CPUs. For this reason, we have to allocate data rather than
-  * use stack based data even in the case of synchronous calls. The stack based
-  * data is then just used for the deadlock/oom fallback, which will be very
-  * rare.
-  *
-  * If a faster scheme can be made, we could go back to preferring stack based
-  * data -- the data allocation/free is non-zero cost.
-  */
- static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
- {
-         struct call_single_data data;
-         int cpu;
-         data.func = quiesce_dummy;
-         data.info = NULL;
-         for_each_cpu_mask(cpu, mask) {
-                 /* The target clears CSD_FLAG_WAIT, so rearm it each time */
-                 data.flags = CSD_FLAG_WAIT;
-                 generic_exec_single(cpu, &data);
-         }
- }
- /**
-  * smp_call_function_mask(): Run a function on a set of other CPUs.
-  * @mask: The set of cpus to run on.
-  * @func: The function to run. This must be fast and non-blocking.
-  * @info: An arbitrary pointer to pass to the function.
-  * @wait: If true, wait (atomically) until function has completed on other CPUs.
-  *
-  * Returns 0 on success, else a negative status code.
-  *
-  * If @wait is true, then returns once @func has returned. Note that @wait
-  * will be implicitly turned on in case of allocation failures, since
-  * we fall back to on-stack allocation.
-  *
-  * You must not call this function with disabled interrupts or from a
-  * hardware interrupt handler or from a bottom half handler. Preemption
-  * must be disabled when calling this function.
-  */
- int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-                            int wait)
- {
-         struct call_function_data d;
-         struct call_function_data *data = NULL;
-         cpumask_t allbutself;
-         unsigned long flags;
-         int cpu, num_cpus;
-         int slowpath = 0;
-         /* Can deadlock when called with interrupts disabled */
-         WARN_ON(irqs_disabled());
-         cpu = smp_processor_id();
-         allbutself = cpu_online_map;
-         cpu_clear(cpu, allbutself);
-         cpus_and(mask, mask, allbutself);
-         num_cpus = cpus_weight(mask);
-         /*
-          * If zero CPUs, return. If just a single CPU, turn this request
-          * into a targeted single call instead since it's faster.
-          */
-         if (!num_cpus)
-                 return 0;
-         else if (num_cpus == 1) {
-                 cpu = first_cpu(mask);
-                 return smp_call_function_single(cpu, func, info, wait);
-         }
-         data = kmalloc(sizeof(*data), GFP_ATOMIC);
-         if (data) {
-                 data->csd.flags = CSD_FLAG_ALLOC;
-                 if (wait)
-                         data->csd.flags |= CSD_FLAG_WAIT;
-         } else {
-                 /*
-                  * Allocation failed: use the on-stack element. This forces
-                  * a synchronous call plus the quiescing slowpath below, so
-                  * the stack data cannot be reused while another CPU still
-                  * holds a reference to it.
-                  */
-                 data = &d;
-                 data->csd.flags = CSD_FLAG_WAIT;
-                 wait = 1;
-                 slowpath = 1;
-         }
-         spin_lock_init(&data->lock);
-         data->csd.func = func;
-         data->csd.info = info;
-         data->refs = num_cpus;
-         data->cpumask = mask;
-         spin_lock_irqsave(&call_function_lock, flags);
-         list_add_tail_rcu(&data->csd.list, &call_function_queue);
-         spin_unlock_irqrestore(&call_function_lock, flags);
-         /*
-          * Make the list addition visible before sending the ipi.
-          */
-         smp_mb();
-         /* Send a message to all CPUs in the map */
-         arch_send_call_function_ipi(mask);
-         /* Optionally wait for the CPUs to complete */
-         if (wait) {
-                 csd_flag_wait(&data->csd);
-                 if (unlikely(slowpath))
-                         smp_call_function_mask_quiesce_stack(mask);
-         }
-         return 0;
- }
- EXPORT_SYMBOL(smp_call_function_mask);
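- /*
-  * Example usage (editorial sketch; drop_pcpu_cache() is a made-up
-  * handler): run a function on CPUs 1 and 2 and wait for both, using
-  * the old-style cpumask_t accessors this file already relies on.
-  * Preemption must be disabled around the call, as documented above.
-  *
-  *	cpumask_t mask = CPU_MASK_NONE;
-  *
-  *	cpu_set(1, mask);
-  *	cpu_set(2, mask);
-  *	preempt_disable();
-  *	smp_call_function_mask(mask, drop_pcpu_cache, NULL, 1);
-  *	preempt_enable();
-  */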
- /**
-  * smp_call_function(): Run a function on all other CPUs.
-  * @func: The function to run. This must be fast and non-blocking.
-  * @info: An arbitrary pointer to pass to the function.
-  * @wait: If true, wait (atomically) until function has completed on other CPUs.
-  *
-  * Returns 0 on success, else a negative status code.
-  *
-  * If @wait is true, then returns once @func has returned; otherwise
-  * it returns just before the target CPUs call @func. In case of allocation
-  * failure, @wait will be implicitly turned on.
-  *
-  * You must not call this function with disabled interrupts or from a
-  * hardware interrupt handler or from a bottom half handler.
-  */
- int smp_call_function(void (*func)(void *), void *info, int wait)
- {
-         int ret;
-         preempt_disable();
-         ret = smp_call_function_mask(cpu_online_map, func, info, wait);
-         preempt_enable();
-         return ret;
- }
- EXPORT_SYMBOL(smp_call_function);
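- /*
-  * Example usage (editorial sketch; drain_local_state() is hypothetical):
-  * run a function on every other online CPU, wait for all of them, then
-  * handle the calling CPU locally. This call-everyone-then-self pattern
-  * is essentially what on_each_cpu() wraps up.
-  *
-  *	smp_call_function(drain_local_state, NULL, 1);
-  *	local_irq_disable();
-  *	drain_local_state(NULL);
-  *	local_irq_enable();
-  */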
- /*
-  * The ipi_call_lock*() helpers let architecture code (typically CPU
-  * bringup) hold off call-function traffic while related state, such as
-  * the online map, is being changed.
-  */
- void ipi_call_lock(void)
- {
-         spin_lock(&call_function_lock);
- }
- void ipi_call_unlock(void)
- {
-         spin_unlock(&call_function_lock);
- }
- void ipi_call_lock_irq(void)
- {
-         spin_lock_irq(&call_function_lock);
- }
- void ipi_call_unlock_irq(void)
- {
-         spin_unlock_irq(&call_function_lock);
- }