- /*
- * Intel SMP support routines.
- *
- * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
- * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
- * (c) 2002,2003 Andi Kleen, SuSE Labs.
- *
- * This code is released under the GNU General Public License version 2 or
- * later.
- */
- #include <linux/init.h>
- #include <linux/mm.h>
- #include <linux/irq.h>
- #include <linux/delay.h>
- #include <linux/spinlock.h>
- #include <linux/smp_lock.h>
- #include <linux/smp.h>
- #include <linux/kernel_stat.h>
- #include <linux/mc146818rtc.h>
- #include <linux/interrupt.h>
- #include <asm/mtrr.h>
- #include <asm/pgalloc.h>
- #include <asm/tlbflush.h>
- #include <asm/mach_apic.h>
- #include <asm/mmu_context.h>
- #include <asm/proto.h>
- #include <asm/apicdef.h>
- /*
- * Smarter SMP flushing macros.
- * c/o Linus Torvalds.
- *
- * These mean you can really definitely utterly forget about
- * writing to user space from interrupts. (It's not allowed anyway.)
- *
- * Optimizations Manfred Spraul <manfred@colorfullife.com>
- */
- static cpumask_t flush_cpumask;
- static struct mm_struct * flush_mm;
- static unsigned long flush_va;
- static DEFINE_SPINLOCK(tlbstate_lock);
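- /* Sentinel value for flush_va: flush the whole TLB rather than a single page. */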
- #define FLUSH_ALL -1ULL
- /*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- */
- static inline void leave_mm (unsigned long cpu)
- {
- if (read_pda(mmu_state) == TLBSTATE_OK)
- BUG();
- clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
- load_cr3(swapper_pg_dir);
- }
- /*
- *
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
- * Stop ipi delivery for the old mm. This is not synchronized with
- * the other cpus, but smp_invalidate_interrupt ignores flush ipis
- * for the wrong mm, and in the worst case we perform a superfluous
- * tlb flush.
- * 1a2) set cpu mmu_state to TLBSTATE_OK
- * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- * was in lazy tlb mode.
- * 1a3) update cpu active_mm
- * Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
- * Now the other cpus will send tlb flush ipis.
- * 1a5) change cr3.
- * 1b) thread switch without mm change
- * cpu active_mm is correct, cpu0 already handles
- * flush ipis.
- * 1b1) set cpu mmu_state to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- * Atomically set the bit [other cpus will start sending flush ipis],
- * and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %esp, i.e. current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- * runs in kernel space, the cpu could load tlb entries for user space
- * pages.
- *
- * The good news is that cpu mmu_state is local to each cpu, no
- * write/read ordering problems.
- */
- /*
- * TLB flush IPI:
- *
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
- asmlinkage void smp_invalidate_interrupt (void)
- {
- unsigned long cpu;
- cpu = get_cpu();
- if (!cpu_isset(cpu, flush_cpumask))
- goto out;
- /*
- * This was a BUG() but until someone can quote me the
- * line from the intel manual that guarantees an IPI to
- * multiple CPUs is retried _only_ on the erroring CPUs
- * it's staying as a return.
- *
- * BUG();
- */
-
- if (flush_mm == read_pda(active_mm)) {
- if (read_pda(mmu_state) == TLBSTATE_OK) {
- if (flush_va == FLUSH_ALL)
- local_flush_tlb();
- else
- __flush_tlb_one(flush_va);
- } else
- leave_mm(cpu);
- }
- out:
- ack_APIC_irq();
- cpu_clear(cpu, flush_cpumask);
- put_cpu_no_resched();
- }
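- /*
- * Ask every CPU in 'cpumask' to flush 'va' (or its whole TLB if
- * va == FLUSH_ALL) for 'mm', then spin until each of them has
- * cleared itself from flush_cpumask in smp_invalidate_interrupt().
- */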
- static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
- unsigned long va)
- {
- cpumask_t tmp;
- /*
- * A couple of (to be removed) sanity checks:
- *
- * - we do not send IPIs to not-yet booted CPUs.
- * - current CPU must not be in mask
- * - mask must exist :)
- */
- BUG_ON(cpus_empty(cpumask));
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(tmp, cpumask));
- BUG_ON(cpu_isset(smp_processor_id(), cpumask));
- BUG_ON(!mm);
- /*
- * I'm not happy about this global shared spinlock in the
- * MM hot path, but we'll see how contended it is.
- * Temporarily this turns IRQs off, so that lockups are
- * detected by the NMI watchdog.
- */
- spin_lock(&tlbstate_lock);
-
- flush_mm = mm;
- flush_va = va;
- cpus_or(flush_cpumask, cpumask, flush_cpumask);
- /*
- * We have to send the IPI only to
- * CPUs affected.
- */
- send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
- while (!cpus_empty(flush_cpumask))
- mb(); /* nothing. lockup detection does not belong here */
- flush_mm = NULL;
- flush_va = 0;
- spin_unlock(&tlbstate_lock);
- }
-
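- /* Flush current->mm: locally right away, and via IPI on every other CPU that has it loaded. */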
- void flush_tlb_current_task(void)
- {
- struct mm_struct *mm = current->mm;
- cpumask_t cpu_mask;
- preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
- local_flush_tlb();
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
- preempt_enable();
- }
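- /*
- * Flush all of 'mm'. If this CPU is only borrowing 'mm' lazily it drops
- * it via leave_mm() instead of flushing; every other CPU in
- * mm->cpu_vm_mask is flushed by IPI.
- */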
- void flush_tlb_mm (struct mm_struct * mm)
- {
- cpumask_t cpu_mask;
- preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
- if (current->active_mm == mm) {
- if (current->mm)
- local_flush_tlb();
- else
- leave_mm(smp_processor_id());
- }
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
- preempt_enable();
- }
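- /* Flush a single page of vma->vm_mm, locally and on every other CPU using that mm. */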
- void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
- {
- struct mm_struct *mm = vma->vm_mm;
- cpumask_t cpu_mask;
- preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
- if (current->active_mm == mm) {
- if (current->mm)
- __flush_tlb_one(va);
- else
- leave_mm(smp_processor_id());
- }
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, va);
- preempt_enable();
- }
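- /*
- * Per-CPU helper for flush_tlb_all(): flush the whole local TLB and
- * leave lazy TLB mode if this CPU was in it.
- */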
- static void do_flush_tlb_all(void* info)
- {
- unsigned long cpu = smp_processor_id();
- __flush_tlb_all();
- if (read_pda(mmu_state) == TLBSTATE_LAZY)
- leave_mm(cpu);
- }
- void flush_tlb_all(void)
- {
- on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
- }
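- /* Stop all other CPUs on behalf of the kernel debugger. */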
- void smp_kdb_stop(void)
- {
- send_IPI_allbutself(KDB_VECTOR);
- }
- /*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
- void smp_send_reschedule(int cpu)
- {
- send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
- }
- /*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
- static DEFINE_SPINLOCK(call_lock);
- struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
- };
- static struct call_data_struct * call_data;
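- /*
- * Allow code outside this file to exclude smp_call_function() by
- * taking call_lock with interrupts disabled.
- */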
- void lock_ipi_call_lock(void)
- {
- spin_lock_irq(&call_lock);
- }
- void unlock_ipi_call_lock(void)
- {
- spin_unlock_irq(&call_lock);
- }
- /*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- */
- static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
- {
- struct call_data_struct data;
- int cpus = 1;
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
- if (!wait)
- return;
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- }
- /*
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The CPU to run the function on.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until the function has completed on the other CPU.
- *
- * Returns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>,
- * or has already executed it.
- */
- int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
- {
- /* prevent preemption and reschedule on another processor */
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
- spin_lock_bh(&call_lock);
- __smp_call_function_single(cpu, func, info, nonatomic, wait);
- spin_unlock_bh(&call_lock);
- put_cpu();
- return 0;
- }
- /*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
- static void __smp_call_function (void (*func) (void *info), void *info,
- int nonatomic, int wait)
- {
- struct call_data_struct data;
- int cpus = num_online_cpus()-1;
- if (!cpus)
- return;
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
- if (!wait)
- return;
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- }
- /*
- * smp_call_function - run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
- * @wait: If true, wait (atomically) until function has completed on other
- * CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute func, or have already executed it.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- * Actually there are a few legal cases, like panic.
- */
- int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
- {
- spin_lock(&call_lock);
- __smp_call_function(func,info,nonatomic,wait);
- spin_unlock(&call_lock);
- return 0;
- }
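- /* Take this CPU out of service: mark it offline and disable its local APIC. */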
- void smp_stop_cpu(void)
- {
- /*
- * Remove this CPU:
- */
- cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_disable();
- disable_local_APIC();
- local_irq_enable();
- }
- static void smp_really_stop_cpu(void *dummy)
- {
- smp_stop_cpu();
- for (;;)
- asm("hlt");
- }
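- /*
- * Stop all CPUs, e.g. for reboot or panic: tell the others to halt in
- * smp_really_stop_cpu(), then shut down the local APIC here as well.
- */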
- void smp_send_stop(void)
- {
- int nolock = 0;
- if (reboot_force)
- return;
- /* Don't deadlock on the call lock in panic */
- if (!spin_trylock(&call_lock)) {
- /* ignore locking because we have panicked anyway */
- nolock = 1;
- }
- __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
- if (!nolock)
- spin_unlock(&call_lock);
- local_irq_disable();
- disable_local_APIC();
- local_irq_enable();
- }
- /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
- */
- asmlinkage void smp_reschedule_interrupt(void)
- {
- ack_APIC_irq();
- }
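- /*
- * IPI handler for CALL_FUNCTION_VECTOR: run call_data->func and report
- * progress back to the sender through the started/finished counters.
- */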
- asmlinkage void smp_call_function_interrupt(void)
- {
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
- ack_APIC_irq();
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
- irq_enter();
- (*func)(info);
- irq_exit();
- if (wait) {
- mb();
- atomic_inc(&call_data->finished);
- }
- }
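- /*
- * A variant of smp_processor_id() that derives the logical CPU number
- * from the hardware APIC id, falling back to CPU 0 when no mapping
- * exists (no MP table/ACPI, or called too early).
- */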
- int safe_smp_processor_id(void)
- {
- int apicid, i;
- if (disable_apic)
- return 0;
- apicid = hard_smp_processor_id();
- if (x86_cpu_to_apicid[apicid] == apicid)
- return apicid;
- for (i = 0; i < NR_CPUS; ++i) {
- if (x86_cpu_to_apicid[i] == apicid)
- return i;
- }
- /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
- * or called too early. Either way, we must be CPU 0. */
- if (x86_cpu_to_apicid[0] == BAD_APICID)
- return 0;
- return 0; /* Should not happen */
- }