- #include <linux/init.h>
- #include <linux/smp.h>
- #include <linux/module.h>
- #include <linux/sched.h>
- #include <linux/percpu.h>
- #include <linux/bootmem.h>
- #include <asm/nmi.h>
- #include <asm/irq.h>
- #include <asm/smp.h>
- #include <asm/cpu.h>
- #include <asm/numa.h>
- #include <mach_apic.h>
- /* Number of siblings per CPU package */
- int smp_num_siblings = 1;
- EXPORT_SYMBOL(smp_num_siblings);
- /* Last level cache ID of each logical CPU */
- DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
- /* bitmap of online cpus */
- cpumask_t cpu_online_map __read_mostly;
- EXPORT_SYMBOL(cpu_online_map);
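- /* bitmaps tracking AP boot progress (callin/callout) and possible cpus */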
- cpumask_t cpu_callin_map;
- cpumask_t cpu_callout_map;
- cpumask_t cpu_possible_map;
- EXPORT_SYMBOL(cpu_possible_map);
- /* representing HT siblings of each logical CPU */
- DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
- EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
- /* representing HT and core siblings of each logical CPU */
- DEFINE_PER_CPU(cpumask_t, cpu_core_map);
- EXPORT_PER_CPU_SYMBOL(cpu_core_map);
- /* Per CPU bogomips and other parameters */
- DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
- EXPORT_PER_CPU_SYMBOL(cpu_info);
- /* Ready for x86_64; no harm for x86, since it is overwritten after allocation */
- unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
- /* representing cpus for which sibling maps can be computed */
- static cpumask_t cpu_sibling_setup_map;
- /* Set if we find a B stepping CPU */
- int __cpuinitdata smp_b_stepping;
- #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
- /* which logical CPUs are on which nodes */
- cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
- { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
- EXPORT_SYMBOL(node_to_cpumask_map);
- /* which node each logical CPU is on */
- int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
- EXPORT_SYMBOL(cpu_to_node_map);
- /* set up a mapping between cpu and node. */
- static void map_cpu_to_node(int cpu, int node)
- {
- printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
- cpu_set(cpu, node_to_cpumask_map[node]);
- cpu_to_node_map[cpu] = node;
- }
- /* undo a mapping between cpu and node. */
- static void unmap_cpu_to_node(int cpu)
- {
- int node;
- printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
- for (node = 0; node < MAX_NUMNODES; node++)
- cpu_clear(cpu, node_to_cpumask_map[node]);
- cpu_to_node_map[cpu] = 0;
- }
- #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
- #define map_cpu_to_node(cpu, node) ({})
- #define unmap_cpu_to_node(cpu) ({})
- #endif
- #ifdef CONFIG_X86_32
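- /* logical APIC ID of each cpu, BAD_APICID until mapped */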
- u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
- { [0 ... NR_CPUS-1] = BAD_APICID };
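- /*
- * Record the calling cpu's logical APIC ID and map the cpu to its
- * NUMA node (falling back to the first online node).
- */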
- void map_cpu_to_logical_apicid(void)
- {
- int cpu = smp_processor_id();
- int apicid = logical_smp_processor_id();
- int node = apicid_to_node(apicid);
- if (!node_online(node))
- node = first_online_node;
- cpu_2_logical_apicid[cpu] = apicid;
- map_cpu_to_node(cpu, node);
- }
- void unmap_cpu_to_logical_apicid(int cpu)
- {
- cpu_2_logical_apicid[cpu] = BAD_APICID;
- unmap_cpu_to_node(cpu);
- }
- #else
- #define unmap_cpu_to_logical_apicid(cpu) do {} while (0)
- #define map_cpu_to_logical_apicid() do {} while (0)
- #endif
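- /*
- * Apply boot-time CPU quirks (32-bit only): note B stepping Pentiums
- * and taint on AMD Athlons that are not certified MP-capable.
- */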
- static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
- {
- #ifdef CONFIG_X86_32
- /*
- * Mask-B stepping Pentiums, but not Pentium MMX
- */
- if (c->x86_vendor == X86_VENDOR_INTEL &&
- c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
- c->x86_model <= 3)
- /*
- * Remember we have B step Pentia with bugs
- */
- smp_b_stepping = 1;
- /*
- * Certain Athlons might work (for various values of 'work') in SMP
- * but they are not certified as MP capable.
- */
- if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
- if (num_possible_cpus() == 1)
- goto valid_k7;
- /* Athlon 660/661 is valid. */
- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
- (c->x86_mask == 1)))
- goto valid_k7;
- /* Duron 670 is valid */
- if ((c->x86_model == 7) && (c->x86_mask == 0))
- goto valid_k7;
- /*
- * Athlon 662, Duron 671, and Athlon >model 7 have the MP
- * capability bit. It's worth noting that the A5 stepping (662)
- * of some Athlon XPs has the MP bit set.
- * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
- * more.
- */
- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
- (c->x86_model > 7))
- if (cpu_has_mp)
- goto valid_k7;
- /* If we get here, not a certified SMP capable AMD system. */
- add_taint(TAINT_UNSAFE_SMP);
- }
- valid_k7:
- ;
- #endif
- }
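- /*
- * Warn about the quirks recorded during bringup: B stepping Pentiums,
- * and uncertified AMD configurations (clearing the taint if only the
- * boot cpu is online).
- */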
- void smp_checks(void)
- {
- if (smp_b_stepping)
- printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
- " with B stepping processors.\n");
- /*
- * Don't taint if we are running an SMP kernel on a single
- * non-MP-approved Athlon.
- */
- if (tainted & TAINT_UNSAFE_SMP) {
- if (num_online_cpus() > 1)
- printk(KERN_INFO "WARNING: This combination of AMD"
- " processors is not suitable for SMP.\n");
- else
- tainted &= ~TAINT_UNSAFE_SMP;
- }
- }
- /*
- * The bootstrap kernel entry code has set these up. Save them
- * for a given CPU.
- */
- void __cpuinit smp_store_cpu_info(int id)
- {
- struct cpuinfo_x86 *c = &cpu_data(id);
- *c = boot_cpu_data;
- c->cpu_index = id;
- if (id != 0)
- identify_secondary_cpu(c);
- smp_apply_quirks(c);
- }
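- /*
- * Link a newly booted cpu into the HT-sibling, core, and last level
- * cache maps of every cpu set up so far, and update booted_cores.
- */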
- void __cpuinit set_cpu_sibling_map(int cpu)
- {
- int i;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- cpu_set(cpu, cpu_sibling_setup_map);
- if (smp_num_siblings > 1) {
- for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
- c->cpu_core_id == cpu_data(i).cpu_core_id) {
- cpu_set(i, per_cpu(cpu_sibling_map, cpu));
- cpu_set(cpu, per_cpu(cpu_sibling_map, i));
- cpu_set(i, per_cpu(cpu_core_map, cpu));
- cpu_set(cpu, per_cpu(cpu_core_map, i));
- cpu_set(i, c->llc_shared_map);
- cpu_set(cpu, cpu_data(i).llc_shared_map);
- }
- }
- } else {
- cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
- }
- cpu_set(cpu, c->llc_shared_map);
- if (current_cpu_data.x86_max_cores == 1) {
- per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
- c->booted_cores = 1;
- return;
- }
- for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
- per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
- cpu_set(i, c->llc_shared_map);
- cpu_set(cpu, cpu_data(i).llc_shared_map);
- }
- if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
- cpu_set(i, per_cpu(cpu_core_map, cpu));
- cpu_set(cpu, per_cpu(cpu_core_map, i));
- /*
- * Does this new cpu bring up a new core?
- */
- if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
- /*
- * For each core in the package, increment
- * booted_cores for this new cpu
- */
- if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
- c->booted_cores++;
- /*
- * increment the core count for all
- * the other cpus in this package
- */
- if (i != cpu)
- cpu_data(i).booted_cores++;
- } else if (i != cpu && !c->booted_cores)
- c->booted_cores = cpu_data(i).booted_cores;
- }
- }
- }
- /* maps the cpu to the sched domain representing multi-core */
- cpumask_t cpu_coregroup_map(int cpu)
- {
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- /*
- * For performance, return the last level cache shared map;
- * for power savings, return cpu_core_map.
- */
- if (sched_mc_power_savings || sched_smt_power_savings)
- return per_cpu(cpu_core_map, cpu);
- else
- return c->llc_shared_map;
- }
- /*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
- unsigned long __cpuinit setup_trampoline(void)
- {
- memcpy(trampoline_base, trampoline_data,
- trampoline_end - trampoline_data);
- return virt_to_phys(trampoline_base);
- }
- #ifdef CONFIG_X86_32
- /*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
- void __init smp_alloc_memory(void)
- {
- trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
- /*
- * Has to be in very low memory so we can execute
- * real-mode AP code.
- */
- if (__pa(trampoline_base) >= 0x9F000)
- BUG();
- }
- #endif
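- /* Sum loops_per_jiffy across the booted cpus and print total BogoMIPS. */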
- void impress_friends(void)
- {
- int cpu;
- unsigned long bogosum = 0;
- /*
- * Allow the user to impress friends.
- */
- Dprintk("Before bogomips.\n");
- for_each_possible_cpu(cpu)
- if (cpu_isset(cpu, cpu_callout_map))
- bogosum += cpu_data(cpu).loops_per_jiffy;
- printk(KERN_INFO
- "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- num_online_cpus(),
- bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
- Dprintk("Before bogocount - setting activated=1.\n");
- }
- #ifdef CONFIG_HOTPLUG_CPU
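- /* Undo set_cpu_sibling_map() for a cpu that is going offline. */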
- void remove_siblinginfo(int cpu)
- {
- int sibling;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
- cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
- /*
- * last thread sibling in this cpu core going down
- */
- if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
- cpu_data(sibling).booted_cores--;
- }
- for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
- cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
- cpus_clear(per_cpu(cpu_sibling_map, cpu));
- cpus_clear(per_cpu(cpu_core_map, cpu));
- c->phys_proc_id = 0;
- c->cpu_core_id = 0;
- cpu_clear(cpu, cpu_sibling_setup_map);
- }
- int additional_cpus __initdata = -1;
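- /* Parse "additional_cpus=NUM" from the early kernel command line. */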
- static __init int setup_additional_cpus(char *s)
- {
- return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
- }
- early_param("additional_cpus", setup_additional_cpus);
- /*
- * cpu_possible_map should be static: it cannot change as cpus are
- * onlined or offlined. The reason is that some modules allocate
- * per-cpu data structures at init time and don't expect them to be
- * reallocated on cpu arrival/departure.
- * cpu_present_map, on the other hand, can change dynamically.
- * If cpu hotplug is not compiled in, we fall back to the current
- * behaviour, which is cpu_possible == cpu_present.
- * - Ashok Raj
- *
- * Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
- * - The user can override it with additional_cpus=NUM.
- * - Otherwise don't reserve additional CPUs.
- * We do this because additional CPUs waste a lot of memory.
- * -AK
- */
- __init void prefill_possible_map(void)
- {
- int i;
- int possible;
- if (additional_cpus == -1) {
- if (disabled_cpus > 0)
- additional_cpus = disabled_cpus;
- else
- additional_cpus = 0;
- }
- possible = num_processors + additional_cpus;
- if (possible > NR_CPUS)
- possible = NR_CPUS;
- printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
- possible, max_t(int, possible - num_processors, 0));
- for (i = 0; i < possible; i++)
- cpu_set(i, cpu_possible_map);
- }
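- /* Remove a dying cpu from the global cpu bookkeeping masks. */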
- static void __ref remove_cpu_from_maps(int cpu)
- {
- cpu_clear(cpu, cpu_online_map);
- #ifdef CONFIG_X86_64
- cpu_clear(cpu, cpu_callout_map);
- cpu_clear(cpu, cpu_callin_map);
- /* was set by cpu_init() */
- clear_bit(cpu, (unsigned long *)&cpu_initialized);
- clear_node_cpumask(cpu);
- #endif
- }
- int __cpu_disable(void)
- {
- int cpu = smp_processor_id();
- /*
- * Perhaps use cpufreq to drop frequency, but that could go
- * into generic code.
- *
- * We won't take down the boot processor on i386 due to some
- * interrupts only being able to be serviced by the BSP.
- * Especially so if we're not using an IOAPIC -zwane
- */
- if (cpu == 0)
- return -EBUSY;
- if (nmi_watchdog == NMI_LOCAL_APIC)
- stop_apic_nmi_watchdog(NULL);
- clear_local_APIC();
- /*
- * HACK:
- * Allow any queued timer interrupts to get serviced.
- * This is only a temporary solution until we clean up
- * fixup_irqs as we do for IA64.
- */
- local_irq_enable();
- mdelay(1);
- local_irq_disable();
- remove_siblinginfo(cpu);
- /* It's now safe to remove this processor from the online map */
- remove_cpu_from_maps(cpu);
- fixup_irqs(cpu_online_map);
- return 0;
- }
- void __cpu_die(unsigned int cpu)
- {
- /* We don't do anything here: the idle task is faking death itself. */
- unsigned int i;
- for (i = 0; i < 10; i++) {
- /* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
- printk(KERN_INFO "CPU %d is now offline\n", cpu);
- if (1 == num_online_cpus())
- alternatives_smp_switch(0);
- return;
- }
- msleep(100);
- }
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
- }
- #else /* ... !CONFIG_HOTPLUG_CPU */
- int __cpu_disable(void)
- {
- return -ENOSYS;
- }
- void __cpu_die(unsigned int cpu)
- {
- /* We said "no" in __cpu_disable */
- BUG();
- }
- #endif
- /*
- * If the BIOS enumerates physical processors before logical,
- * maxcpus=N at enumeration-time can be used to disable HT.
- */
- static int __init parse_maxcpus(char *arg)
- {
- extern unsigned int maxcpus;
- maxcpus = simple_strtoul(arg, NULL, 0);
- return 0;
- }
- early_param("maxcpus", parse_maxcpus);