smpboot.c

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

#ifdef CONFIG_X86_32
/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#endif
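
/*
 * Apply boot-time CPU quirks: note buggy B-step Pentium parts and taint
 * the kernel when an AMD K7 system is not certified as MP capable.
 */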
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                /*
                 * Remember we have B step Pentia with bugs
                 */
                smp_b_stepping = 1;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
                if (num_possible_cpus() == 1)
                        goto valid_k7;

                /* Athlon 660/661 is valid. */
                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
                    (c->x86_mask == 1)))
                        goto valid_k7;

                /* Duron 670 is valid */
                if ((c->x86_model == 7) && (c->x86_mask == 0))
                        goto valid_k7;

                /*
                 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
                 * capability bit. It's worth noting that the A5 stepping
                 * (662) of some Athlon XPs has the MP bit set.
                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
                 * for more.
                 */
                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
                     (c->x86_model > 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                /* If we get here, it's not a certified SMP capable AMD system. */
                add_taint(TAINT_UNSAFE_SMP);
        }

valid_k7:
        ;
#endif
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
        smp_apply_quirks(c);
}
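
/*
 * Build the topology masks for a freshly booted CPU: link it to its HT
 * siblings, its core siblings and the CPUs it shares a last level cache
 * with, and keep the per-package booted_cores count consistent.
 */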
void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }

        for_each_cpu_mask(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bring up a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For performance, we return the last level cache shared map.
         * For power savings, we return cpu_core_map instead.
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
unsigned long __cpuinit setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}

#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
        trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
}
#endif
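
/* Report the number of activated processors and their combined BogoMIPS. */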
void impress_friends(void)
{
        int cpu;
        unsigned long bogosum = 0;
        /*
         * Allow the user to impress friends.
         */
        Dprintk("Before bogomips.\n");
        for_each_possible_cpu(cpu)
                if (cpu_isset(cpu, cpu_callout_map))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                cpus_weight(cpu_present_map),
                bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        Dprintk("Before bogocount - setting activated=1.\n");
}

#ifdef CONFIG_HOTPLUG_CPU
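/*
 * Undo set_cpu_sibling_map() for a CPU going offline: clear it from the
 * sibling and core masks of the other CPUs and fix up booted_cores.
 */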
void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;
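
/* Parse the "additional_cpus=NUM" early parameter. */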
static __init int setup_additional_cpus(char *s)
{
        return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * In case cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
        int i;
        int possible;

        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }
        possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);
}
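
/* Remove a CPU that is going offline from the cpumask bookkeeping. */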
static void __ref remove_cpu_from_maps(int cpu)
{
        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        /* was set by cpu_init() */
        clear_bit(cpu, (unsigned long *)&cpu_initialized);
        clear_node_cpumask(cpu);
#endif
}
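
/*
 * Take the calling CPU out of service: the boot CPU (cpu 0) is refused,
 * otherwise the local APIC and NMI watchdog are shut down, sibling info
 * and cpumask entries are removed, and interrupts are migrated away.
 */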
int __cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC   -zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        /*
         * HACK:
         * Allow any queued timer interrupts to get serviced
         * This is only a temporary solution until we cleanup
         * fixup_irqs as we do for IA64.
         */
        local_irq_enable();
        mdelay(1);

        local_irq_disable();
        remove_siblinginfo(cpu);

        /* It's now safe to remove this processor from the online map */
        remove_cpu_from_maps(cpu);
        fixup_irqs(cpu_online_map);
        return 0;
}
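
/*
 * Wait up to ~1 second (10 x 100ms) for the dying CPU to report CPU_DEAD
 * from play_dead(), then log whether it actually went offline.
 */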
void __cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk(KERN_INFO "CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
        extern unsigned int maxcpus;

        maxcpus = simple_strtoul(arg, NULL, 0);
        return 0;
}
early_param("maxcpus", parse_maxcpus);