smpboot.c

/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *      Felix Koop        :     NR_CPUS used properly
 *      Jose Renau        :     Handle single CPU case.
 *      Alan Cox          :     By repeated request 8) - Total BogoMIPS report.
 *      Greg Wright       :     Fix for kernel stacks panic.
 *      Erich Boleyn      :     MP v1.4 and additional changes.
 *      Matthias Sattler  :     Changes for 2.1 kernel map.
 *      Michel Lespinasse :     Changes for 2.1 kernel map.
 *      Michael Chastain  :     Change trampoline.S to gnu as.
 *      Alan Cox          :     Dumb bug: 'B' step PPro's are fine
 *      Ingo Molnar       :     Added APIC timers, based on code
 *                              from Jose Renau
 *      Ingo Molnar       :     various cleanups and rewrites
 *      Tigran Aivazian   :     fixed "0.00 in /proc/uptime on SMP" bug.
 *      Maciej W. Rozycki :     Bits for genuine 82489DX APICs
 *      Andi Kleen        :     Changed for SMP boot into long mode.
 *      Martin J. Bligh   :     Added support for multi-quad systems
 *      Dave Jones        :     Report invalid combinations of Athlon CPUs.
 *      Rusty Russell     :     Hacked into shape for new "hotplug" boot process.
 *      Andi Kleen        :     Converted to new state machine.
 *      Ashok Raj         :     CPU hotplug support
 *      Glauber Costa     :     i386 and x86_64 integration
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/smp.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <asm/genapic.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
static int low_mappings;
#endif

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads; they can be reused instead of creating
 * a new thread. This also avoids complicated thread-destroy
 * functionality for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
static int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);

/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
        cpu_set(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
        int node;

        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
                cpu_clear(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)      ({})
#define unmap_cpu_to_node(cpu)          ({})
#endif

#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
                                        { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();
        int node = apicid_to_node(apicid);

        if (!node_online(node))
                node = first_online_node;

        cpu_2_logical_apicid[cpu] = apicid;
        map_cpu_to_node(cpu, node);
}

void numa_remove_cpu(int cpu)
{
        cpu_2_logical_apicid[cpu] = BAD_APICID;
        unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid()     do {} while (0)
#endif
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC. We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        wait_for_init_deassert(&init_deasserted);

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = read_apic_id();
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
                      phys_id, cpuid);
        }
        pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second, this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                panic("%s: CPU%d started up but did not get a callout!\n",
                      __func__, cpuid);
        }

        /*
         * The boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */
        pr_debug("CALLIN, before setup_local_APIC().\n");
        smp_callin_clear_local_apic();
        setup_local_APIC();
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();

        notify_cpu_starting(cpuid);

        /*
         * Get our bogomips.
         *
         * Need to enable IRQs because it can take longer and then
         * the NMI watchdog might kill us.
         */
        local_irq_enable();
        calibrate_delay();
        local_irq_disable();
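
        /*
         * calibrate_delay() has now computed this CPU's loops_per_jiffy,
         * which both makes udelay()/mdelay() usable on this CPU and
         * feeds the per-CPU BogoMIPS figure reported at boot.
         */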
        pr_debug("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        /*
         * Allow the master to continue.
         */
        cpu_set(cpuid, cpu_callin_map);
}

static int __cpuinitdata unsafe_smp;

/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
        /*
         * Don't put *anything* before cpu_init(); SMP booting is so
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
        vmi_bringup();
        cpu_init();
        preempt_disable();
        smp_callin();

        /* otherwise gcc will move up smp_processor_id before the cpu_init */
        barrier();
        /*
         * Check TSC synchronization with the BP:
         */
        check_tsc_sync_target();

        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0();
                enable_8259A_irq(0);
        }

#ifdef CONFIG_X86_32
        while (low_mappings)
                cpu_relax();
        __flush_tlb_all();
#endif

        /* This must be done before setting cpu_online_map */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();

        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines the number of
         * IPI recipients, and the time when the determination is made
         * for which cpus receive the IPI. Holding this
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
         *
         * We need to hold vector_lock so that the set of online cpus
         * does not change while we are assigning vectors to cpus. Holding
         * this lock ensures we don't half assign or remove an irq from a cpu.
         */
        ipi_call_lock();
        lock_vector_lock();
        __setup_vector_irq(smp_processor_id());
        cpu_set(smp_processor_id(), cpu_online_map);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

        /* enable local interrupts */
        local_irq_enable();

        setup_secondary_clock();

        wmb();
        cpu_idle();
}
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                /*
                 * Remember we have B step Pentia with bugs
                 */
                smp_b_stepping = 1;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
                if (num_possible_cpus() == 1)
                        goto valid_k7;

                /* Athlon 660/661 is valid. */
                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
                    (c->x86_mask == 1)))
                        goto valid_k7;

                /* Duron 670 is valid */
                if ((c->x86_model == 7) && (c->x86_mask == 0))
                        goto valid_k7;

                /*
                 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
                 * capability bit. It's worth noting that the A5 stepping
                 * (662) of some Athlon XPs has the MP bit set.
                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
                 * for more.
                 */
                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
                    (c->x86_model > 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                /* If we get here, not a certified SMP capable AMD system. */
                unsafe_smp = 1;
        }

valid_k7:
        ;
}

static void __cpuinit smp_checks(void)
{
        if (smp_b_stepping)
                printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
                       "with B stepping processors.\n");

        /*
         * Don't taint if we are running SMP kernel on a single non-MP
         * approved Athlon
         */
        if (unsafe_smp && num_online_cpus() > 1) {
                printk(KERN_INFO "WARNING: This combination of AMD "
                       "processors is not suitable for SMP.\n");
                add_taint(TAINT_UNSAFE_SMP);
        }
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
        smp_apply_quirks(c);
}

void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }

        for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bring up a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For perf, we return last level cache shared map.
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}

static void impress_friends(void)
{
        int cpu;
        unsigned long bogosum = 0;
        /*
         * Allow the user to impress friends.
         */
        pr_debug("Before bogomips.\n");
        for_each_possible_cpu(cpu)
                if (cpu_isset(cpu, cpu_callout_map))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
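
        /*
         * bogosum is a sum of loops_per_jiffy values. One BogoMIPS is
         * 500000 delay-loop iterations per second, so bogosum/(500000/HZ)
         * is the integer part and (bogosum/(5000/HZ)) % 100 supplies the
         * two fractional digits printed below.
         */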
        printk(KERN_INFO
               "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum/(500000/HZ),
               (bogosum/(5000/HZ))%100);

        pr_debug("Before bogocount - setting activated=1.\n");
}

static inline void __inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        u32 status;

        printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
                        printk(KERN_CONT
                               "a previous APIC delivery may have failed\n");

                apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk(KERN_CONT "%08x\n", status);
                        break;
                default:
                        printk(KERN_CONT "failed\n");
                }
        }
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt;

        /* Target chip */
        /* Boot on the stack */
        /* Kick the second */
        apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        /* The ESR read below is on this (the boot) CPU's APIC. */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                maxlvt = lapic_get_maxlvt();
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
        }
        pr_debug("NMI sent.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif  /* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
                send_status = uv_wakeup_secondary(phys_apicid, start_eip);
                atomic_set(&init_deasserted, 1);
                return send_status;
        }

        maxlvt = lapic_get_maxlvt();

        /*
         * Be paranoid about clearing APIC errors.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        pr_debug("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        /*
         * Send IPI
         */
        apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
                       phys_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mdelay(10);

        pr_debug("Deasserting INIT.\n");

        /* Target chip */
        /* Send IPI */
        apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mb();
        atomic_set(&init_deasserted, 1);

        /*
         * Should we send STARTUP IPIs ?
         *
         * Determine this based on the APIC version.
         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;
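
        /*
         * This implements the classic MP-spec INIT-SIPI-SIPI protocol:
         * the INIT asserted and deasserted above resets the AP, and up
         * to two STARTUP IPIs below make an integrated APIC start
         * fetching code at the trampoline. Old external 82489DX APICs
         * don't understand STARTUP and begin executing on the INIT
         * alone, hence num_starts == 0 for them.
         */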
        /*
         * Paravirt / VMI wants a startup IPI hook here to set up the
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
                         (unsigned long)stack_start.sp);

        /*
         * Run STARTUP IPI loop.
         */
        pr_debug("#startup loops: %d.\n", num_starts);

        for (j = 1; j <= num_starts; j++) {
                pr_debug("Sending STARTUP #%d.\n", j);
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                pr_debug("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */
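                /*
                 * The STARTUP IPI's 8-bit vector field carries the
                 * physical page number of the real-mode entry point,
                 * hence start_eip >> 12: the AP begins executing at
                 * CS:IP = (vector << 8):0, i.e. on a 4 KiB boundary
                 * below 1 MB.
                 */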
                /* Target chip */
                /* Boot on the stack */
                /* Kick the second */
                apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
                               phys_apicid);

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                pr_debug("Startup point 1.\n");

                pr_debug("Waiting for send to finish...\n");
                send_status = safe_apic_wait_icr_idle();

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        pr_debug("After Startup.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif  /* WAKE_SECONDARY_VIA_INIT */
struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}

#ifdef CONFIG_X86_64

/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
{
        if (!after_bootmem)
                free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
}

/*
 * Allocate node local memory for the AP pda.
 *
 * Must be called after the _cpu_pda pointer table is initialized.
 */
int __cpuinit get_local_pda(int cpu)
{
        struct x8664_pda *oldpda, *newpda;
        unsigned long size = sizeof(struct x8664_pda);
        int node = cpu_to_node(cpu);

        if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
                return 0;

        oldpda = cpu_pda(cpu);
        newpda = kmalloc_node(size, GFP_ATOMIC, node);
        if (!newpda) {
                printk(KERN_ERR "Could not allocate node local PDA "
                       "for CPU %d on node %d\n", cpu, node);

                if (oldpda)
                        return 0;       /* have a usable pda */
                else
                        return -1;
        }

        if (oldpda) {
                memcpy(newpda, oldpda, size);
                free_bootmem_pda(oldpda);
        }

        newpda->in_bootmem = 0;
        cpu_pda(cpu) = newpda;
        return 0;
}
#endif /* CONFIG_X86_64 */
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
        unsigned long boot_error = 0;
        int timeout;
        unsigned long start_ip;
        unsigned short nmi_high = 0, nmi_low = 0;
        struct create_idle c_idle = {
                .cpu = cpu,
                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
        INIT_WORK(&c_idle.work, do_fork_idle);

#ifdef CONFIG_X86_64
        /* Allocate node local memory for AP pdas */
        if (cpu > 0) {
                boot_error = get_local_pda(cpu);
                if (boot_error)
                        goto restore_state;
                        /* if can't get pda memory, can't start cpu */
        }
#endif

        alternatives_smp_switch(1);

        c_idle.idle = get_idle_for_cpu(cpu);

        /*
         * We can't use kernel_thread since we must avoid
         * rescheduling the child.
         */
        if (c_idle.idle) {
                c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
                init_idle(c_idle.idle, cpu);
                goto do_rest;
        }

        if (!keventd_up() || current_is_keventd())
                c_idle.work.func(&c_idle.work);
        else {
                schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }

        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
                return PTR_ERR(c_idle.idle);
        }

        set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
        per_cpu(current_task, cpu) = c_idle.idle;
        init_gdt(cpu);
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
#else
        cpu_pda(cpu)->pcurrent = c_idle.idle;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
        stack_start.sp = (void *) c_idle.idle->thread.sp;
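
        /*
         * These three globals are consumed by the secondary startup
         * path (trampoline and head_*.S): the AP loads this CPU's GDT,
         * picks up the idle task's stack, and finally jumps to
         * start_secondary().
         */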
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
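        /*
         * setup_trampoline() copies the 16-bit trampoline code into a
         * reserved page in low memory (below 1 MB, where a freshly
         * started AP can execute in real mode) and returns its physical
         * address.
         */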
        /* So we see what's up */
        printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
               cpu, apicid, start_ip);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

                pr_debug("Setting warm reset code and vector.\n");

                store_NMI_vector(&nmi_high, &nmi_low);

                smpboot_setup_warm_reset_vector(start_ip);
                /*
                 * Be paranoid about clearing APIC errors.
                 */
                if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                        apic_write(APIC_ESR, 0);
                        apic_read(APIC_ESR);
                }
        }

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_cpu(apicid, start_ip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                pr_debug("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                pr_debug("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        pr_debug("OK.\n");
                        printk(KERN_INFO "CPU%d: ", cpu);
                        print_cpu_info(&cpu_data(cpu));
                        pr_debug("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk(KERN_ERR "Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk(KERN_ERR "Not responding.\n");
                        if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
                                inquire_remote_apic(apicid);
                }
        }
#ifdef CONFIG_X86_64
restore_state:
#endif
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                numa_remove_cpu(cpu); /* was set by numa_add_cpu */
                cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
                cpu_clear(cpu, cpu_present_map);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
        }

        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)trampoline_base) = 0;
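        /*
         * The trampoline code stamps 0xA5 at its base as soon as it
         * starts executing; that is what the "Stuck ??" test above
         * looks for. Clearing the word here rearms the check for the
         * next CPU bringup.
         */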

        /*
         * Cleanup possible dangling ends...
         */
        smpboot_restore_warm_reset_vector();

        return boot_error;
}
int __cpuinit native_cpu_up(unsigned int cpu)
{
        int apicid = cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;

        WARN_ON(irqs_disabled());

        pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

        if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
            !physid_isset(apicid, phys_cpu_present_map)) {
                printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
                return -EINVAL;
        }

        /*
         * Already booted CPU?
         */
        if (cpu_isset(cpu, cpu_callin_map)) {
                pr_debug("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }

        /*
         * Save current MTRR state in case it was changed since early boot
         * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
         */
        mtrr_save_state();

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
        /* init low mem mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
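        /*
         * The AP comes up through the real-mode trampoline at a low
         * physical address and then enables paging on swapper_pg_dir,
         * so low memory must stay identity-mapped until the AP has
         * called in. zap_low_mappings() below tears the mapping down
         * again; start_secondary() spins on the low_mappings flag until
         * that has happened and then flushes its own TLB.
         */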
        flush_tlb_all();
        low_mappings = 1;

        err = do_boot_cpu(apicid, cpu);

        zap_low_mappings();
        low_mappings = 0;
#else
        err = do_boot_cpu(apicid, cpu);
#endif
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
                return -EIO;
        }

        /*
         * Check TSC synchronization with the AP (keep irqs disabled
         * while doing so):
         */
        local_irq_save(flags);
        check_tsc_sync_source(cpu);
        local_irq_restore(flags);

        while (!cpu_online(cpu)) {
                cpu_relax();
                touch_nmi_watchdog();
        }

        return 0;
}

/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
        cpu_present_map = cpumask_of_cpu(0);
        cpu_possible_map = cpumask_of_cpu(0);
        smpboot_clear_io_apic_irqs();

        if (smp_found_config)
                physid_set_mask_of_physid(boot_cpu_physical_apicid,
                                          &phys_cpu_present_map);
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
        map_cpu_to_logical_apicid();
        cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
}
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
        preempt_disable();

#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
        if (def_to_bigsmp && nr_cpu_ids > 8) {
                unsigned int cpu;
                unsigned nr;

                printk(KERN_WARNING
                       "More than 8 CPUs detected - skipping them.\n"
                       "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");

                nr = 0;
                for_each_present_cpu(cpu) {
                        if (nr >= 8)
                                cpu_clear(cpu, cpu_present_map);
                        nr++;
                }

                nr = 0;
                for_each_possible_cpu(cpu) {
                        if (nr >= 8)
                                cpu_clear(cpu, cpu_possible_map);
                        nr++;
                }

                nr_cpu_ids = 8;
        }
#endif

        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
                printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
                       "by the BIOS.\n", hard_smp_processor_id());
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config && !acpi_lapic) {
                preempt_enable();
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                               " Using dummy APIC emulation.\n");
                return -1;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
        if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk(KERN_NOTICE
                       "weird, boot CPU (#%d) not listed by the BIOS.\n",
                       boot_cpu_physical_apicid);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }
        preempt_enable();

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
            !cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                       boot_cpu_physical_apicid);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. "
                       "(tell your hw vendor)\n");
                smpboot_clear_io_apic();
                return -1;
        }

        verify_local_APIC();

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                smpboot_clear_io_apic();

                localise_nmi_watchdog();

                connect_bsp_APIC();
                setup_local_APIC();
                end_local_APIC_setup();
                return -1;
        }

        return 0;
}
static void __init smp_cpu_index_default(void)
{
        int i;
        struct cpuinfo_x86 *c;

        for_each_possible_cpu(i) {
                c = &cpu_data(i);
                /* mark all to hotplug */
                c->cpu_index = NR_CPUS;
        }
}

/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
        cpu_callin_map = cpumask_of_cpu(0);
        mb();
        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
        boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
        current_thread_info()->cpu = 0;  /* needed? */
        set_cpu_sibling_map(0);

#ifdef CONFIG_X86_64
        enable_IR_x2apic();
        setup_apic_routing();
#endif

        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
                goto out;
        }

        preempt_disable();
        if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                      read_apic_id(), boot_cpu_physical_apicid);
                /* Or can we switch back to PIC here? */
        }
        preempt_enable();

        connect_bsp_APIC();

        /*
         * Switch from PIC to APIC mode.
         */
        setup_local_APIC();

#ifdef CONFIG_X86_64
        /*
         * Enable IO APIC before setting up error vector
         */
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();
#endif
        end_local_APIC_setup();

        map_cpu_to_logical_apicid();

        setup_portio_remap();

        smpboot_setup_io_apic();

        /*
         * Set up local APIC timer on boot CPU.
         */
        printk(KERN_INFO "CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
        setup_boot_clock();

        if (is_uv_system())
                uv_system_init();
out:
        preempt_enable();
}
/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
        int me = smp_processor_id();
#ifdef CONFIG_X86_32
        init_gdt(me);
#endif
        switch_to_new_gdt();
        /* already set me in cpu_online_map in boot_cpu_init() */
        cpu_set(me, cpu_callout_map);
        per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
        pr_debug("Boot done.\n");

        impress_friends();
        smp_checks();
#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
#endif
        check_nmi_watchdog();
}

/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, which do not expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
        int i, possible;

        /* no processor from mptable or madt */
        if (!num_processors)
                num_processors = 1;

        possible = num_processors + disabled_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
               possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);

        nr_cpu_ids = possible;
}
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}

static void __ref remove_cpu_from_maps(int cpu)
{
        cpu_clear(cpu, cpu_online_map);
        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        /* was set by cpu_init() */
        cpu_clear(cpu, cpu_initialized);
        numa_remove_cpu(cpu);
}
void cpu_disable_common(void)
{
        int cpu = smp_processor_id();

        /*
         * HACK:
         * Allow any queued timer interrupts to get serviced
         * This is only a temporary solution until we cleanup
         * fixup_irqs as we do for IA64.
         */
        local_irq_enable();
        mdelay(1);

        local_irq_disable();
        remove_siblinginfo(cpu);

        /* It's now safe to remove this processor from the online map */
        lock_vector_lock();
        remove_cpu_from_maps(cpu);
        unlock_vector_lock();
        fixup_irqs(cpu_online_map);
}

int native_cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC   -zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        cpu_disable_common();
        return 0;
}

void native_cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk(KERN_INFO "CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

void play_dead_common(void)
{
        idle_task_exit();
        reset_lazy_tlbstate();
        irq_ctx_exit(raw_smp_processor_id());
        c1e_remove_cpu(raw_smp_processor_id());

        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the cpu
         */
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
        wbinvd_halt();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif