smpboot.c

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
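
/*
 * Record the topology of @cpu: mark it as an HT sibling, core sibling
 * and last-level-cache sharer of every CPU already present in
 * cpu_sibling_setup_map, updating both sides of each map.
 */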
void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);
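
        /*
         * With hyperthreading, any already-registered CPU in the same
         * package and core is a thread sibling; link the sibling, core
         * and shared-cache maps in both directions.
         */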
        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }
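
        /*
         * More than one core per package: link CPUs that share a
         * last-level cache and keep booted_cores consistent across
         * the package.
         */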
        for_each_cpu_mask(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bringup a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}

#ifdef CONFIG_HOTPLUG_CPU
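/*
 * Undo set_cpu_sibling_map(): drop @cpu from the sibling and core maps
 * of every CPU that references it and fix up booted_cores on the CPUs
 * that remain in the package.
 */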
void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}
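
/*
 * "additional_cpus=N" on the command line reserves room in
 * cpu_possible_map for N hot-pluggable CPUs beyond those enumerated at
 * boot; -1 means "decide automatically" (see prefill_possible_map()).
 */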
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
        return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static; it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
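/*
 * Example (assumed figures, for illustration only): if the BIOS reports
 * 4 enabled and 2 disabled CPUs and additional_cpus= is not given,
 * additional_cpus becomes 2 and cpu_possible_map ends up covering
 * 6 CPUs, capped at NR_CPUS.
 */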
__init void prefill_possible_map(void)
{
        int i;
        int possible;

        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }

        possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
               possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);
}
#endif