/* setup_percpu.c */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
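
/*
 * EARLY per-cpu variables are backed by a static boot-time array plus
 * a pointer to it.  Before setup_per_cpu_areas() runs, readers go
 * through early_per_cpu_ptr()/early_per_cpu_map(); afterwards,
 * setup_per_cpu_maps() below copies the contents into the real
 * per-cpu areas and NULLs the early pointers.
 */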

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1 /* (used later) */
DEFINE_PER_CPU(int, node_number) = 0;
EXPORT_PER_CPU_SYMBOL(node_number);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#ifdef CONFIG_X86_64
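
/*
 * The masks below are cpumask_var_t's.  With CONFIG_CPUMASK_OFFSTACK
 * they need real storage, which is carved out of bootmem here since
 * this runs long before the slab allocator is up; without it,
 * alloc_bootmem_cpumask_var() is a no-op.
 */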
/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(x86_cpu_to_apicid, cpu) =
                                early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                                early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                                early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
        }

        /* indicate the early static arrays will soon be gone */
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
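
/*
 * __per_cpu_offset[cpu] is added to a per-cpu variable's link-time
 * address to reach that CPU's copy.  On 64-bit, entry 0 initially
 * points at the original __per_cpu_load section because the boot CPU
 * keeps using it until setup_per_cpu_areas() switches %gs over
 * (see load_gs_base(0) below).
 */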
#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size, old_size;
        char *ptr;
        int cpu;
        unsigned long align = 1;

        /* Copy section for each CPU (we discard the original) */
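        /*
         * PERCPU_ENOUGH_ROOM is the size of the static per-cpu section
         * plus reserve room for modules' per-cpu variables; round it up
         * to a whole page so each CPU's copy is page-aligned.
         */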
        old_size = PERCPU_ENOUGH_ROOM;
        align = max_t(unsigned long, PAGE_SIZE, align);
        size = roundup(old_size, align);

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = __alloc_bootmem(size, align,
                                        __pa(MAX_DMA_ADDRESS));
#else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = __alloc_bootmem(size, align,
                                        __pa(MAX_DMA_ADDRESS));
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
                        ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
                                                __pa(MAX_DMA_ADDRESS));
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                cpu, node, __pa(ptr));
                }
#endif
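
                /*
                 * Seed this CPU's area from the reference section and
                 * record the offset that per_cpu() adds to a variable's
                 * address to reach this CPU's copy.
                 */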
                memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
#ifdef CONFIG_X86_64
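                /*
                 * Point irq_stack_ptr just below the top of this CPU's
                 * IRQ stack (stacks grow down); the 64 bytes kept free
                 * at the very top appear to be left as scratch slack.
                 */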
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                                IRQ_STACK_SIZE - 64;
                /*
                 * Up to this point, CPU0 has been using .data.init
                 * area.  Reload %gs offset for CPU0.
                 */
                if (cpu == 0)
                        load_gs_base(cpu);
#endif

                DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }

        /* Setup percpu data maps */
        setup_per_cpu_maps();

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}

#endif
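
/*
 * Once the areas exist, the generic accessors work as usual.  A
 * minimal sketch (the per-cpu variable "foo" is hypothetical):
 *
 *      DEFINE_PER_CPU(int, foo);
 *
 *      per_cpu(foo, cpu) = 1;          (some other CPU's copy)
 *      get_cpu_var(foo)++;             (this CPU's copy, preemption off)
 *      put_cpu_var(foo);
 */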

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;
        cpumask_t *map;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
        DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

        pr_debug("Node to cpumask map at %p for %d nodes\n",
                 map, nr_node_ids);

        /* node_to_cpumask() will now work */
        node_to_cpumask_map = map;
}
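
/*
 * numa_set_node() works both before and after the per-cpu areas are
 * set up: early callers update the static x86_cpu_to_node_map array,
 * later ones the per-cpu variable itself.
 */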
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                per_cpu(node_number, cpu) = node;
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}
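
/*
 * The plain versions below trust their caller; the
 * CONFIG_DEBUG_PER_CPU_MAPS variants further down validate their
 * arguments and log every change.
 */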
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        cpumask_t *mask;
        char buf[64];

        if (node_to_cpumask_map == NULL) {
                printk(KERN_ERR "node_to_cpumask_map NULL\n");
                dump_stack();
                return;
        }

        mask = &node_to_cpumask_map[node];
        if (enable)
                cpu_set(cpu, *mask);
        else
                cpu_clear(cpu, *mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!per_cpu_offset(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return (const cpumask_t *)&cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return &cpu_mask_none;
        }
        return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
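
/*
 * A minimal usage sketch for cpumask_of_node() (do_something() is
 * hypothetical):
 *
 *      int cpu;
 *
 *      for_each_cpu_mask_nr(cpu, *cpumask_of_node(node))
 *              do_something(cpu);
 */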

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_mask_none;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */