amdtopology_64.c
/*
 * AMD NUMA support.
 * Discover the memory map and associated nodes.
 *
 * This version reads it directly from the AMD northbridge.
 *
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>

#include <asm/io.h>
#include <linux/pci_ids.h>
#include <linux/acpi.h>
#include <asm/types.h>
#include <asm/mmzone.h>
#include <asm/proto.h>
#include <asm/e820.h>
#include <asm/pci-direct.h>
#include <asm/numa.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/amd_nb.h>

static unsigned char __initdata nodeids[8];
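
/*
 * Locate the AMD northbridge on bus 0 by probing for the HyperTransport
 * configuration function (function 0) and the address map function
 * (function 1) of a K8, family 10h or family 11h node.
 */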
static __init int find_northbridge(void)
{
	int num;

	for (num = 0; num < 32; num++) {
		u32 header;

		header = read_pci_config(0, num, 0, 0x00);
		if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)) &&
		    header != (PCI_VENDOR_ID_AMD | (0x1200<<16)) &&
		    header != (PCI_VENDOR_ID_AMD | (0x1300<<16)))
			continue;

		header = read_pci_config(0, num, 1, 0x00);
		if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)) &&
		    header != (PCI_VENDOR_ID_AMD | (0x1201<<16)) &&
		    header != (PCI_VENDOR_ID_AMD | (0x1301<<16)))
			continue;
		return num;
	}

	return -ENOENT;
}

static __init void early_get_boot_cpu_id(void)
{
	/*
	 * We need the APIC ID of the BSP so that we can use it to create
	 * apicid_to_node in amd_scan_nodes().
	 */
#ifdef CONFIG_X86_MPPARSE
	/*
	 * Get the boot-time SMP configuration:
	 */
	if (smp_found_config)
		early_get_smp_config();
#endif
}

int __init amd_numa_init(void)
{
	unsigned long start = PFN_PHYS(0);
	unsigned long end = PFN_PHYS(max_pfn);
	unsigned numnodes;
	unsigned long prevbase;
	int i, j, nb;
	u32 nodeid, reg;
	unsigned int bits, cores, apicid_base;

	if (!early_pci_allowed())
		return -EINVAL;

	nb = find_northbridge();
	if (nb < 0)
		return nb;

	pr_info("Scanning NUMA topology in Northbridge %d\n", nb);
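
	/*
	 * Offset 0x60 in the HyperTransport configuration function holds
	 * the node ID register; the field extracted below is the highest
	 * node number in the system, so adding one gives the node count.
	 */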
	reg = read_pci_config(0, nb, 0, 0x60);
	numnodes = ((reg >> 4) & 0xF) + 1;
	if (numnodes <= 1)
		return -ENOENT;

	pr_info("Number of physical nodes %d\n", numnodes);

	prevbase = 0;
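
	/*
	 * Walk the eight DRAM Base/Limit register pairs of the address map
	 * function (offsets 0x40/0x44 + i*8).  As decoded below: bits
	 * [31:16] of each register hold physical address bits [39:24], the
	 * low two bits of the base are the read/write-enable flags, the low
	 * three bits of the limit are the destination node ID, and a
	 * non-zero field above bit 8 indicates node interleaving, which is
	 * not supported here.
	 */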
	for (i = 0; i < 8; i++) {
		unsigned long base, limit;

		base = read_pci_config(0, nb, 1, 0x40 + i*8);
		limit = read_pci_config(0, nb, 1, 0x44 + i*8);

		nodeids[i] = nodeid = limit & 7;

		if ((base & 3) == 0) {
			if (i < numnodes)
				pr_info("Skipping disabled node %d\n", i);
			continue;
		}
		if (nodeid >= numnodes) {
			pr_info("Ignoring excess node %d (%lx:%lx)\n", nodeid,
				base, limit);
			continue;
		}

		if (!limit) {
			pr_info("Skipping node entry %d (base %lx)\n",
				i, base);
			continue;
		}
		if ((base >> 8) & 3 || (limit >> 8) & 3) {
			pr_err("Node %d using interleaving mode %lx/%lx\n",
			       nodeid, (base >> 8) & 3, (limit >> 8) & 3);
			return -EINVAL;
		}
		if (node_isset(nodeid, mem_nodes_parsed)) {
			pr_info("Node %d already present, skipping\n",
				nodeid);
			continue;
		}
		limit >>= 16;
		limit <<= 24;
		limit |= (1<<24)-1;
		limit++;

		if (limit > end)
			limit = end;
		if (limit <= base)
			continue;

		base >>= 16;
		base <<= 24;

		if (base < start)
			base = start;
		if (limit > end)
			limit = end;
		if (limit == base) {
			pr_err("Empty node %d\n", nodeid);
			continue;
		}
		if (limit < base) {
			pr_err("Node %d bogus settings %lx-%lx.\n",
			       nodeid, base, limit);
			continue;
		}

		/* Could sort here, but punt for now. Should not happen anyway. */
		if (prevbase > base) {
			pr_err("Node map not sorted %lx,%lx\n",
			       prevbase, base);
			return -EINVAL;
		}

		pr_info("Node %d MemBase %016lx Limit %016lx\n",
			nodeid, base, limit);

		numa_nodes[nodeid].start = base;
		numa_nodes[nodeid].end = limit;

		prevbase = base;

		node_set(nodeid, mem_nodes_parsed);
		node_set(nodeid, cpu_nodes_parsed);
	}

	if (!nodes_weight(mem_nodes_parsed))
		return -ENOENT;

	/*
	 * We seem to have a valid NUMA configuration.  Map apicids to nodes
	 * using the coreid bits from early_identify_cpu.
	 */
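	/*
	 * The mapping below assumes the usual AMD APIC ID layout: the node
	 * number sits in the bits above x86_coreid_bits, the core number in
	 * the bits below, optionally offset by the BSP's APIC ID on systems
	 * with APIC ID lifting.
	 */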
	bits = boot_cpu_data.x86_coreid_bits;
	cores = 1 << bits;
	apicid_base = 0;

	/* get the APIC ID of the BSP early for systems with apicid lifting */
	early_get_boot_cpu_id();
	if (boot_cpu_physical_apicid > 0) {
		pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
		apicid_base = boot_cpu_physical_apicid;
	}

	for_each_node_mask(i, cpu_nodes_parsed)
		for (j = apicid_base; j < cores + apicid_base; j++)
			set_apicid_to_node((i << bits) + j, i);

	return 0;
}

#ifdef CONFIG_NUMA_EMU
static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

void __init amd_get_nodes(struct bootnode *physnodes)
{
	int i;

	for_each_node_mask(i, mem_nodes_parsed) {
		physnodes[i].start = numa_nodes[i].start;
		physnodes[i].end = numa_nodes[i].end;
	}
}

static int __init find_node_by_addr(unsigned long addr)
{
	int ret = NUMA_NO_NODE;
	int i;

	for (i = 0; i < 8; i++)
		if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) {
			ret = i;
			break;
		}
	return ret;
}

/*
 * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
 * set up to represent the physical topology but reflect the emulated
 * environment.  For each emulated node, the real node which it appears on is
 * found and a fake pxm to nid mapping is created which mirrors the actual
 * locality.  node_distance() then represents the correct distances between
 * emulated nodes by using the fake acpi mappings to pxms.
 */
void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
{
	unsigned int bits;
	unsigned int cores;
	unsigned int apicid_base = 0;
	int i;

	bits = boot_cpu_data.x86_coreid_bits;
	cores = 1 << bits;
	early_get_boot_cpu_id();
	if (boot_cpu_physical_apicid > 0)
		apicid_base = boot_cpu_physical_apicid;

	for (i = 0; i < nr_nodes; i++) {
		int index;
		int nid;
		int j;

		nid = find_node_by_addr(nodes[i].start);
		if (nid == NUMA_NO_NODE)
			continue;

		index = nodeids[nid] << bits;
		if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
			for (j = apicid_base; j < cores + apicid_base; j++)
				fake_apicid_to_node[index + j] = i;
#ifdef CONFIG_ACPI_NUMA
		__acpi_map_pxm_to_node(nid, i);
#endif
	}
	memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
}
#endif /* CONFIG_NUMA_EMU */
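
/*
 * Register the nodes discovered above with the core NUMA code: compute the
 * memnode hash shift, hand each node's memory range to memblock, set up
 * per-node bootmem and initialize the CPU-to-node map.
 */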
int __init amd_scan_nodes(void)
{
	int i;

	memnode_shift = compute_hash_shift(numa_nodes, 8, NULL);
	if (memnode_shift < 0) {
		pr_err("No NUMA node hash function found. Contact maintainer\n");
		return -1;
	}
	pr_info("Using node hash shift of %d\n", memnode_shift);

	/* use the coreid bits from early_identify_cpu */
	for_each_node_mask(i, node_possible_map)
		memblock_x86_register_active_regions(i,
				numa_nodes[i].start >> PAGE_SHIFT,
				numa_nodes[i].end >> PAGE_SHIFT);
	init_memory_mapping_high();
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);

	numa_init_array();
	return 0;
}