@@ -290,8 +290,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
 	printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
 	       start, end);
-	e820_register_active_regions(node, start >> PAGE_SHIFT,
-				     end >> PAGE_SHIFT);
 
 	if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
 		update_nodes_add(node, start, end);
@@ -338,6 +336,19 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
 
 void __init acpi_numa_arch_fixup(void) {}
 
+int __init acpi_get_nodes(struct bootnode *physnodes)
+{
+	int i;
+	int ret = 0;
+
+	for_each_node_mask(i, nodes_parsed) {
+		physnodes[ret].start = nodes[i].start;
+		physnodes[ret].end = nodes[i].end;
+		ret++;
+	}
+	return ret;
+}
+
 /* Use the information discovered above to actually set up the nodes. */
 int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 {
@@ -350,11 +361,6 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 	for (i = 0; i < MAX_NUMNODES; i++)
 		cutoff_node(i, start, end);
 
-	if (!nodes_cover_memory(nodes)) {
-		bad_srat();
-		return -1;
-	}
-
 	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
 					   memblk_nodeid);
 	if (memnode_shift < 0) {
@@ -364,6 +370,14 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		return -1;
 	}
 
+	for_each_node_mask(i, nodes_parsed)
+		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+						nodes[i].end >> PAGE_SHIFT);
+	if (!nodes_cover_memory(nodes)) {
+		bad_srat();
+		return -1;
+	}
+
 	/* Account for nodes with cpus and no memory */
 	nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);