@@ -131,6 +131,10 @@ static unsigned long __initdata dma_reserve;
   int __initdata nr_nodemap_entries;
   unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
   unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
+  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
+#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #ifdef CONFIG_DEBUG_VM
@@ -2094,6 +2098,62 @@ void __init sparse_memory_present_with_active_regions(int nid)
 				early_node_map[i].end_pfn);
 }
 
+/**
+ * push_node_boundaries - Push node boundaries to at least the requested boundary
+ * @nid: The nid of the node to push the boundary for
+ * @start_pfn: The start pfn of the node
+ * @end_pfn: The end pfn of the node
+ *
+ * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
+ * time. Specifically, on x86_64, SRAT will report ranges that can potentially
+ * be hotplugged even though no physical memory exists. This function allows
+ * an arch to push out the node boundaries so mem_map is allocated that can
+ * be used later.
+ */
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+void __init push_node_boundaries(unsigned int nid,
+		unsigned long start_pfn, unsigned long end_pfn)
+{
+	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+			nid, start_pfn, end_pfn);
+
+	/* Initialise the boundary for this node if necessary */
+	if (node_boundary_end_pfn[nid] == 0)
+		node_boundary_start_pfn[nid] = -1UL;
+
+	/* Update the boundaries */
+	if (node_boundary_start_pfn[nid] > start_pfn)
+		node_boundary_start_pfn[nid] = start_pfn;
+	if (node_boundary_end_pfn[nid] < end_pfn)
+		node_boundary_end_pfn[nid] = end_pfn;
+}
+
+/* If necessary, push the node boundary out for reserve hotadd */
+static void __init account_node_boundary(unsigned int nid,
+		unsigned long *start_pfn, unsigned long *end_pfn)
+{
+	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+			nid, *start_pfn, *end_pfn);
+
+	/* Return if boundary information has not been provided */
+	if (node_boundary_end_pfn[nid] == 0)
+		return;
+
+	/* Check the boundaries and update if necessary */
+	if (node_boundary_start_pfn[nid] < *start_pfn)
+		*start_pfn = node_boundary_start_pfn[nid];
+	if (node_boundary_end_pfn[nid] > *end_pfn)
+		*end_pfn = node_boundary_end_pfn[nid];
+}
+#else
+void __init push_node_boundaries(unsigned int nid,
+		unsigned long start_pfn, unsigned long end_pfn) {}
+
+static void __init account_node_boundary(unsigned int nid,
+		unsigned long *start_pfn, unsigned long *end_pfn) {}
+#endif
+
+
 /**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
@@ -2121,6 +2181,9 @@ void __init get_pfn_range_for_nid(unsigned int nid,
 		printk(KERN_WARNING "Node %u active with no memory\n", nid);
 		*start_pfn = 0;
 	}
+
+	/* Push the node boundaries out if requested */
+	account_node_boundary(nid, start_pfn, end_pfn);
 }
 
 /*
@@ -2527,6 +2590,10 @@ void __init remove_all_active_ranges()
 {
 	memset(early_node_map, 0, sizeof(early_node_map));
 	nr_nodemap_entries = 0;
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
+	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
+#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 }
 
 /* Compare two active node_active_regions */
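
For orientation, here is a minimal sketch of how architecture code that parses a firmware memory-affinity table (such as SRAT on x86_64) might use the new hook. This is not part of the patch: the helper record_memory_affinity() and its flag parameters are hypothetical; push_node_boundaries() is the hook added above, and add_active_range() is the existing interface in mm/page_alloc.c for registering present memory. A range reported as hotpluggable but not yet populated is pushed out as a node boundary so that mem_map covering it is allocated at boot; get_pfn_range_for_nid() then returns the widened range via account_node_boundary().

/*
 * Illustrative sketch only. record_memory_affinity() and its flags are
 * hypothetical arch-side names; only push_node_boundaries() (added by
 * this patch) and add_active_range() are real interfaces here.
 */
static void __init record_memory_affinity(int nid,
		unsigned long start_pfn, unsigned long end_pfn,
		int enabled, int hotpluggable)
{
	if (enabled) {
		/* Memory is present now: register it as an active range */
		add_active_range(nid, start_pfn, end_pfn);
		return;
	}

	if (hotpluggable) {
		/*
		 * No memory present yet, but the range may be hot-added
		 * later. Widen the node boundaries so mem_map covering
		 * this range is allocated at boot (reserve-based hot-add).
		 */
		push_node_boundaries(nid, start_pfn, end_pfn);
	}
}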