@@ -181,42 +181,17 @@ static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
- #ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP
- /*
- * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges
- * of memory (RAM) that may be registered with add_active_range().
- * Ranges passed to add_active_range() will be merged if possible so
- * the number of times add_active_range() can be called is related to
- * the number of nodes and the number of holes
- */
- #ifdef CONFIG_MAX_ACTIVE_REGIONS
- /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
- #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
- #else
- #if MAX_NUMNODES >= 32
- /* If there can be many nodes, allow up to 50 holes per node */
- #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
- #else
- /* By default, allow up to 256 distinct regions */
- #define MAX_ACTIVE_REGIONS 256
- #endif
- #endif
-
- static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
- static int __meminitdata nr_nodemap_entries;
-#endif /* !CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
- static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
- static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
- static unsigned long __initdata required_kernelcore;
- static unsigned long __initdata required_movablecore;
- static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-
- /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
- int movable_zone;
- EXPORT_SYMBOL(movable_zone);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_movablecore;
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+
+/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+int movable_zone;
+EXPORT_SYMBOL(movable_zone);
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
@@ -3734,7 +3709,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
return 0;
}

-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -4002,7 +3977,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

-#else
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *zones_size)
@@ -4020,7 +3995,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}

-#endif
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
@@ -4243,10 +4218,10 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
@@ -4271,7 +4246,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat, zones_size, zholes_size);
}

-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

#if MAX_NUMNODES > 1
/*
@@ -4292,201 +4267,6 @@ static inline void setup_nr_node_ids(void)
}
#endif

-#ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-/*
- * Common iterator interface used to define for_each_mem_pfn_range().
- */
-void __meminit __next_mem_pfn_range(int *idx, int nid,
- unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid)
-{
- struct node_active_region *r = NULL;
-
- while (++*idx < nr_nodemap_entries) {
- if (nid == MAX_NUMNODES || nid == early_node_map[*idx].nid) {
- r = &early_node_map[*idx];
- break;
- }
- }
- if (!r) {
- *idx = -1;
- return;
- }
-
- if (out_start_pfn)
- *out_start_pfn = r->start_pfn;
- if (out_end_pfn)
- *out_end_pfn = r->end_pfn;
- if (out_nid)
- *out_nid = r->nid;
-}
-
-/**
- * add_active_range - Register a range of PFNs backed by physical memory
- * @nid: The node ID the range resides on
- * @start_pfn: The start PFN of the available physical memory
- * @end_pfn: The end PFN of the available physical memory
- *
- * These ranges are stored in an early_node_map[] and later used by
- * free_area_init_nodes() to calculate zone sizes and holes. If the
- * range spans a memory hole, it is up to the architecture to ensure
- * the memory is not freed by the bootmem allocator. If possible
- * the range being registered will be merged with existing ranges.
- */
-void __init add_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int i;
-
- mminit_dprintk(MMINIT_TRACE, "memory_register",
- "Entering add_active_range(%d, %#lx, %#lx) "
- "%d entries of %d used\n",
- nid, start_pfn, end_pfn,
- nr_nodemap_entries, MAX_ACTIVE_REGIONS);
-
- mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
-
- /* Merge with existing active regions if possible */
- for (i = 0; i < nr_nodemap_entries; i++) {
- if (early_node_map[i].nid != nid)
- continue;
-
- /* Skip if an existing region covers this new one */
- if (start_pfn >= early_node_map[i].start_pfn &&
- end_pfn <= early_node_map[i].end_pfn)
- return;
-
- /* Merge forward if suitable */
- if (start_pfn <= early_node_map[i].end_pfn &&
- end_pfn > early_node_map[i].end_pfn) {
- early_node_map[i].end_pfn = end_pfn;
- return;
- }
-
- /* Merge backward if suitable */
- if (start_pfn < early_node_map[i].start_pfn &&
- end_pfn >= early_node_map[i].start_pfn) {
- early_node_map[i].start_pfn = start_pfn;
- return;
- }
- }
-
- /* Check that early_node_map is large enough */
- if (i >= MAX_ACTIVE_REGIONS) {
- printk(KERN_CRIT "More than %d memory regions, truncating\n",
- MAX_ACTIVE_REGIONS);
- return;
- }
-
- early_node_map[i].nid = nid;
- early_node_map[i].start_pfn = start_pfn;
- early_node_map[i].end_pfn = end_pfn;
- nr_nodemap_entries = i + 1;
-}
-
-/**
- * remove_active_range - Shrink an existing registered range of PFNs
- * @nid: The node id the range is on that should be shrunk
- * @start_pfn: The new PFN of the range
- * @end_pfn: The new PFN of the range
- *
- * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept near the end physical page range that has already been
- * registered. This function allows an arch to shrink an existing registered
- * range.
- */
-void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- unsigned long this_start_pfn, this_end_pfn;
- int i, j;
- int removed = 0;
-
- printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
- nid, start_pfn, end_pfn);
-
- /* Find the old active region end and shrink */
- for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
- if (this_start_pfn >= start_pfn && this_end_pfn <= end_pfn) {
- /* clear it */
- early_node_map[i].start_pfn = 0;
- early_node_map[i].end_pfn = 0;
- removed = 1;
- continue;
- }
- if (this_start_pfn < start_pfn && this_end_pfn > start_pfn) {
- early_node_map[i].end_pfn = start_pfn;
- if (this_end_pfn > end_pfn)
- add_active_range(nid, end_pfn, this_end_pfn);
- continue;
- }
- if (this_start_pfn >= start_pfn && this_end_pfn > end_pfn &&
- this_start_pfn < end_pfn) {
- early_node_map[i].start_pfn = end_pfn;
- continue;
- }
- }
-
- if (!removed)
- return;
-
- /* remove the blank ones */
- for (i = nr_nodemap_entries - 1; i > 0; i--) {
- if (early_node_map[i].nid != nid)
- continue;
- if (early_node_map[i].end_pfn)
- continue;
- /* we found it, get rid of it */
- for (j = i; j < nr_nodemap_entries - 1; j++)
- memcpy(&early_node_map[j], &early_node_map[j+1],
- sizeof(early_node_map[j]));
- j = nr_nodemap_entries - 1;
- memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
- nr_nodemap_entries--;
- }
-}
-
-/**
- * remove_all_active_ranges - Remove all currently registered regions
- *
- * During discovery, it may be found that a table like SRAT is invalid
- * and an alternative discovery method must be used. This function removes
- * all currently registered regions.
- */
-void __init remove_all_active_ranges(void)
-{
- memset(early_node_map, 0, sizeof(early_node_map));
- nr_nodemap_entries = 0;
-}
-
-/* Compare two active node_active_regions */
-static int __init cmp_node_active_region(const void *a, const void *b)
-{
- struct node_active_region *arange = (struct node_active_region *)a;
- struct node_active_region *brange = (struct node_active_region *)b;
-
- /* Done this way to avoid overflows */
- if (arange->start_pfn > brange->start_pfn)
- return 1;
- if (arange->start_pfn < brange->start_pfn)
- return -1;
-
- return 0;
-}
-
-/* sort the node_map by start_pfn */
-void __init sort_node_map(void)
-{
- sort(early_node_map, (size_t)nr_nodemap_entries,
- sizeof(struct node_active_region),
- cmp_node_active_region, NULL);
-}
-#else /* !CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline void sort_node_map(void)
-{
-}
-#endif
-
/**
* node_map_pfn_alignment - determine the maximum internode alignment
*
@@ -4764,9 +4544,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
unsigned long start_pfn, end_pfn;
int i, nid;

- /* Sort early_node_map as initialisation assumes it is sorted */
- sort_node_map();
-
/* Record where the zone boundaries are */
memset(arch_zone_lowest_possible_pfn, 0,
sizeof(arch_zone_lowest_possible_pfn));
@@ -4867,7 +4644,7 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
* set_dma_reserve - set the specified number of pages reserved in the first zone