|
@@ -1040,7 +1040,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
- * tasks mems_allowed, or node_online_map.)
+ * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
  *
  * If the zonelist cache is not available for this zonelist, does
  * nothing and returns NULL.
|
|
@@ -1069,7 +1069,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 		return NULL;
 
 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
 					&cpuset_current_mems_allowed :
-					&node_online_map;
+					&node_states[N_HIGH_MEMORY];
 	return allowednodes;
 }
|
|
|
|
|
|
@@ -1802,7 +1802,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		return node;
 	}
 
-	for_each_online_node(n) {
+	for_each_node_state(n, N_HIGH_MEMORY) {
 		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
|
|
@@ -1939,7 +1939,8 @@ static int default_zonelist_order(void)
 	 * If there is a node whose DMA/DMA32 memory is very big area on
 	 * local memory, NODE_ORDER may be suitable.
 	 */
-	average_size = total_size / (num_online_nodes() + 1);
+	average_size = total_size /
+			(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
 	for_each_online_node(nid) {
 		low_kmem_size = 0;
 		total_size = 0;
|
|
@@ -2098,20 +2099,6 @@ static void build_zonelist_cache(pg_data_t *pgdat)
 
 #endif /* CONFIG_NUMA */
 
-/* Any regular memory on that node ? */
-static void check_for_regular_memory(pg_data_t *pgdat)
-{
-#ifdef CONFIG_HIGHMEM
-	enum zone_type zone_type;
-
-	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
-		struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages)
-			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
-	}
-#endif
-}
-
 /* return values int ....just for stop_machine_run() */
 static int __build_all_zonelists(void *dummy)
 {
|
|
@@ -2122,11 +2109,6 @@ static int __build_all_zonelists(void *dummy)
 
 		build_zonelists(pgdat);
 		build_zonelist_cache(pgdat);
-
-		/* Any memory on that node */
-		if (pgdat->node_present_pages)
-			node_set_state(nid, N_HIGH_MEMORY);
-		check_for_regular_memory(pgdat);
 	}
 	return 0;
 }
|
|
@@ -3282,16 +3264,24 @@ unsigned long __init find_max_pfn_with_active_regions(void)
 	return max_pfn;
 }
 
+/*
+ * early_calculate_totalpages()
+ * Sum pages in active regions for movable zone.
+ * Populate N_HIGH_MEMORY for calculating usable_nodes.
+ */
 unsigned long __init early_calculate_totalpages(void)
 {
 	int i;
 	unsigned long totalpages = 0;
 
-	for (i = 0; i < nr_nodemap_entries; i++)
-		totalpages += early_node_map[i].end_pfn -
+	for (i = 0; i < nr_nodemap_entries; i++) {
+		unsigned long pages = early_node_map[i].end_pfn -
 						early_node_map[i].start_pfn;
-
-	return totalpages;
+		totalpages += pages;
+		if (pages)
+			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
+	}
+	return totalpages;
 }
 
 /*
|
|
@@ -3305,7 +3295,8 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 	int i, nid;
 	unsigned long usable_startpfn;
 	unsigned long kernelcore_node, kernelcore_remaining;
-	int usable_nodes = num_online_nodes();
+	unsigned long totalpages = early_calculate_totalpages();
+	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
 	/*
 	 * If movablecore was specified, calculate what size of
|
|
@@ -3316,7 +3307,6 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 	 * what movablecore would have allowed.
 	 */
 	if (required_movablecore) {
-		unsigned long totalpages = early_calculate_totalpages();
 		unsigned long corepages;
 
 		/*
|
|
@@ -3341,7 +3331,7 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 restart:
 	/* Spread kernelcore memory as evenly as possible throughout nodes */
 	kernelcore_node = required_kernelcore / usable_nodes;
-	for_each_online_node(nid) {
+	for_each_node_state(nid, N_HIGH_MEMORY) {
 		/*
 		 * Recalculate kernelcore_node if the division per node
 		 * now exceeds what is necessary to satisfy the requested
|
|
@@ -3433,6 +3423,20 @@ restart:
 		roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 }
 
+/* Any regular memory on that node ? */
+static void check_for_regular_memory(pg_data_t *pgdat)
+{
+#ifdef CONFIG_HIGHMEM
+	enum zone_type zone_type;
+
+	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
+		struct zone *zone = &pgdat->node_zones[zone_type];
+		if (zone->present_pages)
+			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+	}
+#endif
+}
+
 /**
  * free_area_init_nodes - Initialise all pg_data_t and zone data
  * @max_zone_pfn: an array of max PFNs for each zone
|
|
@@ -3507,6 +3511,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 		pg_data_t *pgdat = NODE_DATA(nid);
 		free_area_init_node(nid, pgdat, NULL,
 				find_min_pfn_for_node(nid), NULL);
+
+		/* Any memory on that node */
+		if (pgdat->node_present_pages)
+			node_set_state(nid, N_HIGH_MEMORY);
+		check_for_regular_memory(pgdat);
 	}
 }
 
|
|
|
|
|