@@ -241,20 +241,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 	return count;
 }
 
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
 {
 	struct zone *z;
 
-	/*
-	 * In free_area_init_core(), highmem zone's managed_pages is set to
-	 * present_pages, and bootmem allocator doesn't allocate from highmem
-	 * zones. So there's no need to recalculate managed_pages because all
-	 * highmem pages will be managed by the buddy system. Here highmem
-	 * zone also includes highmem movable zone.
-	 */
+	if (reset_managed_pages_done)
+		return;
+
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-		if (!is_highmem(z))
-			z->managed_pages = 0;
+		z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+	struct pglist_data *pgdat;
+
+	for_each_online_pgdat(pgdat)
+		reset_node_managed_pages(pgdat);
+	reset_managed_pages_done = 1;
 }
 
 /**
@@ -266,7 +272,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
 	register_page_bootmem_info_node(pgdat);
-	reset_node_lowmem_managed_pages(pgdat);
+	reset_node_managed_pages(pgdat);
 	return free_all_bootmem_core(pgdat->bdata);
 }
 
@@ -279,10 +285,8 @@ unsigned long __init free_all_bootmem(void)
 {
 	unsigned long total_pages = 0;
 	bootmem_data_t *bdata;
-	struct pglist_data *pgdat;
 
-	for_each_online_pgdat(pgdat)
-		reset_node_lowmem_managed_pages(pgdat);
+	reset_all_zones_managed_pages();
 
 	list_for_each_entry(bdata, &bdata_list, list)
 		total_pages += free_all_bootmem_core(bdata);
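
For illustration only, not part of the patch: a minimal userspace sketch of the one-shot reset pattern the new helpers implement. The struct layouts, the MAX_NR_ZONES value and the plain node loop standing in for for_each_online_pgdat() are simplified stand-ins, not the real kernel definitions. The first call to reset_all_zones_managed_pages() zeroes managed_pages in every zone of every node; once reset_managed_pages_done is set, later calls return without touching anything, so several boot-time paths can safely share the same entry point.

/*
 * Userspace mock of the one-shot reset pattern; simplified stand-in
 * types, not the kernel's pg_data_t / struct zone.
 */
#include <stdio.h>

#define MAX_NR_ZONES	4
#define NR_NODES	2

struct zone {
	unsigned long managed_pages;
};

struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
};

static struct pglist_data nodes[NR_NODES];

/* mirrors reset_managed_pages_done in the patch */
static int reset_managed_pages_done;

static void reset_node_managed_pages(struct pglist_data *pgdat)
{
	struct zone *z;

	if (reset_managed_pages_done)
		return;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

static void reset_all_zones_managed_pages(void)
{
	int nid;

	/* plain loop standing in for for_each_online_pgdat() */
	for (nid = 0; nid < NR_NODES; nid++)
		reset_node_managed_pages(&nodes[nid]);
	reset_managed_pages_done = 1;
}

int main(void)
{
	int nid, i;

	/* pretend early init gave every zone some pages */
	for (nid = 0; nid < NR_NODES; nid++)
		for (i = 0; i < MAX_NR_ZONES; i++)
			nodes[nid].node_zones[i].managed_pages = 1000;

	reset_all_zones_managed_pages();	/* first call zeroes everything */

	nodes[0].node_zones[0].managed_pages = 42;
	reset_all_zones_managed_pages();	/* later calls are no-ops */

	printf("%lu\n", nodes[0].node_zones[0].managed_pages);	/* prints 42 */
	return 0;
}

Compiled and run, the sketch prints 42: the value written after the first reset survives the second call, showing that the guard makes repeated resets harmless.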