@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn)
+		unsigned long start_pfn, enum memmap_context context)
 {
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!early_pfn_valid(pfn))
-			continue;
-		if (!early_pfn_in_nid(pfn, nid))
-			continue;
+		/*
+		 * There can be holes in boot-time mem_map[]s
+		 * handed to this function. They do not
+		 * exist on hotplugged memory.
+		 */
+		if (context == MEMMAP_EARLY) {
+			if (!early_pfn_valid(pfn))
+				continue;
+			if (!early_pfn_in_nid(pfn, nid))
+				continue;
+		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
 		init_page_count(page);
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn))
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
-					unsigned long size)
+					unsigned long size,
+					enum memmap_context context)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+		ret = init_currently_empty_zone(zone, zone_start_pfn,
+						size, MEMMAP_EARLY);
 		BUG_ON(ret);
 		zone_start_pfn += size;
 	}
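
For context, a minimal sketch of how the new parameter is meant to be consumed, assuming the enum is declared alongside the zone definitions (e.g. in include/linux/mmzone.h) and that a memory-hotplug call site exists elsewhere; the hotplug call below is illustrative and is not part of the hunks above:

	/* Assumed declaration of the new context enum: */
	enum memmap_context {
		MEMMAP_EARLY,		/* boot-time mem_map, may contain holes */
		MEMMAP_HOTPLUG,		/* hotplugged range, no holes expected */
	};

	/* Boot path (free_area_init_core), as in the last hunk above:
	 * pfn validity is re-checked because boot-time mem_map can have holes. */
	ret = init_currently_empty_zone(zone, zone_start_pfn, size, MEMMAP_EARLY);

	/* A hotplug path (e.g. in mm/memory_hotplug.c) would instead pass
	 * MEMMAP_HOTPLUG, so memmap_init_zone() skips the early_pfn_valid()
	 * and early_pfn_in_nid() checks for the newly added range: */
	ret = init_currently_empty_zone(zone, start_pfn, nr_pages, MEMMAP_HOTPLUG);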