@@ -2524,7 +2524,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
         struct page *page;
         unsigned long end_pfn = start_pfn + size;
         unsigned long pfn;
+        struct zone *z;
 
+        z = &NODE_DATA(nid)->node_zones[zone];
         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                 /*
                  * There can be holes in boot-time mem_map[]s
@@ -2542,7 +2544,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 init_page_count(page);
                 reset_page_mapcount(page);
                 SetPageReserved(page);
-
                 /*
                  * Mark the block movable so that blocks are reserved for
                  * movable at startup. This will force kernel allocations
@@ -2551,8 +2552,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                  * kernel allocations are made. Later some blocks near
                  * the start are marked MIGRATE_RESERVE by
                  * setup_zone_migrate_reserve()
+                 *
+                 * The bitmap is created for the zone's valid pfn range,
+                 * but the memmap can be created for invalid pages (for
+                 * alignment). Check here so that we do not call
+                 * set_pageblock_migratetype() on a pfn outside the zone.
                  */
-                if ((pfn & (pageblock_nr_pages-1)))
+                if ((z->zone_start_pfn <= pfn)
+                    && (pfn < z->zone_start_pfn + z->spanned_pages)
+                    && !(pfn & (pageblock_nr_pages - 1)))
                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
                 INIT_LIST_HEAD(&page->lru);
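
The guard added above fires only for pfns that are both inside the zone and at a pageblock boundary. The extra range check matters because the memmap handed to memmap_init_zone() can be rounded out to pageblock boundaries, so it may describe pfns for which the zone's usemap has no bits; calling set_pageblock_migratetype() for such a pfn corrupts memory. Below is a minimal userspace sketch of the patched condition, not kernel code; the zone numbers and the PAGEBLOCK_NR_PAGES stand-in for pageblock_nr_pages are made up for illustration.

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 64UL /* hypothetical stand-in for pageblock_nr_pages */

int main(void)
{
        unsigned long zone_start_pfn = 1000;    /* hypothetical zone layout */
        unsigned long spanned_pages = 1000;
        /* the memmap may start below the zone, rounded down for alignment */
        unsigned long start_pfn = zone_start_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
        unsigned long end_pfn = zone_start_pfn + spanned_pages;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                /* the patched condition: in-zone AND at a pageblock boundary */
                if (zone_start_pfn <= pfn
                    && pfn < zone_start_pfn + spanned_pages
                    && !(pfn & (PAGEBLOCK_NR_PAGES - 1)))
                        printf("mark block at pfn %lu movable\n", pfn);
                else if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
                        printf("pfn %lu is aligned but outside the zone: skip\n", pfn);
        }
        return 0;
}

With these numbers the memmap begins at pfn 960, forty pfns below the zone: the sketch skips the aligned pfn 960 and marks only the aligned blocks from 1024 upward, which is exactly what the old boundary-only test failed to do.
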
@@ -4464,6 +4472,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
         pfn = page_to_pfn(page);
         bitmap = get_pageblock_bitmap(zone, pfn);
         bitidx = pfn_to_bitidx(zone, pfn);
+        VM_BUG_ON(pfn < zone->zone_start_pfn);
+        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
         for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                 if (flags & value)
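
The two new VM_BUG_ON()s make the caller contract explicit: set_pageblock_flags_group() computes its bit index relative to zone_start_pfn, so a pfn below the zone underflows the index. The following is a rough userspace model of that arithmetic, loosely based on the non-SPARSEMEM variant of pfn_to_bitidx(); the constants and the bitidx_for() helper name are hypothetical.

#include <stdio.h>

#define PAGEBLOCK_ORDER   6UL   /* hypothetical pageblock_order */
#define NR_PAGEBLOCK_BITS 4UL   /* bits kept per pageblock */

/* Roughly the non-SPARSEMEM pfn_to_bitidx(): index relative to zone start. */
static unsigned long bitidx_for(unsigned long zone_start_pfn, unsigned long pfn)
{
        /*
         * For pfn < zone_start_pfn the subtraction wraps around on
         * unsigned long, producing an enormous bit index; a write at
         * that offset lands far outside the zone's usemap allocation.
         */
        return ((pfn - zone_start_pfn) >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
        unsigned long zone_start_pfn = 1000;    /* hypothetical */

        printf("in-zone pfn 1100    -> bitidx %lu\n",
               bitidx_for(zone_start_pfn, 1100));
        printf("out-of-zone pfn 960 -> bitidx %lu (wrapped)\n",
               bitidx_for(zone_start_pfn, 960));
        return 0;
}

Because the index wraps to a huge value rather than going negative, the stray write lands far from the usemap allocation, which is why this bug surfaces as seemingly unrelated memory corruption rather than an immediate fault.
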