@@ -250,9 +250,7 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 	do {
 		seq = zone_span_seqbegin(zone);
-		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
-			ret = 1;
-		else if (pfn < zone->zone_start_pfn)
+		if (!zone_spans_pfn(zone, pfn))
 			ret = 1;
 	} while (zone_span_seqretry(zone, seq));
 
@@ -990,9 +988,9 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
-	if (start_pfn < zone->zone_start_pfn)
+	if (!zone_spans_pfn(zone, start_pfn))
 		start_page = page;
-	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
+	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
 	return move_freepages(zone, start_page, end_page, migratetype);
@@ -1286,7 +1284,7 @@ void mark_free_pages(struct zone *zone)
 
 	spin_lock_irqsave(&zone->lock, flags);
 
-	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	max_zone_pfn = zone_end_pfn(zone);
 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
@@ -3798,7 +3796,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	 * the block.
 	 */
 	start_pfn = zone->zone_start_pfn;
-	end_pfn = start_pfn + zone->spanned_pages;
+	end_pfn = zone_end_pfn(zone);
 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 							pageblock_order;
@@ -3912,7 +3910,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * pfn out of zone.
 		 */
 		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && (pfn < zone_end_pfn(z))
 		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
@@ -4713,7 +4711,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		 * for the buddy allocator to function correctly.
 		 */
 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
-		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
@@ -5928,8 +5926,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(pfn < zone->zone_start_pfn);
-	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
+	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
@@ -6027,8 +6024,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 
 	zone = page_zone(page);
 	pfn = page_to_pfn(page);
-	if (zone->zone_start_pfn > pfn ||
-	    zone->zone_start_pfn + zone->spanned_pages <= pfn)
+	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
 	return !has_unmovable_pages(zone, page, 0, true);
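
The helper definitions themselves are not part of this excerpt; they are introduced elsewhere in the same change, presumably in include/linux/mmzone.h alongside struct zone. Judging from the open-coded expressions they replace above, they should reduce to something like the following sketch:

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	/* First pfn past the zone's spanned range. */
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	/* True iff pfn lies in [zone_start_pfn, zone_end_pfn). */
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	/* Node-level analogue, used in alloc_node_mem_map() above. */
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

Note how zone_spans_pfn() folds both boundary checks into a single predicate, which is what lets the two-branch tests in page_outside_zone_boundaries() and is_pageblock_removable_nolock() collapse to one condition.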