@@ -749,23 +749,6 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
-/* Return the page with the lowest PFN in the list */
-static struct page *min_page(struct list_head *list)
-{
-	unsigned long min_pfn = -1UL;
-	struct page *min_page = NULL, *page;;
-
-	list_for_each_entry(page, list, lru) {
-		unsigned long pfn = page_to_pfn(page);
-		if (pfn < min_pfn) {
-			min_pfn = pfn;
-			min_page = page;
-		}
-	}
-
-	return min_page;
-}
-
 /* Remove an element from the buddy allocator from the fallback list */
 static struct page *__rmqueue_fallback(struct zone *zone, int order,
 						int start_migratetype)
@@ -789,11 +772,8 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		if (list_empty(&area->free_list[migratetype]))
 			continue;
 
-		/* Bias kernel allocations towards low pfns */
 		page = list_entry(area->free_list[migratetype].next,
 					struct page, lru);
-		if (unlikely(start_migratetype != MIGRATE_MOVABLE))
-			page = min_page(&area->free_list[migratetype]);
 		area->nr_free--;
 
 		/*