@@ -158,7 +158,6 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;
 
 static inline int get_pageblock_migratetype(struct page *page)
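
Grouping pages by mobility is now always built in; the
page_group_by_mobility_disabled flag kept above becomes the only remaining
switch. As a rough sketch, not part of this patch: kernels of this era set
the flag at zonelist build time when the machine is too small for grouping
to pay off, along the lines of

        /* Grouping only pays off with enough memory per migrate type */
        if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES))
                page_group_by_mobility_disabled = 1;
        else
                page_group_by_mobility_disabled = 0;

so small-memory systems keep the old single-list behaviour at run time
rather than compile time.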
@@ -192,22 +191,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-	return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-	return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
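
The stubs deleted above only existed for the !CONFIG_PAGE_GROUP_BY_MOBILITY
build. The surviving allocflags_to_migratetype() packs two GFP bits into a
migrate type; a sketch reconstructed from kernels of this era, not part of
the hunk itself:

static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
{
        if (unlikely(page_group_by_mobility_disabled))
                return MIGRATE_UNMOVABLE;

        /* __GFP_MOVABLE supplies bit 1, __GFP_RECLAIMABLE bit 0 */
        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

With neither bit set the result is 0, i.e. MIGRATE_UNMOVABLE, so ordinary
kernel allocations naturally get the behaviour the old stub hard-coded.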
@@ -718,7 +701,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	return NULL;
 }
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
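
The array this comment introduces lists, for each migrate type, which other
free lists __rmqueue_fallback() may raid and in what order. Roughly, in
kernels of this era (a sketch, not part of the hunk):

static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE },
};

MIGRATE_RESERVE always comes last, which matches the "use MIGRATE_RESERVE
rather than fail" policy visible in a later hunk.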
@@ -750,7 +732,7 @@ int move_freepages(struct zone *zone,
 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 	 * anyway as we check zone boundaries in move_freepages_block().
 	 * Remove at a later date when no bug reports exist related to
-	 * CONFIG_PAGE_GROUP_BY_MOBILITY
+	 * grouping pages by mobility
 	 */
 	BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@ retry:
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-						int start_migratetype)
-{
-	return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
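
With the stub gone, __rmqueue_fallback() is always compiled in, and the
"hard work" function introduced by the next comment simply glues the two
paths together. Approximately, per kernels of this era (a sketch):

static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

        /* Fast path: a page of exactly the wanted mobility type */
        page = __rmqueue_smallest(zone, order, migratetype);

        /* Slow path: steal from another migrate type's free lists */
        if (unlikely(!page))
                page = __rmqueue_fallback(zone, order, migratetype);

        return page;
}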
@@ -1033,7 +1008,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -1064,9 +1038,6 @@ void drain_all_local_pages(void)
 
 	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
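
drain_all_local_pages() likewise stops depending on hibernation or mobility
grouping. Its shape in kernels of this era (a sketch, not part of the hunk):
the calling CPU spills its own per-cpu lists directly, then IPIs every other
CPU to do the same:

void smp_drain_local_pages(void *arg)
{
        drain_local_pages();    /* runs on each CPU via the IPI below */
}

void drain_all_local_pages(void)
{
        unsigned long flags;

        /* Drain this CPU's per-cpu pages with interrupts off */
        local_irq_save(flags);
        __drain_pages(smp_processor_id());
        local_irq_restore(flags);

        /* ...then ask all other CPUs to drain their own */
        smp_call_function(smp_drain_local_pages, NULL, 0, 1);
}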
@@ -1157,7 +1128,6 @@ again:
 			goto failed;
 		}
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
 		list_for_each_entry(page, &pcp->list, lru)
 			if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@ again:
 					pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
-#else
-		page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 		list_del(&page->lru);
 		pcp->count--;
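
These two hunks leave the migratetype-aware search as the only path through
the per-cpu allocator. The idiom worth noting: when list_for_each_entry()
runs off the end without hitting the break, page ends up pointing back at
the list head, so comparing &page->lru against &pcp->list detects "no page
of this type" and triggers a refill. Approximately, per kernels of this era
(a sketch):

                /* Find a page of the appropriate migrate type */
                list_for_each_entry(page, &pcp->list, lru)
                        if (page_private(page) == migratetype)
                                break;

                /* Ran off the end: refill the pcp list with this type */
                if (unlikely(&page->lru == &pcp->list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, &pcp->list, migratetype);
                        page = list_entry(pcp->list.next, struct page, lru);
                }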
@@ -2525,7 +2492,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
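
setup_zone_migrate_reserve() also becomes unconditional. The block count is
derived from zone->pages_min, roughly as follows in kernels of this era (a
sketch of the calculation only):

        /* MAX_ORDER_NR_PAGES blocks needed to cover zone->pages_min */
        reserve = roundup(zone->pages_min, MAX_ORDER_NR_PAGES) >>
                                                        (MAX_ORDER - 1);

For example, with MAX_ORDER_NR_PAGES of 1024 a pages_min of 1500 pages
rounds up to 2048, reserving two blocks as MIGRATE_RESERVE.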
@@ -2579,11 +2545,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 		}
 	}
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is