@@ -687,16 +687,48 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	return 0;
 }
 
+/*
+ * Go through the free lists for the given migratetype and remove
+ * the smallest available page from the freelists
+ */
+static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+						int migratetype)
+{
+	unsigned int current_order;
+	struct free_area * area;
+	struct page *page;
+
+	/* Find a page of the appropriate size in the preferred list */
+	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
+		area = &(zone->free_area[current_order]);
+		if (list_empty(&area->free_list[migratetype]))
+			continue;
+
+		page = list_entry(area->free_list[migratetype].next,
+							struct page, lru);
+		list_del(&page->lru);
+		rmv_page_order(page);
+		area->nr_free--;
+		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
+		expand(zone, page, order, current_order, area, migratetype);
+		return page;
+	}
+
+	return NULL;
+}
+
+
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC },
-	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,MIGRATE_HIGHATOMIC },
-	[MIGRATE_HIGHATOMIC] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,MIGRATE_MOVABLE},
+	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
+	[MIGRATE_HIGHATOMIC] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
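
Note on the helper above: when the smallest non-empty list holds a block larger than requested, expand() splits off the surplus halves and hands them back to the lower-order free lists of the same migratetype, which is what keeps the page types segregated. A minimal standalone model of that split (illustration only, under the assumption that expand() frees one buddy per order on the way down; the real expand() in mm/page_alloc.c operates on struct page and struct free_area):

#include <stdio.h>

int main(void)
{
	unsigned int order = 0;		/* order the caller asked for */
	unsigned int current_order = 3;	/* smallest non-empty list found */

	/* Peel off the upper buddy at each order until only a block of
	 * the requested order remains, as expand() does. */
	while (current_order > order) {
		current_order--;
		printf("return order-%u buddy to free_list[migratetype]\n",
		       current_order);
	}
	printf("allocate the remaining order-%u block\n", order);
	return 0;
}
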
@@ -799,6 +831,9 @@ retry:
 	for (i = 0; i < MIGRATE_TYPES - 1; i++) {
 		migratetype = fallbacks[start_migratetype][i];
 
+		/* MIGRATE_RESERVE handled later if necessary */
+		if (migratetype == MIGRATE_RESERVE)
+			continue;
 		/*
 		 * Make it hard to fallback to blocks used for
 		 * high-order atomic allocations
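
With MIGRATE_RESERVE appended to every row of fallbacks[], the scan above has to skip it explicitly so the reserve is never raided by the ordinary fallback path; it is only handed out by the last-resort call added in the next hunk. A standalone illustration of the search order this produces for an unmovable allocation (the enum values are illustrative, the ordering comes from the table above):

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_HIGHATOMIC, MIGRATE_RESERVE, MIGRATE_TYPES };

static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
				MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
};

int main(void)
{
	/* Mirrors the loop in __rmqueue_fallback(): MIGRATE_RESERVE is
	 * skipped here and only used as the explicit last resort. */
	for (int i = 0; i < MIGRATE_TYPES - 1; i++) {
		if (fallbacks[MIGRATE_UNMOVABLE][i] == MIGRATE_RESERVE)
			continue;
		printf("try fallback migratetype %d\n",
		       fallbacks[MIGRATE_UNMOVABLE][i]);
	}
	return 0;
}
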
@@ -861,7 +896,8 @@ retry:
 		goto retry;
 	}
 
-	return NULL;
+	/* Use MIGRATE_RESERVE rather than fail an allocation */
+	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
 #else
 static struct page *__rmqueue_fallback(struct zone *zone, int order,
@@ -871,36 +907,19 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 }
 #endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
-/*
+/*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
  */
 static struct page *__rmqueue(struct zone *zone, unsigned int order,
 						int migratetype)
 {
-	struct free_area * area;
-	unsigned int current_order;
 	struct page *page;
 
-	/* Find a page of the appropriate size in the preferred list */
-	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
-		area = &(zone->free_area[current_order]);
-		if (list_empty(&area->free_list[migratetype]))
-			continue;
-
-		page = list_entry(area->free_list[migratetype].next,
-							struct page, lru);
-		list_del(&page->lru);
-		rmv_page_order(page);
-		area->nr_free--;
-		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
-		expand(zone, page, order, current_order, area, migratetype);
-		goto got_page;
-	}
-
-	page = __rmqueue_fallback(zone, order, migratetype);
+	page = __rmqueue_smallest(zone, order, migratetype);
 
-got_page:
+	if (unlikely(!page))
+		page = __rmqueue_fallback(zone, order, migratetype);
 
 	return page;
 }
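
The rewrite of __rmqueue() also retires the got_page label: with the preferred-list scan factored out into __rmqueue_smallest(), both paths simply yield a page pointer. The resulting control flow as a standalone sketch, with stubs standing in for the real helpers:

#include <stdio.h>
#include <stddef.h>

struct page;	/* opaque in this sketch */

static struct page *smallest_stub(int migratetype)
{
	return NULL;	/* pretend the preferred free lists are empty */
}

static struct page *fallback_stub(int migratetype)
{
	printf("fallback path entered for migratetype %d\n", migratetype);
	return NULL;
}

/* Shape of the new __rmqueue(): try the preferred type first, fall back
 * only on failure (the kernel wraps that test in unlikely()). */
static struct page *rmqueue_model(int migratetype)
{
	struct page *page = smallest_stub(migratetype);

	if (!page)
		page = fallback_stub(migratetype);
	return page;
}

int main(void)
{
	rmqueue_model(0);
	return 0;
}
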
@@ -2506,6 +2525,65 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
+/*
+ * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
+ * of blocks reserved is based on zone->pages_min. The memory within the
+ * reserve will tend to store contiguous free pages. Setting min_free_kbytes
+ * higher will lead to a bigger reserve which will get freed as contiguous
+ * blocks as reclaim kicks in
+ */
+static void setup_zone_migrate_reserve(struct zone *zone)
+{
+	unsigned long start_pfn, pfn, end_pfn;
+	struct page *page;
+	unsigned long reserve, block_migratetype;
+
+	/* Get the start pfn, end pfn and the number of blocks to reserve */
+	start_pfn = zone->zone_start_pfn;
+	end_pfn = start_pfn + zone->spanned_pages;
+	reserve = roundup(zone->pages_min, MAX_ORDER_NR_PAGES) >> (MAX_ORDER-1);
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) {
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+
+		/* Blocks with reserved pages will never free, skip them. */
+		if (PageReserved(page))
+			continue;
+
+		block_migratetype = get_pageblock_migratetype(page);
+
+		/* If this block is reserved, account for it */
+		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
+			reserve--;
+			continue;
+		}
+
+		/* Suitable for reserving if this block is movable */
+		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
+			set_pageblock_migratetype(page, MIGRATE_RESERVE);
+			move_freepages_block(zone, page, MIGRATE_RESERVE);
+			reserve--;
+			continue;
+		}
+
+		/*
+		 * If the reserve is met and this is a previous reserved block,
+		 * take it back
+		 */
+		if (block_migratetype == MIGRATE_RESERVE) {
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+			move_freepages_block(zone, page, MIGRATE_MOVABLE);
+		}
+	}
+}
+#else
+static inline void setup_zone_migrate_reserve(struct zone *zone)
+{
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
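
The size of the reserve scales with zone->pages_min and therefore with min_free_kbytes. A worked example of the calculation, assuming 4KiB pages and MAX_ORDER == 11 (so MAX_ORDER_NR_PAGES == 1024 pages, a 4MiB block); roundup() here matches the kernel macro:

#include <stdio.h>

#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1 << (MAX_ORDER - 1))
#define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long pages_min = 1500;	/* about 6MiB of 4KiB pages */
	unsigned long reserve;

	/* Same expression as setup_zone_migrate_reserve() */
	reserve = roundup(pages_min, MAX_ORDER_NR_PAGES) >> (MAX_ORDER - 1);
	printf("%lu MAX_ORDER blocks reserved\n", reserve);
	return 0;
}

This prints 2: roundup(1500, 1024) is 2048, and 2048 >> 10 is 2, so two 4MiB blocks are set aside as MIGRATE_RESERVE.
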
@@ -2541,9 +2619,12 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * movable at startup. This will force kernel allocations
 		 * to reserve their blocks rather than leaking throughout
 		 * the address space during boot when many long-lived
-		 * kernel allocations are made
+		 * kernel allocations are made. Later some blocks near
+		 * the start are marked MIGRATE_RESERVE by
+		 * setup_zone_migrate_reserve()
 		 */
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		if ((pfn & (MAX_ORDER_NR_PAGES-1)))
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
 		INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
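
On the new test above: pfn & (MAX_ORDER_NR_PAGES-1) is zero exactly for the first page of each MAX_ORDER block, so that page is left untouched while the other pfns in the block mark it movable. The migratetype bitmap holds one value per block, so each block still starts out MIGRATE_MOVABLE until setup_zone_migrate_reserve() claims it. A quick standalone check of the mask (MAX_ORDER_NR_PAGES value assumed as in the earlier example):

#include <stdio.h>

#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1 << (MAX_ORDER - 1))

int main(void)
{
	unsigned long pfns[] = { 2048, 2049, 3071, 3072 };

	for (int i = 0; i < 4; i++)
		printf("pfn %lu: %s\n", pfns[i],
		       (pfns[i] & (MAX_ORDER_NR_PAGES - 1)) ?
				"marked MIGRATE_MOVABLE" :
				"block start, left alone");
	return 0;
}
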
@@ -4078,6 +4159,7 @@ void setup_per_zone_pages_min(void)
 
 		zone->pages_low = zone->pages_min + (tmp >> 2);
 		zone->pages_high = zone->pages_min + (tmp >> 1);
+		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 
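
Because setup_zone_migrate_reserve() is called from setup_per_zone_pages_min(), the reserve is not sized only at boot: writing /proc/sys/vm/min_free_kbytes re-runs the same path, so raising min_free_kbytes grows the reserve at runtime. The existing sysctl handler in mm/page_alloc.c (unchanged by this patch) is roughly the following; the exact signature of that era may differ, so treat this as a sketch:

int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();	/* now also resizes the reserve */
	return 0;
}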