@@ -750,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 	__free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_page_refcounted(page);
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	__free_pages(page, pageblock_order);
+	totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
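Each page in a bootmem-reserved pageblock carries PG_reserved and a
reference taken at init time, so the helper clears both before handing
the whole block to the buddy allocator with its migratetype already set
to MIGRATE_CMA. A minimal sketch of a boot-time caller, assuming a
pageblock-aligned reserved range (the function name and the lack of
error handling are illustrative, not part of this patch):

    /*
     * Illustrative only: hand a reserved, pageblock-aligned PFN range
     * over to the buddy allocator, one pageblock at a time.  Only
     * init_cma_reserved_pageblock() comes from the hunk above.
     */
    static int __init activate_cma_range(unsigned long base_pfn,
                                         unsigned long count)
    {
            unsigned long pfn;

            for (pfn = base_pfn; pfn < base_pfn + count;
                 pfn += pageblock_nr_pages)
                    init_cma_reserved_pageblock(pfn_to_page(pfn));
            return 0;
    }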
@@ -875,10 +893,15 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][3] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+static int fallbacks[MIGRATE_TYPES][4] = {
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
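With CONFIG_CMA enabled, MIGRATE_CMA deliberately comes first in the
MIGRATE_MOVABLE row: movable allocations drain CMA pageblocks before
they start stealing from unmovable or reclaimable ones, because only
movable pages can later be migrated back out of a CMA area.
MIGRATE_RESERVE terminates each row. A self-contained userspace model
of the fallback walk, mirroring the CONFIG_CMA variant of the array
above (illustration only, not kernel code):

    #include <stdio.h>

    /* Mirror of the kernel's migrate types, for illustration only. */
    enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
           MIGRATE_CMA, MIGRATE_RESERVE, MIGRATE_ISOLATE, MIGRATE_TYPES };

    static const char *names[] = { "UNMOVABLE", "RECLAIMABLE", "MOVABLE",
                                   "CMA", "RESERVE", "ISOLATE" };

    static int fallbacks[MIGRATE_TYPES][4] = {
            [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
            [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
            [MIGRATE_MOVABLE]     = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
            [MIGRATE_CMA]         = { MIGRATE_RESERVE },
            [MIGRATE_RESERVE]     = { MIGRATE_RESERVE },
            [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE },
    };

    int main(void)
    {
            /* Walk the fallback row for a movable request; the kernel
             * stops when it reaches the MIGRATE_RESERVE sentinel. */
            for (int i = 0;; i++) {
                    int mt = fallbacks[MIGRATE_MOVABLE][i];
                    printf("try %s\n", names[mt]);
                    if (mt == MIGRATE_RESERVE)
                            break;
            }
            return 0;
    }

Run, it prints the raid order for a movable request: CMA first, then
RECLAIMABLE, then UNMOVABLE, then RESERVE.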
@@ -995,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
 			 * aggressive about taking ownership of free pages
+			 *
+			 * On the other hand, never change the migration
+			 * type of MIGRATE_CMA pageblocks nor move CMA
+			 * pages to different free lists. We don't
+			 * want unmovable pages to be allocated from
+			 * MIGRATE_CMA areas.
 			 */
-			if (unlikely(current_order >= (pageblock_order >> 1)) ||
-					start_migratetype == MIGRATE_RECLAIMABLE ||
-					page_group_by_mobility_disabled) {
-				unsigned long pages;
+			if (!is_migrate_cma(migratetype) &&
+			    (unlikely(current_order >= pageblock_order / 2) ||
+			     start_migratetype == MIGRATE_RECLAIMABLE ||
+			     page_group_by_mobility_disabled)) {
+				int pages;
 				pages = move_freepages_block(zone, page,
 							start_migratetype);
 
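Apart from the new guard the stealing condition is unchanged:
"(pageblock_order >> 1)" is rewritten as the equivalent
"pageblock_order / 2", and "pages" becomes an int to match the return
type of move_freepages_block(). is_migrate_cma() itself is added by
this series in include/linux/mmzone.h rather than in this file; it
reduces to roughly:

    #ifdef CONFIG_CMA
    #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
    #else
    #  define is_migrate_cma(migratetype) false
    #endif

so the whole guard compiles away on CONFIG_CMA=n kernels.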
@@ -1017,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			rmv_page_order(page);
 
 			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order)
+			if (current_order >= pageblock_order &&
+			    !is_migrate_cma(migratetype))
 				change_pageblock_range(page, current_order,
 							start_migratetype);
 
-			expand(zone, page, order, current_order, area, migratetype);
+			expand(zone, page, order, current_order, area,
+			       is_migrate_cma(migratetype)
+			     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 				start_migratetype, migratetype);
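The net effect of these two changes is that a page taken from a CMA
pageblock keeps MIGRATE_CMA all the way through expand(), while any
other stolen page is requeued as start_migratetype, as before. Spelled
out as a hypothetical helper (not part of the patch):

    /* Which free list should the remainder of a stolen page go to? */
    static int freelist_for_stolen_page(int migratetype, int start_migratetype)
    {
            /* Pages split off a CMA pageblock must stay on the CMA
             * free list so they never satisfy unmovable requests ... */
            if (is_migrate_cma(migratetype))
                    return migratetype;
            /* ... everything else is requeued as the type we are
             * allocating for, exactly as before this patch. */
            return start_migratetype;
    }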
@@ -1072,7 +1105,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i;
+	int mt = migratetype, i;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
@@ -1093,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-		set_page_private(page, migratetype);
+		if (IS_ENABLED(CONFIG_CMA)) {
+			mt = get_pageblock_migratetype(page);
+			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+				mt = migratetype;
+		}
+		set_page_private(page, mt);
 		list = &page->lru;
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
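page_private() on a per-cpu page records which free list it must return
to in free_pcppages_bulk(). Tagging every page with the requested type,
as the old code did, would let a CMA page be freed back to an ordinary
movable list and later satisfy an unmovable allocation. IS_ENABLED()
keeps the extra pageblock lookup out of CONFIG_CMA=n builds, where the
compiler folds the branch away. The decision, restated as a
hypothetical standalone helper:

    /* Which migratetype should a pcp page remember? */
    static int pcp_migratetype(struct page *page, int requested_mt)
    {
            if (IS_ENABLED(CONFIG_CMA)) {
                    int mt = get_pageblock_migratetype(page);

                    /* CMA and isolated pages must keep their real type
                     * so they are freed back to the right list. */
                    if (is_migrate_cma(mt) || mt == MIGRATE_ISOLATE)
                            return mt;
            }
            return requested_mt;
    }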
@@ -1373,8 +1411,12 @@ int split_free_page(struct page *page)
 
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
-		for (; page < endpage; page += pageblock_nr_pages)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		for (; page < endpage; page += pageblock_nr_pages) {
+			int mt = get_pageblock_migratetype(page);
+			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
+		}
 	}
 
 	return 1 << order;
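Same rule as in __rmqueue_fallback(): retyping pageblocks to
MIGRATE_MOVABLE after a large split must skip blocks that are isolated
or reserved for CMA. The loop's test, restated as a hypothetical
predicate:

    /* May this pageblock be retyped to MIGRATE_MOVABLE on a split? */
    static bool pageblock_retypable(int mt)
    {
            /* Isolated blocks are under migration control and CMA
             * blocks keep their type for good; leave both alone. */
            return mt != MIGRATE_ISOLATE && !is_migrate_cma(mt);
    }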
@@ -5414,14 +5456,16 @@ static int
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
+	int mt;
+
 	/*
 	 * For avoiding noise data, lru_add_drain_all() should be called
 	 * If ZONE_MOVABLE, the zone never contains immobile pages
 	 */
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return true;
-
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+	mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 		return true;
 
 	pfn = page_to_pfn(page);
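A MIGRATE_CMA pageblock only ever holds movable allocations, so for
this scan it is as good as MIGRATE_MOVABLE and the function can return
early. As a hypothetical helper:

    /* Does the pageblock's type alone guarantee only movable pages? */
    static bool pageblock_movable_by_type(struct page *page)
    {
            int mt = get_pageblock_migratetype(page);

            return mt == MIGRATE_MOVABLE || is_migrate_cma(mt);
    }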