@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
@@ -5550,6 +5551,193 @@ out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+			     pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+				pageblock_nr_pages));
+}
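+
+/*
+ * Worked example (illustrative, config-dependent values): with
+ * MAX_ORDER_NR_PAGES == 1024 and pageblock_nr_pages == 512, the
+ * alignment is 1024 pages, so pfn_max_align_down(5000) == 4096 and
+ * pfn_max_align_up(5000) == 5120.
+ */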
+
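+/*
+ * Allocation callback passed to migrate_pages(): hands out a fresh
+ * movable page as the migration target for each page moved out of
+ * the range.
+ */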
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+			     int **resultp)
+{
+	return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+	/* This function is based on compact_zone() from compaction.c. */
+
+	unsigned long pfn = start;
+	unsigned int tries = 0;
+	int ret = 0;
+
+	struct compact_control cc = {
+		.nr_migratepages = 0,
+		.order = -1,
+		.zone = page_zone(pfn_to_page(start)),
+		.sync = true,
+	};
+	INIT_LIST_HEAD(&cc.migratepages);
+
+	migrate_prep_local();
+
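+	/*
+	 * Isolate pages into cc.migratepages in batches and migrate
+	 * each batch, retrying a batch that did not migrate completely
+	 * up to five times before giving up with -EBUSY.
+	 */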
+	while (pfn < end || !list_empty(&cc.migratepages)) {
+		if (fatal_signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (list_empty(&cc.migratepages)) {
+			cc.nr_migratepages = 0;
+			pfn = isolate_migratepages_range(cc.zone, &cc,
+							 pfn, end);
+			if (!pfn) {
+				ret = -EINTR;
+				break;
+			}
+			tries = 0;
+		} else if (++tries == 5) {
+			ret = ret < 0 ? ret : -EBUSY;
+			break;
+		}
+
+		ret = migrate_pages(&cc.migratepages,
+				    __alloc_contig_migrate_alloc,
+				    0, false, true);
+	}
+
+	putback_lru_pages(&cc.migratepages);
+	return ret > 0 ? 0 : ret;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:	start PFN to allocate
+ * @end:	one-past-the-last PFN to allocate
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned.  It is, however, the caller's responsibility to guarantee
+ * that we are the only thread that changes the migrate type of the
+ * pageblocks the pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or a negative error code.  On success, all
+ * pages whose PFN is in [start, end) are allocated for the caller
+ * and need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end)
+{
+	struct zone *zone = page_zone(pfn_to_page(start));
+	unsigned long outer_start, outer_end;
+	int ret = 0, order;
+
+	/*
+	 * What we do here is mark all pageblocks in the range as
+	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
+	 * have different sizes, and due to the way the page allocator
+	 * works, we align the range to the bigger of the two sizes so
+	 * that the page allocator won't try to merge buddies from
+	 * different pageblocks and change MIGRATE_ISOLATE to some
+	 * other migration type.
+	 *
+	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+	 * migrate the pages from the unaligned range (i.e. the pages
+	 * that we are actually interested in).  This will put all the
+	 * pages in the range back to the page allocator as
+	 * MIGRATE_ISOLATE.
+	 *
+	 * When this is done, we take the pages in the range from the
+	 * page allocator, removing them from the buddy system.  This
+	 * way the page allocator will never consider using them.
+	 *
+	 * This lets us mark the pageblocks back as
+	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+	 * aligned range but not in the unaligned, original range are
+	 * put back to the page allocator so that the buddy system can
+	 * use them.
+	 */
+
+	ret = start_isolate_page_range(pfn_max_align_down(start),
+				       pfn_max_align_up(end));
+	if (ret)
+		goto done;
+
+	ret = __alloc_contig_migrate_range(start, end);
+	if (ret)
+		goto done;
+
+	/*
+	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
+	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
+	 * more, all pages in [start, end) are free in the page
+	 * allocator.  What we are going to do is to allocate all
+	 * pages from [start, end) (that is, remove them from the
+	 * page allocator).
+	 *
+	 * The only problem is that pages at the beginning and at the
+	 * end of the interesting range may not be aligned with pages
+	 * that the page allocator holds, i.e. they can be part of
+	 * higher order pages.  Because of this, we reserve the bigger
+	 * range and once this is done free the pages we are not
+	 * interested in.
+	 *
+	 * We don't have to hold zone->lock here because the pages are
+	 * isolated and thus won't get removed from the buddy system.
+	 */
+
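+	/*
+	 * Drain the per-CPU LRU pagevecs and free-page lists so that
+	 * the pages freed by migration actually reach the buddy
+	 * allocator before we go looking for them there.
+	 */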
+	lru_add_drain_all();
+	drain_all_pages();
+
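+	/*
+	 * Find the head of the free (buddy) page containing @start by
+	 * clearing successively higher low-order bits of outer_start
+	 * until PageBuddy() reports a free page.  Give up if no buddy
+	 * head is found below MAX_ORDER.
+	 */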
+	order = 0;
+	outer_start = start;
+	while (!PageBuddy(pfn_to_page(outer_start))) {
+		if (++order >= MAX_ORDER) {
+			ret = -EBUSY;
+			goto done;
+		}
+		outer_start &= ~0UL << order;
+	}
+
+	/* Make sure the range is really isolated. */
+	if (test_pages_isolated(outer_start, end)) {
+		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+			outer_start, end);
+		ret = -EBUSY;
+		goto done;
+	}
+
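+	/*
+	 * Take the isolated free pages off the buddy freelists.  The
+	 * returned outer_end may lie beyond @end when the tail of the
+	 * range sits inside a higher-order free page; the excess is
+	 * freed again below.
+	 */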
+	outer_end = isolate_freepages_range(outer_start, end);
+	if (!outer_end) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* Free head and tail (if any) */
+	if (start != outer_start)
+		free_contig_range(outer_start, start - outer_start);
+	if (end != outer_end)
+		free_contig_range(end, outer_end - end);
+
+done:
+	undo_isolate_page_range(pfn_max_align_down(start),
+				pfn_max_align_up(end));
+	return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+	for (; nr_pages--; ++pfn)
+		__free_page(pfn_to_page(pfn));
+}
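+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	if (alloc_contig_range(start_pfn, start_pfn + nr_pages) == 0) {
+ *		struct page *first = pfn_to_page(start_pfn);
+ *		... use the nr_pages contiguous pages from first ...
+ *		free_contig_range(start_pfn, nr_pages);
+ *	}
+ *
+ * start_pfn and nr_pages are placeholders; the range must satisfy
+ * the single-zone requirement documented at alloc_contig_range().
+ */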
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.