@@ -282,7 +282,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                 struct page *newpage, struct page *page,
                 struct buffer_head *head, enum migrate_mode mode)
 {
-        int expected_count;
+        int expected_count = 0;
         void **pslot;
 
         if (!mapping) {
@@ -1415,4 +1415,108 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
         }
         return err;
 }
-#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * Returns true if this is a safe migration target node for misplaced NUMA
+ * pages. Currently it only checks the watermarks, which is crude.
+ */
+static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
+                                   int nr_migrate_pages)
+{
+        int z;
+        for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+                struct zone *zone = pgdat->node_zones + z;
+
+                if (!populated_zone(zone))
+                        continue;
+
+                if (zone->all_unreclaimable)
+                        continue;
+
+                /* Avoid waking kswapd by allocating pages_to_migrate pages. */
+                if (!zone_watermark_ok(zone, 0,
+                                       high_wmark_pages(zone) +
+                                       nr_migrate_pages,
+                                       0, 0))
+                        continue;
+                return true;
+        }
+        return false;
+}
+
+static struct page *alloc_misplaced_dst_page(struct page *page,
+                                             unsigned long data,
+                                             int **result)
+{
+        int nid = (int) data;
+        struct page *newpage;
+
+        newpage = alloc_pages_exact_node(nid,
+                                         (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
+                                          __GFP_NOMEMALLOC | __GFP_NORETRY |
+                                          __GFP_NOWARN) &
+                                         ~GFP_IOFS, 0);
+        return newpage;
+}
+
+/*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. Caller is expected to have an elevated reference count on
+ * the page that will be dropped by this function before returning.
+ */
+int migrate_misplaced_page(struct page *page, int node)
+{
+        int isolated = 0;
+        LIST_HEAD(migratepages);
+
+        /*
+         * Don't migrate pages that are mapped in multiple processes.
+         * TODO: Handle false sharing detection instead of this hammer
+         */
+        if (page_mapcount(page) != 1) {
+                put_page(page);
+                goto out;
+        }
+
+        /* Avoid migrating to a node that is nearly full */
+        if (migrate_balanced_pgdat(NODE_DATA(node), 1)) {
+                int page_lru;
+
+                if (isolate_lru_page(page)) {
+                        put_page(page);
+                        goto out;
+                }
+                isolated = 1;
+
+                /*
+                 * Page is isolated which takes a reference count so now the
+                 * caller's reference can be safely dropped without the page
+                 * disappearing underneath us during migration.
+                 */
+                put_page(page);
+
+                page_lru = page_is_file_cache(page);
+                inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
+                list_add(&page->lru, &migratepages);
+        }
+
+        if (isolated) {
+                int nr_remaining;
+
+                nr_remaining = migrate_pages(&migratepages,
+                                             alloc_misplaced_dst_page,
+                                             node, false, MIGRATE_ASYNC,
+                                             MR_NUMA_MISPLACED);
+                if (nr_remaining) {
+                        putback_lru_pages(&migratepages);
+                        isolated = 0;
+                }
+        }
+        BUG_ON(!list_empty(&migratepages));
+out:
+        return isolated;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_NUMA */
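
For reference, the sketch below shows how a caller such as a NUMA hinting fault
handler might hand an elevated-refcount page to migrate_misplaced_page(). It is
a minimal illustration, not part of the patch: pick_target_nid() is an assumed
policy helper for the example, and the only contract relied on is the one
documented above, i.e. migrate_misplaced_page() drops the caller's page
reference on every path and returns non-zero only if the page was isolated for
migration.

/* Illustrative caller sketch; not part of the patch above. */
static void numa_hint_fault_sketch(struct page *page, int src_nid)
{
        /* pick_target_nid() is an assumed helper that applies placement policy. */
        int target_nid = pick_target_nid(page, src_nid);

        if (target_nid == -1 || target_nid == src_nid) {
                /* No better node: drop the reference we were handed. */
                put_page(page);
                return;
        }

        /*
         * migrate_misplaced_page() consumes our page reference whether or
         * not it migrates the page, so no put_page() is needed here.
         */
        if (migrate_misplaced_page(page, target_nid))
                pr_debug("misplaced page moved to node %d\n", target_nid);
}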