@@ -876,7 +876,9 @@ void mark_free_pages(struct zone *zone)
 		}
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
+#endif /* CONFIG_PM */
 
+#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -888,7 +890,28 @@ void drain_local_pages(void)
 	__drain_pages(smp_processor_id());
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_HIBERNATION */
+
+void smp_drain_local_pages(void *arg)
+{
+	drain_local_pages();
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_local_pages(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__drain_pages(smp_processor_id());
+	local_irq_restore(flags);
+
+	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
+}
+#else
+void drain_all_local_pages(void) {}
+#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1480,6 +1503,9 @@ nofail_alloc:
 
 		cond_resched();
 
+		if (order != 0)
+			drain_all_local_pages();
+
 		if (likely(did_some_progress)) {
 			page = get_page_from_freelist(gfp_mask, order,
 						zonelist, alloc_flags);