|
@@ -1981,14 +1981,20 @@ static struct page *
|
|
|
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
|
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
|
|
|
- int migratetype, unsigned long *did_some_progress,
|
|
|
- bool sync_migration)
|
|
|
+ int migratetype, bool sync_migration,
|
|
|
+ bool *deferred_compaction,
|
|
|
+ unsigned long *did_some_progress)
|
|
|
{
|
|
|
struct page *page;
|
|
|
|
|
|
- if (!order || compaction_deferred(preferred_zone))
|
|
|
+ if (!order)
|
|
|
return NULL;
|
|
|
|
|
|
+ if (compaction_deferred(preferred_zone)) {
|
|
|
+ *deferred_compaction = true;
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+
|
|
|
current->flags |= PF_MEMALLOC;
|
|
|
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
|
|
|
nodemask, sync_migration);
|
|
@@ -2016,7 +2022,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
|
* but not enough to satisfy watermarks.
|
|
|
*/
|
|
|
count_vm_event(COMPACTFAIL);
|
|
|
- defer_compaction(preferred_zone);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * As async compaction considers a subset of pageblocks, only
|
|
|
+ * defer if the failure was a sync compaction failure.
|
|
|
+ */
|
|
|
+ if (sync_migration)
|
|
|
+ defer_compaction(preferred_zone);
|
|
|
|
|
|
cond_resched();
|
|
|
}
|
|
@@ -2028,8 +2040,9 @@ static inline struct page *
|
|
|
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
|
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
|
|
|
- int migratetype, unsigned long *did_some_progress,
|
|
|
- bool sync_migration)
|
|
|
+ int migratetype, bool sync_migration,
|
|
|
+ bool *deferred_compaction,
|
|
|
+ unsigned long *did_some_progress)
|
|
|
{
|
|
|
return NULL;
|
|
|
}
|
|
@@ -2179,6 +2192,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
|
|
|
unsigned long pages_reclaimed = 0;
|
|
|
unsigned long did_some_progress;
|
|
|
bool sync_migration = false;
|
|
|
+ bool deferred_compaction = false;
|
|
|
|
|
|
/*
|
|
|
* In the slowpath, we sanity check order to avoid ever trying to
|
|
@@ -2259,12 +2273,22 @@ rebalance:
|
|
|
zonelist, high_zoneidx,
|
|
|
nodemask,
|
|
|
alloc_flags, preferred_zone,
|
|
|
- migratetype, &did_some_progress,
|
|
|
- sync_migration);
|
|
|
+ migratetype, sync_migration,
|
|
|
+ &deferred_compaction,
|
|
|
+ &did_some_progress);
|
|
|
if (page)
|
|
|
goto got_pg;
|
|
|
sync_migration = true;
|
|
|
|
|
|
+ /*
|
|
|
+ * If compaction is deferred for high-order allocations, it is because
|
|
|
+ * sync compaction recently failed. If this is the case and the caller
|
|
|
+ * has requested the system not be heavily disrupted, fail the
|
|
|
+ * allocation now instead of entering direct reclaim
|
|
|
+ */
|
|
|
+ if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
|
|
|
+ goto nopage;
|
|
|
+
|
|
|
/* Try direct reclaim and then allocating */
|
|
|
page = __alloc_pages_direct_reclaim(gfp_mask, order,
|
|
|
zonelist, high_zoneidx,
|
|
@@ -2328,8 +2352,9 @@ rebalance:
|
|
|
zonelist, high_zoneidx,
|
|
|
nodemask,
|
|
|
alloc_flags, preferred_zone,
|
|
|
- migratetype, &did_some_progress,
|
|
|
- sync_migration);
|
|
|
+ migratetype, sync_migration,
|
|
|
+ &deferred_compaction,
|
|
|
+ &did_some_progress);
|
|
|
if (page)
|
|
|
goto got_pg;
|
|
|
}
|