@@ -2378,15 +2378,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
-/* Returns true if the allocation is likely for THP */
-static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
-{
-	if (order == pageblock_order &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		return true;
-	return false;
-}
-
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2425,9 +2416,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	/* The decision whether to wake kswapd for THP is made later */
-	if (!is_thp_alloc(gfp_mask, order))
-		wake_all_kswapd(order, zonelist, high_zoneidx,
+	wake_all_kswapd(order, zonelist, high_zoneidx,
 					zone_idx(preferred_zone));
 
 	/*
@@ -2498,21 +2487,15 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	if (is_thp_alloc(gfp_mask, order)) {
-		/*
-		 * If compaction is deferred for high-order allocations, it is
-		 * because sync compaction recently failed. If this is the case
-		 * and the caller requested a movable allocation that does not
-		 * heavily disrupt the system then fail the allocation instead
-		 * of entering direct reclaim.
-		 */
-		if (deferred_compaction || contended_compaction)
-			goto nopage;
-
-		/* If process is willing to reclaim/compact then wake kswapd */
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
-	}
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
+	 */
+	if ((deferred_compaction || contended_compaction) &&
+				(gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		goto nopage;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,