@@ -66,24 +66,15 @@ static inline bool isolation_suitable(struct compact_control *cc,
  * should be skipped for page isolation when the migrate and free page scanner
  * meet.
  */
-static void reset_isolation_suitable(struct zone *zone)
+static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 	unsigned long pfn;
 
-	/*
-	 * Do not reset more than once every five seconds. If allocations are
-	 * failing sufficiently quickly to allow this to happen then continually
-	 * scanning for compaction is not going to help. The choice of five
-	 * seconds is arbitrary but will mitigate excessive scanning.
-	 */
-	if (time_before(jiffies, zone->compact_blockskip_expire))
-		return;
-
 	zone->compact_cached_migrate_pfn = start_pfn;
 	zone->compact_cached_free_pfn = end_pfn;
-	zone->compact_blockskip_expire = jiffies + (HZ * 5);
+	zone->compact_blockskip_flush = false;
 
 	/* Walk the zone and mark every pageblock as suitable for isolation */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
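
This hunk swaps the time-based backoff for an explicit flag, so the supporting
zone state changes shape as well. For reference, a sketch of the relevant
struct zone members follows; the cached-pfn fields appear above, while
compact_blockskip_flush is assumed to replace compact_blockskip_expire in the
mmzone.h half of this patch (not shown in this excerpt).

	/* Sketch of the struct zone state this hunk relies on (mmzone.h
	 * side of the patch, assumed): the cached scanner positions plus
	 * the new flush flag that replaces the jiffies-based expiry.
	 */
	struct zone {
		/* ... */
	#if defined CONFIG_COMPACTION || defined CONFIG_CMA
		/* pfns where the compaction scanners last stopped */
		unsigned long	compact_cached_free_pfn;
		unsigned long	compact_cached_migrate_pfn;
	#endif
	#ifdef CONFIG_COMPACTION
		/* set when a full compaction pass completes; tells
		 * reset_isolation_suitable() a flush is worthwhile */
		bool		compact_blockskip_flush;
	#endif
		/* ... */
	};
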
@@ -102,9 +93,24 @@ static void reset_isolation_suitable(struct zone *zone)
 	}
 }
 
+void reset_isolation_suitable(pg_data_t *pgdat)
+{
+	int zoneid;
+
+	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+		struct zone *zone = &pgdat->node_zones[zoneid];
+		if (!populated_zone(zone))
+			continue;
+
+		/* Only flush if a full compaction finished recently */
+		if (zone->compact_blockskip_flush)
+			__reset_isolation_suitable(zone);
+	}
+}
+
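The new reset_isolation_suitable() is the node-wide entry point. Per the
compact_finished() comment below, the intended caller is kswapd as it goes to
sleep; a minimal sketch of such a call site follows, assuming the hook lands
in kswapd's sleep path in vmscan.c (kswapd_ready_to_sleep() is a stand-in
name for whatever check that path already performs, not an API from this
patch).

	/* vmscan.c sketch: flush stale PG_migrate_skip state before
	 * kswapd sleeps, so the next compaction pass starts fresh.
	 */
	static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
	{
		if (kswapd_ready_to_sleep(pgdat, order)) { /* stand-in check */
			reset_isolation_suitable(pgdat);
			schedule();
		}
	}
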
 /*
  * If no pages were isolated then mark this pageblock to be skipped in the
- * future. The information is later cleared by reset_isolation_suitable().
+ * future. The information is later cleared by __reset_isolation_suitable().
  */
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
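
update_pageblock_skip() records its decision in a per-pageblock bit. A sketch
of the accessors it is assumed to use, modelled on the existing
pageblock-flags helpers with a PB_migrate_skip bit defined earlier in this
series:

	/* pageblock-flags.h sketch: per-pageblock skip bit (assumed) */
	#define get_pageblock_skip(page)				\
		get_pageblock_flags_group(page, PB_migrate_skip,	\
						PB_migrate_skip)
	#define set_pageblock_skip(page)				\
		set_pageblock_flags_group(page, 1, PB_migrate_skip,	\
						PB_migrate_skip)
	#define clear_pageblock_skip(page)				\
		set_pageblock_flags_group(page, 0, PB_migrate_skip,	\
						PB_migrate_skip)
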
@@ -820,7 +826,15 @@ static int compact_finished(struct zone *zone,
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (cc->free_pfn <= cc->migrate_pfn) {
-		reset_isolation_suitable(cc->zone);
+		/*
+		 * Mark that the PG_migrate_skip information should be cleared
+		 * by kswapd when it goes to sleep. kswapd does not set the
+		 * flag itself as the decision to clear the information should
+		 * be based directly on an allocation request.
+		 */
+		if (!current_is_kswapd())
+			zone->compact_blockskip_flush = true;
+
 		return COMPACT_COMPLETE;
 	}
 
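current_is_kswapd() is what lets compact_finished() distinguish a
kswapd-driven pass from direct compaction on behalf of an allocator; it
reduces to a PF_KSWAPD test on the current task:

	/* include/linux/swap.h */
	static inline int current_is_kswapd(void)
	{
		return current->flags & PF_KSWAPD;
	}
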
@@ -943,9 +957,13 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
 	}
 
-	/* Clear pageblock skip if there are numerous alloc failures */
-	if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
-		reset_isolation_suitable(zone);
+	/*
+	 * Clear pageblock skip if there were failures recently and compaction
+	 * is about to be retried after being deferred. kswapd does not do
+	 * this reset as it will reset the cached information when going to
+	 * sleep.
+	 */
+	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+		__reset_isolation_suitable(zone);
 
 	migrate_prep_local();
 
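compaction_restarting() replaces the open-coded compact_defer_shift test. A
sketch of what it is assumed to check, based on the existing
deferred-compaction counters: the deferral shift has saturated and the
deferral budget has been consumed, i.e. compaction for this order is
genuinely being retried rather than still deferred.

	/* Sketch: true when compaction is being retried after the
	 * maximum deferral period for this order.
	 */
	bool compaction_restarting(struct zone *zone, int order)
	{
		if (order < zone->compact_order_failed)
			return false;

		return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
			zone->compact_considered >= 1UL << zone->compact_defer_shift;
	}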