@@ -35,6 +35,8 @@ struct compact_control {
 	unsigned long nr_anon;
 	unsigned long nr_file;
 
+	unsigned int order;		/* order a direct compactor needs */
+	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
 };
 
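One remark on the new fields: compact_node() below initialises .order to -1 even though the field is declared unsigned int. That works: the -1 wraps to UINT_MAX on assignment, and the cc->order == -1 comparison in compact_finished() still holds because the int operand is converted to unsigned before comparing. A standalone illustration (hypothetical snippet, not part of the patch):

	#include <assert.h>

	int main(void)
	{
		unsigned int order = -1;	/* stored as UINT_MAX */
		assert(order == -1);		/* -1 converts to UINT_MAX again */
		return 0;
	}
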
@@ -341,6 +343,9 @@ static void update_nr_listpages(struct compact_control *cc)
 static int compact_finished(struct zone *zone,
 						struct compact_control *cc)
 {
+	unsigned int order;
+	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);
+
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
@@ -348,6 +353,24 @@ static int compact_finished(struct zone *zone,
 	if (cc->free_pfn <= cc->migrate_pfn)
 		return COMPACT_COMPLETE;
 
+	/* Compaction run is not finished if the watermark is not met */
+	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
+		return COMPACT_CONTINUE;
+
+	if (cc->order == -1)
+		return COMPACT_CONTINUE;
+
+	/* Direct compactor: Is a suitable page free? */
+	for (order = cc->order; order < MAX_ORDER; order++) {
+		/* Job done if page is free of the right migratetype */
+		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
+			return COMPACT_PARTIAL;
+
+		/* Job done if allocation would set block type */
+		if (order >= pageblock_order && zone->free_area[order].nr_free)
+			return COMPACT_PARTIAL;
+	}
+
 	return COMPACT_CONTINUE;
 }
 
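For reference, the return codes used above are defined earlier in this series in include/linux/compaction.h; if memory serves, the values are ordered deliberately so that callers can combine per-zone results with max() (see try_to_compact_pages() below):

	#define COMPACT_SKIPPED		0	/* compaction was not attempted */
	#define COMPACT_CONTINUE	1	/* keep scanning the zone */
	#define COMPACT_PARTIAL		2	/* a suitable page is now free */
	#define COMPACT_COMPLETE	3	/* scanners met; whole zone done */

One ordering nit: watermark is computed as low_wmark_pages(zone) + (1 << cc->order) before the cc->order == -1 case is filtered out, so on the compact_node() path the shift is performed with order converted from -1. Hoisting the cc->order == -1 test above the watermark computation would avoid relying on that shift.
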
@@ -394,6 +417,99 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	return ret;
 }
 
+static unsigned long compact_zone_order(struct zone *zone,
+						int order, gfp_t gfp_mask)
+{
+	struct compact_control cc = {
+		.nr_freepages = 0,
+		.nr_migratepages = 0,
+		.order = order,
+		.migratetype = allocflags_to_migratetype(gfp_mask),
+		.zone = zone,
+	};
+	INIT_LIST_HEAD(&cc.freepages);
+	INIT_LIST_HEAD(&cc.migratepages);
+
+	return compact_zone(zone, &cc);
+}
+
+/**
+ * try_to_compact_pages - Direct compact to satisfy a high-order allocation
+ * @zonelist: The zonelist used for the current allocation
+ * @order: The order of the current allocation
+ * @gfp_mask: The GFP mask of the current allocation
+ * @nodemask: The allowed nodes to allocate from
+ *
+ * This is the main entry point for direct page compaction.
+ */
+unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	int may_enter_fs = gfp_mask & __GFP_FS;
+	int may_perform_io = gfp_mask & __GFP_IO;
+	unsigned long watermark;
+	struct zoneref *z;
+	struct zone *zone;
+	int rc = COMPACT_SKIPPED;
+
+	/*
+	 * Check whether it is worth even starting compaction. The order check is
+	 * made because an assumption is made that the page allocator can satisfy
+	 * the "cheaper" orders without taking special steps
+	 */
+	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
+		return rc;
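For readers keeping score: PAGE_ALLOC_COSTLY_ORDER is 3, so everything up to order-3 (32KB with 4KB pages) returns immediately and only order-4 and larger requests pay the cost of a compaction stall. The __GFP_FS and __GFP_IO tests are conservative: page migration can block in filesystem and I/O paths, so callers that forbid those contexts are skipped rather than risked.
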
+
+	count_vm_event(COMPACTSTALL);
+
+	/* Compact each zone in the list */
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+								nodemask) {
+		int fragindex;
+		int status;
+
+		/*
+		 * Watermarks for order-0 must be met for compaction. Note
+		 * the 2UL. This is because during migration, copies of
+		 * pages need to be allocated and for a short time, the
+		 * footprint is higher
+		 */
+		watermark = low_wmark_pages(zone) + (2UL << order);
+		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+			continue;
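To put numbers on the 2UL comment above: for an order-9 request (a 2MB huge page with a 4KB base page size), 2UL << 9 is 1024 pages, so the zone must sit 4MB above its low watermark before compaction is attempted. That is twice the size of the allocation itself, leaving room for the transient page copies made during migration.
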
+
+		/*
+		 * fragmentation index determines if allocation failures are
+		 * due to low memory or external fragmentation
+		 *
+		 * index of -1 implies allocations might succeed depending
+		 * on watermarks
+		 * index towards 0 implies failure is due to lack of memory
+		 * index towards 1000 implies failure is due to fragmentation
+		 *
+		 * Only compact if a failure would be due to fragmentation.
+		 */
+		fragindex = fragmentation_index(zone, order);
+		if (fragindex >= 0 && fragindex <= 500)
+			continue;
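fragmentation_index() is made available to compaction by an earlier patch in this series (it lives in mm/vmstat.c). The cut-off of 500 is a magic number: on the 0-1000 scale it says a failure here is at least as likely to be caused by lack of memory as by fragmentation, in which case direct reclaim, not compaction, is the right response. It may be worth promoting this threshold to a tunable rather than hardcoding one policy.
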
+
+		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
+			rc = COMPACT_PARTIAL;
+			break;
+		}
+
+		status = compact_zone_order(zone, order, gfp_mask);
+		rc = max(status, rc);
+
+		if (zone_watermark_ok(zone, order, watermark, 0, 0))
+			break;
+	}
+
+	return rc;
+}
+
+
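For anyone reading this hunk without the mm/page_alloc.c side of the series to hand, the intended call site in the allocator slow path looks roughly like the sketch below. It is condensed from memory: the helper names and parameter lists (get_page_from_freelist() and friends) are approximate, not a faithful quote of the patch.

	unsigned long did_some_progress;
	struct page *page;

	did_some_progress = try_to_compact_pages(zonelist, order,
							gfp_mask, nodemask);
	if (did_some_progress != COMPACT_SKIPPED) {
		/* Compaction may have freed a suitable high-order page */
		page = get_page_from_freelist(gfp_mask, nodemask, order,
				zonelist, high_zoneidx, alloc_flags,
				preferred_zone, migratetype);
		if (page)
			return page;
	}
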
 /* Compact all zones within a node */
 static int compact_node(int nid)
 {
|
|
|
struct compact_control cc = {
|
|
|
.nr_freepages = 0,
|
|
|
.nr_migratepages = 0,
|
|
|
+ .order = -1,
|
|
|
};
|
|
|
|
|
|
zone = &pgdat->node_zones[zoneid];
|
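The .order = -1 here is the "compact everything" marker: with that value, compact_finished() above skips the free-list checks and keeps returning COMPACT_CONTINUE until the migrate and free scanners meet, so node-wide compaction never stops early on a watermark or free-page test.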