@@ -22,6 +22,36 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask);
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
+ */
+static inline void defer_compaction(struct zone *zone)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+}
+
+/* Returns true if compaction should be skipped this time */
+static inline bool compaction_deferred(struct zone *zone)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	return zone->compact_considered < defer_limit;
+}
+
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask)
@@ -29,6 +59,15 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	return COMPACT_CONTINUE;
 }
 
+static inline void defer_compaction(struct zone *zone)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone)
+{
+	return true;
+}
+
 #endif /* CONFIG_COMPACTION */
 
 #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
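
For reference, the backoff behaviour of the two helpers above can be traced
outside the kernel. The standalone C sketch below copies the deferral logic
into a userspace program; the two-field struct zone is a minimal stand-in for
the kernel structure, and the simulated-failure loop in main() is purely
illustrative, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* never skip more than ~64 times */

/* Minimal stand-in for the kernel's struct zone (assumption, not kernel code) */
struct zone {
	unsigned long compact_considered;
	unsigned int compact_defer_shift;
};

static void defer_compaction(struct zone *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

static bool compaction_deferred(struct zone *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Clamp the counter so it cannot run past the limit */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

int main(void)
{
	struct zone z = { 0, 0 };
	int failure;

	for (failure = 1; failure <= 8; failure++) {
		int skipped = 0;

		defer_compaction(&z);		/* compaction "failed" again */
		while (compaction_deferred(&z))
			skipped++;		/* these requests skip compaction */

		printf("failure %d: shift=%u, %d requests skipped\n",
		       failure, z.compact_defer_shift, skipped);
	}
	return 0;
}

Running it shows the exponential backoff: consecutive failures skip 1, 3, 7,
15, 31, then 63 requests, after which compact_defer_shift stays clamped at
COMPACT_MAX_DEFER_SHIFT and every cycle skips 63 requests before compaction is
retried. In the allocation path, a caller would be expected to check
compaction_deferred() before invoking compaction and to call defer_compaction()
when a compaction cycle fails to yield the requested page; those call sites are
outside this hunk.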