@@ -1233,6 +1233,47 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 	reclaim_stat->recent_scanned[1] += *nr_file;
 }
 
+/*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when we
+ * really need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+					unsigned long nr_freed,
+					int priority,
+					struct scan_control *sc)
+{
+	int lumpy_stall_priority;
+
+	/* kswapd should not stall on sync IO */
+	if (current_is_kswapd())
+		return false;
+
+	/* Only stall on lumpy reclaim */
+	if (!sc->lumpy_reclaim_mode)
+		return false;
+
+	/* If we have reclaimed everything on the isolated list, no stall */
+	if (nr_freed == nr_taken)
+		return false;
+
+	/*
+	 * For high-order allocations, there are two stall thresholds.
+	 * High-cost allocations stall immediately whereas lower
+	 * order allocations such as stacks require the scanning
+	 * priority to be much higher before stalling.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		lumpy_stall_priority = DEF_PRIORITY;
+	else
+		lumpy_stall_priority = DEF_PRIORITY / 3;
+
+	return priority <= lumpy_stall_priority;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
@@ -1298,14 +1339,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
 	nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 
-	/*
-	 * If we are direct reclaiming for contiguous pages and we do
-	 * not reclaim everything in the list, try again and wait
-	 * for IO to complete. This will stall high-order allocations
-	 * but that should be acceptable to the caller
-	 */
-	if (nr_reclaimed < nr_taken && !current_is_kswapd() &&
-			sc->lumpy_reclaim_mode) {
+	/* Check if we should synchronously wait for writeback */
+	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/*
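
A side note on the threshold arithmetic above: the following is a minimal, self-contained sketch (not part of the patch), assuming the usual mainline values DEF_PRIORITY == 12 and PAGE_ALLOC_COSTLY_ORDER == 3 from include/linux/mmzone.h; the helper name stall_threshold_met() and the driver loop are illustrative only. With these values a costly (order > 3) request meets the threshold on the very first scanning pass, while a smaller order only meets it once priority has fallen to DEF_PRIORITY / 3 == 4 or below.

/*
 * Userspace sketch of the stall-threshold comparison at the end of
 * should_reclaim_stall(). The macros are local copies of the assumed
 * kernel values; nothing here is kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY		12
#define PAGE_ALLOC_COSTLY_ORDER	3

/* Mirrors only the final priority comparison, not the early-return checks. */
static bool stall_threshold_met(int order, int priority)
{
	int lumpy_stall_priority;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		lumpy_stall_priority = DEF_PRIORITY;		/* stall from the first pass */
	else
		lumpy_stall_priority = DEF_PRIORITY / 3;	/* i.e. 4: only under real pressure */

	return priority <= lumpy_stall_priority;
}

int main(void)
{
	/* Scanning priority starts at DEF_PRIORITY and drops as pressure builds. */
	for (int priority = DEF_PRIORITY; priority >= 0; priority--)
		printf("priority %2d: order-2 stalls? %d  order-5 stalls? %d\n",
		       priority,
		       stall_threshold_met(2, priority),
		       stall_threshold_met(5, priority));
	return 0;
}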