@@ -750,7 +750,8 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
-				      struct scan_control *sc)
+				      struct scan_control *sc,
+				      int priority)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -856,9 +857,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 			/*
 			 * Only kswapd can writeback filesystem pages to
-			 * avoid risk of stack overflow
+			 * avoid risk of stack overflow but do not writeback
+			 * unless under significant pressure.
 			 */
-			if (page_is_file_cache(page) && !current_is_kswapd()) {
+			if (page_is_file_cache(page) &&
+			    (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
 				inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
 				goto keep_locked;
 			}
@@ -1509,12 +1512,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
 	spin_unlock_irq(&zone->lru_lock);
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
 
 	/* Check if we should syncronously wait for writeback */
 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
 		set_reclaim_mode(priority, sc, true);
-		nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+		nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
 	}
 
 	local_irq_disable();