@@ -146,6 +146,25 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+	int nr;
+
+	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+	     zone_page_state(zone, NR_INACTIVE_FILE);
+
+	if (get_nr_swap_pages() > 0)
+		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		      zone_page_state(zone, NR_INACTIVE_ANON);
+
+	return nr;
+}
+
+bool zone_reclaimable(struct zone *zone)
+{
+	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	if (!mem_cgroup_disabled())
@@ -1789,7 +1808,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (current_is_kswapd() && zone->all_unreclaimable)
+	if (current_is_kswapd() && !zone_reclaimable(zone))
 		force_scan = true;
 	if (!global_reclaim(sc))
 		force_scan = true;
@@ -2244,8 +2263,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
-			if (zone->all_unreclaimable &&
-			    sc->priority != DEF_PRIORITY)
+			if (sc->priority != DEF_PRIORITY &&
+			    !zone_reclaimable(zone))
 				continue;	/* Let kswapd poll it */
 			if (IS_ENABLED(CONFIG_COMPACTION)) {
 				/*
@@ -2283,11 +2302,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	return aborted_reclaim;
 }
 
-static bool zone_reclaimable(struct zone *zone)
-{
-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
-}
-
 /* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
 		struct scan_control *sc)
@@ -2301,7 +2315,7 @@ static bool all_unreclaimable(struct zonelist *zonelist,
 			continue;
 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
-		if (!zone->all_unreclaimable)
+		if (zone_reclaimable(zone))
 			return false;
 	}
 
@@ -2712,7 +2726,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		 * DEF_PRIORITY. Effectively, it considers them balanced so
 		 * they must be considered balanced here as well!
 		 */
-		if (zone->all_unreclaimable) {
+		if (!zone_reclaimable(zone)) {
 			balanced_pages += zone->managed_pages;
 			continue;
 		}
@@ -2773,7 +2787,6 @@ static bool kswapd_shrink_zone(struct zone *zone,
 			       unsigned long lru_pages,
 			       unsigned long *nr_attempted)
 {
-	unsigned long nr_slab;
 	int testorder = sc->order;
 	unsigned long balance_gap;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
@@ -2818,15 +2831,12 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	shrink_zone(zone, sc);
 
 	reclaim_state->reclaimed_slab = 0;
-	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+	shrink_slab(&shrink, sc->nr_scanned, lru_pages);
 	sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 
 	/* Account for the number of pages attempted to reclaim */
 	*nr_attempted += sc->nr_to_reclaim;
 
-	if (nr_slab == 0 && !zone_reclaimable(zone))
-		zone->all_unreclaimable = 1;
-
 	zone_clear_flag(zone, ZONE_WRITEBACK);
 
 	/*
@@ -2835,7 +2845,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	 * BDIs but as pressure is relieved, speculatively avoid congestion
 	 * waits.
 	 */
-	if (!zone->all_unreclaimable &&
+	if (zone_reclaimable(zone) &&
 	    zone_balanced(zone, testorder, 0, classzone_idx)) {
 		zone_clear_flag(zone, ZONE_CONGESTED);
 		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
@@ -2901,8 +2911,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable &&
-			    sc.priority != DEF_PRIORITY)
+			if (sc.priority != DEF_PRIORITY &&
+			    !zone_reclaimable(zone))
 				continue;
 
 			/*
@@ -2980,8 +2990,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable &&
-			    sc.priority != DEF_PRIORITY)
+			if (sc.priority != DEF_PRIORITY &&
+			    !zone_reclaimable(zone))
 				continue;
 
 			sc.nr_scanned = 0;
@@ -3265,20 +3275,6 @@ unsigned long global_reclaimable_pages(void)
 	return nr;
 }
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
-	int nr;
-
-	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-	     zone_page_state(zone, NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-		      zone_page_state(zone, NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -3576,7 +3572,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
 		return ZONE_RECLAIM_FULL;
 
-	if (zone->all_unreclaimable)
+	if (!zone_reclaimable(zone))
 		return ZONE_RECLAIM_FULL;
 
 	/*