@@ -1995,14 +1995,13 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
-	unsigned long total_scanned = 0;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2017,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 			continue;
 		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
+			/*
+			 * This steals pages from memory cgroups over softlimit
+			 * and returns the number of reclaimed pages and
+			 * scanned pages. This works for global memory pressure
+			 * and balancing, not for a memcg's limit.
+			 */
+			nr_soft_scanned = 0;
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+						sc->order, sc->gfp_mask,
+						&nr_soft_scanned);
+			sc->nr_reclaimed += nr_soft_reclaimed;
+			sc->nr_scanned += nr_soft_scanned;
+			/* need some check for avoid more shrink_zone() */
 		}
 
-		nr_soft_scanned = 0;
-		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-					sc->order, sc->gfp_mask,
-					&nr_soft_scanned);
-		sc->nr_reclaimed += nr_soft_reclaimed;
-		total_scanned += nr_soft_scanned;
-
 		shrink_zone(priority, zone, sc);
 	}
-
-	return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2094,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token(sc->mem_cgroup);
-		total_scanned += shrink_zones(priority, zonelist, sc);
+		shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
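
The net effect of the hunks above is a bookkeeping change: shrink_zones() no longer returns a private total_scanned, the soft-limit scan and reclaim counts are folded into the shared scan_control, and do_try_to_free_pages() simply calls it for its side effects. Below is a minimal stand-alone C model of that interface shape; it is not kernel code: scan_control is pared down to the two counters of interest, soft_limit_reclaim() is a made-up stand-in for mem_cgroup_soft_limit_reclaim() that reports fixed per-zone numbers, and zones are just a loop count.

#include <stdio.h>

/* pared-down stand-in for the kernel's scan_control */
struct scan_control {
	unsigned long nr_scanned;
	unsigned long nr_reclaimed;
};

/* fake soft-limit reclaim: pretend each zone scans 32 pages, reclaims 8 */
static unsigned long soft_limit_reclaim(unsigned long *nr_scanned)
{
	*nr_scanned = 32;
	return 8;
}

/* after the patch: void, all counts land in *sc */
static void shrink_zones(int nr_zones, struct scan_control *sc)
{
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	int zone;

	for (zone = 0; zone < nr_zones; zone++) {
		nr_soft_scanned = 0;
		nr_soft_reclaimed = soft_limit_reclaim(&nr_soft_scanned);
		sc->nr_reclaimed += nr_soft_reclaimed;
		sc->nr_scanned += nr_soft_scanned;	/* was: total_scanned += ... */
		/* the regular shrink_zone() pass for this zone would run here */
	}
}

int main(void)
{
	struct scan_control sc = { 0, 0 };

	/* caller side: no more "total_scanned += shrink_zones(...)" */
	shrink_zones(4, &sc);
	printf("scanned=%lu reclaimed=%lu\n", sc.nr_scanned, sc.nr_reclaimed);
	return 0;
}

For four zones the model prints scanned=128 reclaimed=32, the same totals the caller previously had to assemble from the return value.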