@@ -1787,7 +1787,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	bool all_unreclaimable;
 	unsigned long total_scanned = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
-	unsigned long lru_pages = 0;
 	struct zoneref *z;
 	struct zone *zone;
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
@@ -1798,18 +1797,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 	if (scanning_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
-	/*
-	 * mem_cgroup will not do shrink_slab.
-	 */
-	if (scanning_global_lru(sc)) {
-		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-
-			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-				continue;
-
-			lru_pages += zone_reclaimable_pages(zone);
-		}
-	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
@@ -1821,6 +1808,14 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 * over limit cgroups
 		 */
 		if (scanning_global_lru(sc)) {
+			unsigned long lru_pages = 0;
+			for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+					continue;
+
+				lru_pages += zone_reclaimable_pages(zone);
+			}
+
 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
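
For readers following the hunks above, the shape of the change is: the lru_pages accumulation that used to run once before the priority loop now runs inside the loop, right before shrink_slab(), so each pass hands the shrinker a freshly computed total. Below is a minimal userspace sketch of that pattern, not kernel code; fake_zone_pages() and fake_shrink_slab() are hypothetical stand-ins for zone_reclaimable_pages() and shrink_slab().

	/*
	 * Sketch: recompute the aggregate inside the priority loop instead of
	 * once up front, mirroring the structure of the patch above.
	 */
	#include <stdio.h>

	#define NR_ZONES	3
	#define DEF_PRIORITY	12

	/* Hypothetical stand-in: a zone's reclaimable-page count at this pass. */
	static unsigned long fake_zone_pages(int zone, int priority)
	{
		return (unsigned long)(zone + 1) * 1000 * (priority + 1);
	}

	/* Hypothetical stand-in for shrink_slab(scanned, gfp_mask, lru_pages). */
	static void fake_shrink_slab(unsigned long scanned, unsigned long lru_pages)
	{
		printf("shrink_slab: scanned=%lu lru_pages=%lu\n", scanned, lru_pages);
	}

	int main(void)
	{
		for (int priority = DEF_PRIORITY; priority >= 0; priority--) {
			unsigned long scanned = 100;	/* placeholder for sc->nr_scanned */
			unsigned long lru_pages = 0;

			/* Recomputed on every pass, as in the patched loop body. */
			for (int zone = 0; zone < NR_ZONES; zone++)
				lru_pages += fake_zone_pages(zone, priority);

			fake_shrink_slab(scanned, lru_pages);
		}
		return 0;
	}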