@@ -1886,7 +1886,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
-	if (current_is_kswapd())
+	if (current_is_kswapd() && mz->zone->all_unreclaimable)
		force_scan = true;
	if (!global_reclaim(sc))
		force_scan = true;
@@ -2111,16 +2111,6 @@ static void shrink_zone(int priority, struct zone *zone,
	};
	struct mem_cgroup *memcg;

-	if (global_reclaim(sc)) {
-		struct mem_cgroup_zone mz = {
-			.mem_cgroup = NULL,
-			.zone = zone,
-		};
-
-		shrink_mem_cgroup_zone(priority, &mz, sc);
-		return;
-	}
-
	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		struct mem_cgroup_zone mz = {
@@ -2134,6 +2124,10 @@ static void shrink_zone(int priority, struct zone *zone,
		 * scanned it with decreasing priority levels until
		 * nr_to_reclaim had been reclaimed. This priority
		 * cycle is thus over after a single memcg.
+		 *
+		 * Direct reclaim and kswapd, on the other hand, have
+		 * to scan all memory cgroups to fulfill the overall
+		 * scan target for the zone.
		 */
		if (!global_reclaim(sc)) {
			mem_cgroup_iter_break(root, memcg);
@@ -2478,13 +2472,24 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 static void age_active_anon(struct zone *zone, struct scan_control *sc,
			    int priority)
 {
-	struct mem_cgroup_zone mz = {
-		.mem_cgroup = NULL,
-		.zone = zone,
-	};
+	struct mem_cgroup *memcg;

-	if (inactive_anon_is_low(&mz))
-		shrink_active_list(SWAP_CLUSTER_MAX, &mz, sc, priority, 0);
+	if (!total_swap_pages)
+		return;
+
+	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	do {
+		struct mem_cgroup_zone mz = {
+			.mem_cgroup = memcg,
+			.zone = zone,
+		};
+
+		if (inactive_anon_is_low(&mz))
+			shrink_active_list(SWAP_CLUSTER_MAX, &mz,
+					   sc, priority, 0);
+
+		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+	} while (memcg);
 }

 /*