@@ -1505,6 +1505,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	spin_unlock_irq(&zone->lru_lock);
 }
 
+#ifdef CONFIG_SWAP
 static int inactive_anon_is_low_global(struct zone *zone)
 {
 	unsigned long active, inactive;
@@ -1530,12 +1531,26 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
 {
 	int low;
 
+	/*
+	 * If we don't have swap space, anonymous page deactivation
+	 * is pointless.
+	 */
+	if (!total_swap_pages)
+		return 0;
+
 	if (scanning_global_lru(sc))
 		low = inactive_anon_is_low_global(zone);
 	else
 		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
 	return low;
 }
+#else
+static inline int inactive_anon_is_low(struct zone *zone,
+					struct scan_control *sc)
+{
+	return 0;
+}
+#endif
 
 static int inactive_file_is_low_global(struct zone *zone)
 {
@@ -1781,7 +1796,7 @@ static void shrink_zone(int priority, struct zone *zone,
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
 	 */
-	if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
+	if (inactive_anon_is_low(zone, sc))
 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
 	throttle_vm_writeout(sc->gfp_mask);
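
The pattern the patch uses is a compile-time stub (#ifdef CONFIG_SWAP) combined with a run-time early return when total_swap_pages is zero, so the caller in shrink_zone() no longer needs its own nr_swap_pages check. The standalone C sketch below illustrates that same pattern outside the kernel; the names, the global, and the "one third" threshold are simplified stand-ins for illustration, not the kernel's actual heuristic.

/* Illustrative analogue of the patch's guard pattern; not kernel code. */
#include <stdio.h>

#define CONFIG_SWAP	/* comment out to build the no-swap stub instead */

static long total_swap_pages;	/* stand-in for the kernel's global */

#ifdef CONFIG_SWAP
static int inactive_anon_is_low(long active, long inactive)
{
	/* No swap: deactivating anonymous pages is pointless. */
	if (!total_swap_pages)
		return 0;
	/* Simplified stand-in for the real inactive-ratio heuristic. */
	return inactive < active / 3;
}
#else
static inline int inactive_anon_is_low(long active, long inactive)
{
	return 0;
}
#endif

int main(void)
{
	total_swap_pages = 0;
	printf("%d\n", inactive_anon_is_low(300, 50));	/* 0: no swap */
	total_swap_pages = 1024;
	printf("%d\n", inactive_anon_is_low(300, 50));	/* 1: list imbalanced */
	return 0;
}

With the check folded into the callee, every caller gets the "no swap, do nothing" behaviour for free, which is exactly why the nr_swap_pages test could be dropped from the shrink_zone() hunk above.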