|
@@ -71,12 +71,6 @@ struct scan_control {
|
|
|
/* Can pages be swapped as part of reclaim? */
|
|
|
int may_swap;
|
|
|
|
|
|
- /* This context's SWAP_CLUSTER_MAX. If freeing memory for
|
|
|
- * suspend, we effectively ignore SWAP_CLUSTER_MAX.
|
|
|
- * In this context, it doesn't matter that we scan the
|
|
|
- * whole list at once. */
|
|
|
- int swap_cluster_max;
|
|
|
-
|
|
|
int swappiness;
|
|
|
|
|
|
int all_unreclaimable;
|
|
@@ -1137,7 +1131,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
|
|
|
unsigned long nr_anon;
|
|
|
unsigned long nr_file;
|
|
|
|
|
|
- nr_taken = sc->isolate_pages(sc->swap_cluster_max,
|
|
|
+ nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
|
|
|
&page_list, &nr_scan, sc->order, mode,
|
|
|
zone, sc->mem_cgroup, 0, file);
|
|
|
|
|
@@ -1572,15 +1566,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
|
|
|
 * until we collected SWAP_CLUSTER_MAX pages to scan.
|
|
|
*/
|
|
|
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
|
|
|
- unsigned long *nr_saved_scan,
|
|
|
- unsigned long swap_cluster_max)
|
|
|
+ unsigned long *nr_saved_scan)
|
|
|
{
|
|
|
unsigned long nr;
|
|
|
|
|
|
*nr_saved_scan += nr_to_scan;
|
|
|
nr = *nr_saved_scan;
|
|
|
|
|
|
- if (nr >= swap_cluster_max)
|
|
|
+ if (nr >= SWAP_CLUSTER_MAX)
|
|
|
*nr_saved_scan = 0;
|
|
|
else
|
|
|
nr = 0;
|
|
@@ -1599,7 +1592,6 @@ static void shrink_zone(int priority, struct zone *zone,
|
|
|
unsigned long percent[2]; /* anon @ 0; file @ 1 */
|
|
|
enum lru_list l;
|
|
|
unsigned long nr_reclaimed = sc->nr_reclaimed;
|
|
|
- unsigned long swap_cluster_max = sc->swap_cluster_max;
|
|
|
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
|
|
|
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
|
|
|
int noswap = 0;
|
|
@@ -1622,15 +1614,15 @@ static void shrink_zone(int priority, struct zone *zone,
|
|
|
scan = (scan * percent[file]) / 100;
|
|
|
}
|
|
|
nr[l] = nr_scan_try_batch(scan,
|
|
|
- &reclaim_stat->nr_saved_scan[l],
|
|
|
- swap_cluster_max);
|
|
|
+ &reclaim_stat->nr_saved_scan[l]);
|
|
|
}
|
|
|
|
|
|
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
|
|
|
nr[LRU_INACTIVE_FILE]) {
|
|
|
for_each_evictable_lru(l) {
|
|
|
if (nr[l]) {
|
|
|
- nr_to_scan = min(nr[l], swap_cluster_max);
|
|
|
+ nr_to_scan = min_t(unsigned long,
|
|
|
+ nr[l], SWAP_CLUSTER_MAX);
|
|
|
nr[l] -= nr_to_scan;
|
|
|
|
|
|
nr_reclaimed += shrink_list(l, nr_to_scan,
|
|
@@ -1838,7 +1830,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
|
|
|
struct scan_control sc = {
|
|
|
.gfp_mask = gfp_mask,
|
|
|
.may_writepage = !laptop_mode,
|
|
|
- .swap_cluster_max = SWAP_CLUSTER_MAX,
|
|
|
.nr_to_reclaim = SWAP_CLUSTER_MAX,
|
|
|
.may_unmap = 1,
|
|
|
.may_swap = 1,
|
|
@@ -1863,7 +1854,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
|
|
|
.may_writepage = !laptop_mode,
|
|
|
.may_unmap = 1,
|
|
|
.may_swap = !noswap,
|
|
|
- .swap_cluster_max = SWAP_CLUSTER_MAX,
|
|
|
.swappiness = swappiness,
|
|
|
.order = 0,
|
|
|
.mem_cgroup = mem,
|
|
@@ -1897,7 +1887,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
|
|
|
.may_writepage = !laptop_mode,
|
|
|
.may_unmap = 1,
|
|
|
.may_swap = !noswap,
|
|
|
- .swap_cluster_max = SWAP_CLUSTER_MAX,
|
|
|
.nr_to_reclaim = SWAP_CLUSTER_MAX,
|
|
|
.swappiness = swappiness,
|
|
|
.order = 0,
|
|
@@ -1969,7 +1958,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
|
|
|
.gfp_mask = GFP_KERNEL,
|
|
|
.may_unmap = 1,
|
|
|
.may_swap = 1,
|
|
|
- .swap_cluster_max = SWAP_CLUSTER_MAX,
|
|
|
/*
|
|
|
* kswapd doesn't want to be bailed out while reclaim. because
|
|
|
* we want to put equal scanning pressure on each zone.
|
|
@@ -2354,7 +2342,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
|
|
|
.may_swap = 1,
|
|
|
.may_unmap = 1,
|
|
|
.may_writepage = 1,
|
|
|
- .swap_cluster_max = SWAP_CLUSTER_MAX,
|
|
|
.nr_to_reclaim = nr_to_reclaim,
|
|
|
.hibernation_mode = 1,
|
|
|
.swappiness = vm_swappiness,
|
|
@@ -2539,7 +2526,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
|
|
|
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
|
|
|
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
|
|
|
.may_swap = 1,
|
|
|
- .swap_cluster_max = SWAP_CLUSTER_MAX,
|
|
|
.nr_to_reclaim = max_t(unsigned long, nr_pages,
|
|
|
SWAP_CLUSTER_MAX),
|
|
|
.gfp_mask = gfp_mask,
|