@@ -2600,6 +2600,32 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 	return pgdat_balanced(pgdat, order, classzone_idx);
 }
 
+/*
+ * kswapd shrinks the zone by the number of pages required to reach
+ * the high watermark.
+ */
+static void kswapd_shrink_zone(struct zone *zone,
+			       struct scan_control *sc,
+			       unsigned long lru_pages)
+{
+	unsigned long nr_slab;
+	struct reclaim_state *reclaim_state = current->reclaim_state;
+	struct shrink_control shrink = {
+		.gfp_mask = sc->gfp_mask,
+	};
+
+	/* Reclaim above the high watermark. */
+	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
+	shrink_zone(zone, sc);
+
+	reclaim_state->reclaimed_slab = 0;
+	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+	sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+
+	if (nr_slab == 0 && !zone_reclaimable(zone))
+		zone->all_unreclaimable = 1;
+}
+
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
@@ -2627,24 +2653,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 	bool pgdat_is_balanced = false;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
-	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_unmap = 1,
 		.may_swap = 1,
-		/*
-		 * kswapd doesn't want to be bailed out while reclaim. because
-		 * we want to put equal scanning pressure on each zone.
-		 */
-		.nr_to_reclaim = ULONG_MAX,
 		.order = order,
 		.target_mem_cgroup = NULL,
 	};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
 loop_again:
 	sc.priority = DEF_PRIORITY;
 	sc.nr_reclaimed = 0;
@@ -2716,7 +2733,7 @@ loop_again:
 		 */
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
-			int nr_slab, testorder;
+			int testorder;
 			unsigned long balance_gap;
 
 			if (!populated_zone(zone))
@@ -2764,16 +2781,8 @@ loop_again:
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
 			    !zone_balanced(zone, testorder,
-					balance_gap, end_zone)) {
-				shrink_zone(zone, &sc);
-
-				reclaim_state->reclaimed_slab = 0;
-				nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
-				sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-
-				if (nr_slab == 0 && !zone_reclaimable(zone))
-					zone->all_unreclaimable = 1;
-			}
+					balance_gap, end_zone))
+				kswapd_shrink_zone(zone, &sc, lru_pages);
 
 			/*
 			 * If we're getting trouble reclaiming, start doing