@@ -77,6 +77,12 @@ struct scan_control {
 
	int order;
 
+	/*
+	 * Intended to reclaim enough contiguous memory rather than just a
+	 * sufficient amount of memory, i.e. the mode for high order allocation.
+	 */
+	bool lumpy_reclaim_mode;
+
	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;
 
@@ -575,7 +581,7 @@ static enum page_references page_check_references(struct page *page,
	referenced_page = TestClearPageReferenced(page);
 
	/* Lumpy reclaim - ignore references */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+	if (sc->lumpy_reclaim_mode)
		return PAGEREF_RECLAIM;
 
	/*
@@ -1125,7 +1131,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int lumpy_reclaim = 0;
 
	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1135,17 +1140,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
			return SWAP_CLUSTER_MAX;
	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 *
-	 * We use the same threshold as pageout congestion_wait below.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		lumpy_reclaim = 1;
 
	pagevec_init(&pvec, 1);
 
@@ -1158,7 +1152,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
		unsigned long nr_freed;
		unsigned long nr_active;
		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
		unsigned long nr_anon;
		unsigned long nr_file;
 
@@ -1211,7 +1205,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
		 * but that should be acceptable to the caller
		 */
		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
			/*
@@ -1639,6 +1633,21 @@ out:
	}
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
+{
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
+	else
+		sc->lumpy_reclaim_mode = 0;
+}
+
 /*
  * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  */
@@ -1653,6 +1662,8 @@ static void shrink_zone(int priority, struct zone *zone,
 
	get_scan_count(zone, sc, nr, priority);
 
+	set_lumpy_reclaim_mode(priority, sc);
+
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
	       nr[LRU_INACTIVE_FILE]) {
		for_each_evictable_lru(l) {