@@ -51,11 +51,20 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-enum lumpy_mode {
-	LUMPY_MODE_NONE,
-	LUMPY_MODE_ASYNC,
-	LUMPY_MODE_SYNC,
-};
+/*
+ * lumpy_mode determines how the inactive list is shrunk
+ * LUMPY_MODE_SINGLE: Reclaim only order-0 pages
+ * LUMPY_MODE_ASYNC: Do not block
+ * LUMPY_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
+ * LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference
+ *			page from the LRU and reclaim all pages within a
+ *			naturally aligned range
+ */
+typedef unsigned __bitwise__ lumpy_mode;
+#define LUMPY_MODE_SINGLE		((__force lumpy_mode)0x01u)
+#define LUMPY_MODE_ASYNC		((__force lumpy_mode)0x02u)
+#define LUMPY_MODE_SYNC			((__force lumpy_mode)0x04u)
+#define LUMPY_MODE_CONTIGRECLAIM	((__force lumpy_mode)0x08u)
 
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
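
A note on the type trick above: __bitwise__ only has teeth under sparse,
which will then warn whenever a plain integer or an unrelated bitwise type
is mixed into a lumpy_mode without a __force cast, the same protection
gfp_t already enjoys. The stand-alone sketch below is hypothetical code,
not part of the patch (the __CHECKER__ stubs roughly mirror what
linux/types.h and compiler.h do); it shows the idiom compiling to plain
unsigned arithmetic under gcc while staying checkable with sparse:

#include <stdio.h>

#ifdef __CHECKER__
#define __bitwise__	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise__
#define __force
#endif

typedef unsigned __bitwise__ lumpy_mode;

#define LUMPY_MODE_SINGLE		((__force lumpy_mode)0x01u)
#define LUMPY_MODE_ASYNC		((__force lumpy_mode)0x02u)
#define LUMPY_MODE_SYNC			((__force lumpy_mode)0x04u)
#define LUMPY_MODE_CONTIGRECLAIM	((__force lumpy_mode)0x08u)

int main(void)
{
	/* Combine bits the same way set_lumpy_reclaim_mode() does. */
	lumpy_mode mode = LUMPY_MODE_CONTIGRECLAIM | LUMPY_MODE_SYNC;

	/* Bit tests replace the old equality checks against the enum. */
	if (mode & LUMPY_MODE_SYNC)
		printf("sync lumpy reclaim\n");
	if (!(mode & LUMPY_MODE_SINGLE))
		printf("high-order (contiguous) reclaim\n");
	return 0;
}
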
@@ -88,7 +97,7 @@ struct scan_control {
	 * Intend to reclaim enough continuous memory rather than reclaim
	 * enough amount of memory. i.e, mode for high order allocation.
	 */
-	enum lumpy_mode lumpy_reclaim_mode;
+	lumpy_mode lumpy_reclaim_mode;
 
	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;
@@ -274,13 +283,13 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
|
|
|
static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
|
|
|
bool sync)
|
|
|
{
|
|
|
- enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
|
|
|
+ lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
|
|
|
|
|
|
/*
|
|
|
* Some reclaim have alredy been failed. No worth to try synchronous
|
|
|
* lumpy reclaim.
|
|
|
*/
|
|
|
- if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
|
|
|
+ if (sync && sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
|
|
|
return;
|
|
|
|
|
|
/*
|
|
@@ -288,17 +297,18 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
	 * trouble getting a small set of contiguous pages, we
	 * will reclaim both active and inactive pages.
	 */
+	sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM;
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->lumpy_reclaim_mode = mode;
+		sc->lumpy_reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->lumpy_reclaim_mode = mode;
+		sc->lumpy_reclaim_mode |= syncmode;
	else
-		sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+		sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
 }
 
 static void disable_lumpy_reclaim_mode(struct scan_control *sc)
 {
-	sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+	sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
 }
 
 static inline int is_page_cache_freeable(struct page *page)
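
To make the new invariant easier to review: after set_lumpy_reclaim_mode()
runs, lumpy_reclaim_mode always holds either LUMPY_MODE_SINGLE |
LUMPY_MODE_ASYNC, or LUMPY_MODE_CONTIGRECLAIM plus exactly one of
ASYNC/SYNC. A hypothetical userspace harness, not part of the patch (it
hard-codes PAGE_ALLOC_COSTLY_ORDER == 3 and DEF_PRIORITY == 12 from the
kernel headers of this era, and omits the early-return guard since that
depends on prior state):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_ALLOC_COSTLY_ORDER	3
#define DEF_PRIORITY		12

#define LUMPY_MODE_SINGLE	0x01u
#define LUMPY_MODE_ASYNC	0x02u
#define LUMPY_MODE_SYNC		0x04u
#define LUMPY_MODE_CONTIGRECLAIM 0x08u

static unsigned pick_mode(int order, int priority, bool sync)
{
	unsigned syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
	unsigned mode = LUMPY_MODE_CONTIGRECLAIM;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		mode |= syncmode;	/* costly order: always go lumpy */
	else if (order && priority < DEF_PRIORITY - 2)
		mode |= syncmode;	/* small order, but struggling */
	else
		mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
	return mode;
}

int main(void)
{
	printf("%#x\n", pick_mode(9, 5, true));	/* 0xc: CONTIGRECLAIM|SYNC */
	printf("%#x\n", pick_mode(0, 5, true));	/* 0x3: SINGLE|ASYNC */
	return 0;
}
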
@@ -429,7 +439,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
	 * first attempt to free a range of pages fails.
	 */
	if (PageWriteback(page) &&
-	    sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
+	    (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC))
		wait_on_page_writeback(page);
 
	if (!PageWriteback(page)) {
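
The mask test above is the pattern repeated through the rest of the patch:
once SYNC or ASYNC is OR'd together with CONTIGRECLAIM, an equality check
such as the old "== LUMPY_MODE_SYNC" can never match again. A minimal
illustration (hypothetical, flag values hard-coded):

#include <assert.h>

#define LUMPY_MODE_SYNC		0x04u
#define LUMPY_MODE_CONTIGRECLAIM 0x08u

int main(void)
{
	unsigned mode = LUMPY_MODE_CONTIGRECLAIM | LUMPY_MODE_SYNC; /* 0xc */

	assert(mode != LUMPY_MODE_SYNC);	/* old '==' test would now fail */
	assert(mode & LUMPY_MODE_SYNC);		/* new '&' test still fires */
	return 0;
}
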
@@ -622,7 +632,7 @@ static enum page_references page_check_references(struct page *page,
	referenced_page = TestClearPageReferenced(page);
 
	/* Lumpy reclaim - ignore references */
-	if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
+	if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM)
		return PAGEREF_RECLAIM;
 
	/*
@@ -739,7 +749,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
			 * for any page for which writeback has already
			 * started.
			 */
-			if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+			if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) &&
			    may_enter_fs)
				wait_on_page_writeback(page);
			else {
@@ -1324,7 +1334,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
		return false;
 
	/* Only stall on lumpy reclaim */
-	if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
		return false;
 
	/* If we have relaimed everything on the isolated list, no stall */
@@ -1375,7 +1385,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
	if (scanning_global_lru(sc)) {
		nr_taken = isolate_pages_global(nr_to_scan,
			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+			sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
					ISOLATE_INACTIVE : ISOLATE_BOTH,
			zone, 0, file);
		zone->pages_scanned += nr_scanned;
@@ -1388,7 +1398,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
	} else {
		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+			sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
					ISOLATE_INACTIVE : ISOLATE_BOTH,
			zone, sc->mem_cgroup,
			0, file);
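
One last precedence note on the two isolate-mode ternaries: '&' binds
tighter than '?:', so "mode & LUMPY_MODE_SINGLE ? ISOLATE_INACTIVE :
ISOLATE_BOTH" needs no extra parentheses, while the wait_on_page_writeback()
call sites add them around the '&' purely for readability next to '&&'.
A minimal check (hypothetical; it assumes the kernel's ISOLATE_INACTIVE == 0
and ISOLATE_BOTH == 2):

#include <assert.h>

#define LUMPY_MODE_SINGLE	0x01u
#define ISOLATE_INACTIVE	0
#define ISOLATE_BOTH		2

int main(void)
{
	unsigned mode = LUMPY_MODE_SINGLE | 0x02u;	/* SINGLE|ASYNC */

	/* Parses as (mode & LUMPY_MODE_SINGLE) ? ... : ... */
	assert((mode & LUMPY_MODE_SINGLE ?
			ISOLATE_INACTIVE : ISOLATE_BOTH) == ISOLATE_INACTIVE);
	return 0;
}
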