@@ -579,6 +579,40 @@ redo:
 		put_page(page);		/* drop ref from isolate */
 }
 
+enum page_references {
+	PAGEREF_RECLAIM,
+	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_ACTIVATE,
+};
+
+static enum page_references page_check_references(struct page *page,
+						  struct scan_control *sc)
+{
+	unsigned long vm_flags;
+	int referenced;
+
+	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	if (!referenced)
+		return PAGEREF_RECLAIM;
+
+	/* Lumpy reclaim - ignore references */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		return PAGEREF_RECLAIM;
+
+	/*
+	 * Mlock lost the isolation race with us. Let try_to_unmap()
+	 * move the page to the unevictable list.
+	 */
+	if (vm_flags & VM_LOCKED)
+		return PAGEREF_RECLAIM;
+
+	if (page_mapping_inuse(page))
+		return PAGEREF_ACTIVATE;
+
+	/* Reclaim if clean, defer dirty pages to writeback */
+	return PAGEREF_RECLAIM_CLEAN;
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -590,16 +624,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
-	unsigned long vm_flags;
 
 	cond_resched();
 
 	pagevec_init(&freed_pvec, 1);
 	while (!list_empty(page_list)) {
+		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		int referenced;
 
 		cond_resched();
 
@@ -641,17 +674,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1,
-						sc->mem_cgroup, &vm_flags);
-		/*
-		 * In active use or really unfreeable? Activate it.
-		 * If page which have PG_mlocked lost isoltation race,
-		 * try_to_unmap moves it to unevictable list
-		 */
-		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-					referenced && page_mapping_inuse(page)
-					&& !(vm_flags & VM_LOCKED))
+		references = page_check_references(page, sc);
+		switch (references) {
+		case PAGEREF_ACTIVATE:
 			goto activate_locked;
+		case PAGEREF_RECLAIM:
+		case PAGEREF_RECLAIM_CLEAN:
+			; /* try to reclaim the page below */
+		}
 
 		/*
 		 * Anonymous process memory has backing store?
@@ -685,7 +715,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		}
 
 		if (PageDirty(page)) {
-			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
+			if (references == PAGEREF_RECLAIM_CLEAN)
 				goto keep_locked;
 			if (!may_enter_fs)
 				goto keep_locked;