|
@@ -1204,6 +1204,43 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
|
|
|
* But we had to alter page->flags anyway.
|
|
|
*/
|
|
|
|
|
|
/*
 * Move every page on @list back onto the zone LRU list selected by @lru,
 * updating per-zone and memcg LRU accounting, and releasing the pages'
 * reference counts in pagevec-sized batches.
 *
 * Caller must hold zone->lru_lock with IRQs disabled; the lock is
 * temporarily dropped around each batched __pagevec_release() and
 * reacquired before the loop continues.  The lock is held again on return.
 *
 * Pages on @list are expected to be active (PageActive set) and off the
 * LRU (PageLRU clear).  When @lru is an inactive list, PageActive is
 * cleared and the move is counted as PGDEACTIVATE.
 */
static void move_active_pages_to_lru(struct zone *zone,
				     struct list_head *list,
				     enum lru_list lru)
{
	unsigned long pgmoved = 0;	/* pages moved onto the target LRU */
	struct pagevec pvec;
	struct page *page;

	pagevec_init(&pvec, 1);	/* NOTE(review): 1 presumably marks a cold pagevec — confirm */

	while (!list_empty(list)) {
		page = lru_to_page(list);
		prefetchw_prev_lru_page(page, list, flags);

		/* Page must not already be on an LRU; put it back on one now. */
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);

		VM_BUG_ON(!PageActive(page));
		if (!is_active_lru(lru))
			ClearPageActive(page);	/* we are de-activating */

		list_move(&page->lru, &zone->lru[lru].list);
		mem_cgroup_add_lru_list(page, lru);
		pgmoved++;

		/*
		 * Flush when the pagevec fills up, and also when @list has
		 * just been drained so the final partial batch is released.
		 * The lock must be dropped across __pagevec_release(), which
		 * may free pages.
		 */
		if (!pagevec_add(&pvec, page) || list_empty(list)) {
			spin_unlock_irq(&zone->lru_lock);
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);	/* shed buffer heads under pressure */
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	/* Account all moved pages against the destination LRU's counter. */
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}
|
|
|
|
|
|
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
|
|
|
struct scan_control *sc, int priority, int file)
|
|
@@ -1215,8 +1252,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
|
|
|
LIST_HEAD(l_active);
|
|
|
LIST_HEAD(l_inactive);
|
|
|
struct page *page;
|
|
|
- struct pagevec pvec;
|
|
|
- enum lru_list lru;
|
|
|
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
|
|
|
|
|
|
lru_add_drain();
|
|
@@ -1233,6 +1268,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
|
|
|
}
|
|
|
reclaim_stat->recent_scanned[!!file] += pgmoved;
|
|
|
|
|
|
+ __count_zone_vm_events(PGREFILL, zone, pgscanned);
|
|
|
if (file)
|
|
|
__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
|
|
|
else
|
|
@@ -1275,8 +1311,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
|
|
|
/*
|
|
|
* Move pages back to the lru list.
|
|
|
*/
|
|
|
- pagevec_init(&pvec, 1);
|
|
|
-
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
/*
|
|
|
* Count referenced pages from currently used mappings as rotated,
|
|
@@ -1286,57 +1320,12 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
|
|
|
*/
|
|
|
reclaim_stat->recent_rotated[!!file] += pgmoved;
|
|
|
|
|
|
- pgmoved = 0; /* count pages moved to inactive list */
|
|
|
- lru = LRU_BASE + file * LRU_FILE;
|
|
|
- while (!list_empty(&l_inactive)) {
|
|
|
- page = lru_to_page(&l_inactive);
|
|
|
- prefetchw_prev_lru_page(page, &l_inactive, flags);
|
|
|
- VM_BUG_ON(PageLRU(page));
|
|
|
- SetPageLRU(page);
|
|
|
- VM_BUG_ON(!PageActive(page));
|
|
|
- ClearPageActive(page);
|
|
|
-
|
|
|
- list_move(&page->lru, &zone->lru[lru].list);
|
|
|
- mem_cgroup_add_lru_list(page, lru);
|
|
|
- pgmoved++;
|
|
|
- if (!pagevec_add(&pvec, page)) {
|
|
|
- spin_unlock_irq(&zone->lru_lock);
|
|
|
- if (buffer_heads_over_limit)
|
|
|
- pagevec_strip(&pvec);
|
|
|
- __pagevec_release(&pvec);
|
|
|
- spin_lock_irq(&zone->lru_lock);
|
|
|
- }
|
|
|
- }
|
|
|
- __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
|
|
|
- __count_zone_vm_events(PGREFILL, zone, pgscanned);
|
|
|
- __count_vm_events(PGDEACTIVATE, pgmoved);
|
|
|
-
|
|
|
- pgmoved = 0; /* count pages moved back to active list */
|
|
|
- lru = LRU_ACTIVE + file * LRU_FILE;
|
|
|
- while (!list_empty(&l_active)) {
|
|
|
- page = lru_to_page(&l_active);
|
|
|
- prefetchw_prev_lru_page(page, &l_active, flags);
|
|
|
- VM_BUG_ON(PageLRU(page));
|
|
|
- SetPageLRU(page);
|
|
|
- VM_BUG_ON(!PageActive(page));
|
|
|
-
|
|
|
- list_move(&page->lru, &zone->lru[lru].list);
|
|
|
- mem_cgroup_add_lru_list(page, lru);
|
|
|
- pgmoved++;
|
|
|
- if (!pagevec_add(&pvec, page)) {
|
|
|
- spin_unlock_irq(&zone->lru_lock);
|
|
|
- if (buffer_heads_over_limit)
|
|
|
- pagevec_strip(&pvec);
|
|
|
- __pagevec_release(&pvec);
|
|
|
- spin_lock_irq(&zone->lru_lock);
|
|
|
- }
|
|
|
- }
|
|
|
- __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
|
|
|
+ move_active_pages_to_lru(zone, &l_active,
|
|
|
+ LRU_ACTIVE + file * LRU_FILE);
|
|
|
+ move_active_pages_to_lru(zone, &l_inactive,
|
|
|
+ LRU_BASE + file * LRU_FILE);
|
|
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
- if (buffer_heads_over_limit)
|
|
|
- pagevec_strip(&pvec);
|
|
|
- pagevec_release(&pvec);
|
|
|
}
|
|
|
|
|
|
static int inactive_anon_is_low_global(struct zone *zone)
|