@@ -1276,7 +1276,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1552,7 +1553,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
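Both hunks replace a plain ++ with an increment by hpage_nr_pages(page), because a page sitting on the LRU may now be a transparent huge page that stands in for many base pages. As a rough sketch (assuming the PageTransHuge() test and the HPAGE_PMD_NR constant introduced elsewhere in the THP series), the helper looks something like:

static inline int hpage_nr_pages(struct page *page)
{
	/* A transparent huge page covers HPAGE_PMD_NR base pages. */
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	/* An ordinary page counts as one. */
	return 1;
}

So recent_rotated[] and nr_rotated are bumped by the number of base pages the rotated page actually represents, keeping the reclaim statistics in units of base pages.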