@@ -155,19 +155,14 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
 	return &mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup)->reclaim_stat;
 }
 
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
-				       enum lru_list lru)
+static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	if (!mem_cgroup_disabled())
-		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
-						    zone_to_nid(mz->zone),
-						    zone_idx(mz->zone),
-						    BIT(lru));
+		return mem_cgroup_get_lruvec_size(lruvec, lru);
 
-	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
-
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -1603,6 +1598,9 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	enum lru_list lru;
 	int noswap = 0;
 	bool force_scan = false;
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 
 	/*
 	 * If the zone or memcg is small, nr[l] can be 0.  This
@@ -1628,10 +1626,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 		goto out;
 	}
 
-	anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
-	file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	anon  = get_lruvec_size(lruvec, LRU_ACTIVE_ANON) +
+		get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
+	file  = get_lruvec_size(lruvec, LRU_ACTIVE_FILE) +
+		get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 
 	if (global_reclaim(sc)) {
 		free  = zone_page_state(mz->zone, NR_FREE_PAGES);
@@ -1694,7 +1692,7 @@ out:
 		int file = is_file_lru(lru);
 		unsigned long scan;
 
-		scan = zone_nr_lru_pages(mz, lru);
+		scan = get_lruvec_size(lruvec, lru);
 		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
 			scan >>= sc->priority;
 			if (!scan && force_scan)
@@ -1730,6 +1728,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
+	struct lruvec *lruvec;
 
 	/* If not in reclaim/compaction mode, stop */
 	if (!in_reclaim_compaction(sc))
@@ -1762,10 +1761,12 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 	 * If we have not reclaimed enough pages for compaction and the
 	 * inactive lists are large enough, continue reclaiming
 	 */
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	pages_for_compaction = (2UL << sc->order);
-	inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
-		inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+		inactive_lru_pages += get_lruvec_size(lruvec,
+						      LRU_INACTIVE_ANON);
 	if (sc->nr_reclaimed < pages_for_compaction &&
 	    inactive_lru_pages > pages_for_compaction)
 		return true;