@@ -134,10 +134,7 @@ struct mem_cgroup_reclaim_iter {
  * per-zone information in memory controller.
  */
 struct mem_cgroup_per_zone {
-	/*
-	 * spin_lock to protect the per cgroup LRU
-	 */
-	struct list_head	lists[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 	unsigned long		count[NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
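
The hunk above is the structural change: the open-coded array of LRU
list heads in mem_cgroup_per_zone is folded into a struct lruvec. For
reference, a minimal sketch of that structure, assuming the definition
this series introduces in include/linux/mmzone.h (at this point it
carries nothing but the list heads):

	/* Sketch: collects the per-zone LRU list heads so that the
	 * global zone and mem_cgroup_per_zone can embed one type. */
	struct lruvec {
		struct list_head lists[NR_LRU_LISTS];
	};
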
@@ -1061,7 +1058,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lists[lru]);
+	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
@@ -1079,7 +1076,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lists[lru]);
+	list_move(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
@@ -1109,7 +1106,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lists[lru]);
+	list_add(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 /*
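
Note that the three list operations touched above are called with the
zone's LRU lock held, as the huge-page comment in the last hunk hints.
A hedged sketch of the caller pattern, illustrative only and not a
hunk from this patch:

	/* Illustrative: LRU list manipulation is serialized by
	 * zone->lru_lock at the call sites in the generic LRU code. */
	spin_lock_irqsave(&zone->lru_lock, flags);
	mem_cgroup_add_lru_list(page, lru);
	spin_unlock_irqrestore(&zone->lru_lock, flags);
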
@@ -1307,7 +1304,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lists[lru];
+	src = &mz->lruvec.lists[lru];
 
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -3738,7 +3735,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 
 	zone = &NODE_DATA(node)->node_zones[zid];
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lists[lru];
+	list = &mz->lruvec.lists[lru];
 
 	loop = MEM_CGROUP_ZSTAT(mz, lru);
 	/* give some margin against EBUSY etc...*/
@@ -4864,7 +4861,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
-			INIT_LIST_HEAD(&mz->lists[l]);
+			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->mem = memcg;
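
The remaining substitutions are mechanical: every &mz->lists[lru]
becomes &mz->lruvec.lists[lru], down to the INIT_LIST_HEAD() loop above
that sets up the empty lists when a memcg is created. The point of the
consolidation is that global and per-memcg reclaim can then hand around
a single lruvec pointer; a hypothetical helper sketching the idea
(lru_to_list() is not part of this patch):

	/* Hypothetical: once both struct zone and mem_cgroup_per_zone
	 * embed a struct lruvec, a list head can be resolved without
	 * knowing which owner it belongs to. */
	static inline struct list_head *lru_to_list(struct lruvec *lruvec,
						    enum lru_list lru)
	{
		return &lruvec->lists[lru];
	}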