@@ -1040,30 +1040,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 		return &zone->lruvec;
 
 	pc = lookup_page_cgroup(page);
-	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * putback:				charge:
-	 * SetPageLRU				SetPageCgroupUsed
-	 * smp_mb				smp_mb
-	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
-	 *
-	 * Ensure that one of the two sides adds the page to the memcg
-	 * LRU during a race.
-	 */
-	smp_mb();
-	/*
-	 * If the page is uncharged, it may be freed soon, but it
-	 * could also be swap cache (readahead, swapoff) that needs to
-	 * be reclaimable in the future. root_mem_cgroup will babysit
-	 * it for the time being.
-	 */
-	if (PageCgroupUsed(pc)) {
-		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-		smp_rmb();
-		memcg = pc->mem_cgroup;
-		SetPageCgroupAcctLRU(pc);
-	} else
-		memcg = root_mem_cgroup;
+	memcg = pc->mem_cgroup;
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -1090,18 +1067,8 @@ void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * root_mem_cgroup babysits uncharged LRU pages, but
-	 * PageCgroupUsed is cleared when the page is about to get
-	 * freed. PageCgroupAcctLRU remembers whether the
-	 * LRU-accounting happened against pc->mem_cgroup or
-	 * root_mem_cgroup.
-	 */
-	if (TestClearPageCgroupAcctLRU(pc)) {
-		VM_BUG_ON(!pc->mem_cgroup);
-		memcg = pc->mem_cgroup;
-	} else
-		memcg = root_mem_cgroup;
+	memcg = pc->mem_cgroup;
+	VM_BUG_ON(!memcg);
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
@@ -2217,8 +2184,25 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 }
 
 /*
- * Unlike exported interface, "oom" parameter is added. if oom==true,
- * oom-killer can be invoked.
+ * __mem_cgroup_try_charge() does
+ * 1. detect the memcg to be charged against from the passed *mm and *ptr,
+ * 2. update the res_counter,
+ * 3. call memory reclaim if necessary.
+ *
+ * As a special case, if the task is dying (fatal_signal_pending() or
+ * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
+ * to *ptr. There are two reasons for this: 1. dying threads should quit as
+ * soon as possible without any hazards; 2. all pages should have a valid
+ * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
+ * pointer, that is treated as a charge to root_mem_cgroup.
+ *
+ * So __mem_cgroup_try_charge() will return
+ *  0       ... on success, filling *ptr with a valid memcg pointer.
+ *  -ENOMEM ... charge failure because of resource limits.
+ *  -EINTR  ... if the thread is dying. *ptr is filled with root_mem_cgroup.
+ *
+ * Unlike the exported interface, an "oom" parameter is added. If oom==true,
+ * the oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
 				   gfp_t gfp_mask,
@@ -2247,7 +2231,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
 	if (!*ptr && !mm)
-		goto bypass;
+		*ptr = root_mem_cgroup;
 again:
 	if (*ptr) { /* css should be a valid one */
 		memcg = *ptr;
@@ -2273,7 +2257,9 @@ again:
 		 * task-struct. So, mm->owner can be NULL.
 		 */
 		memcg = mem_cgroup_from_task(p);
-		if (!memcg || mem_cgroup_is_root(memcg)) {
+		if (!memcg)
+			memcg = root_mem_cgroup;
+		if (mem_cgroup_is_root(memcg)) {
 			rcu_read_unlock();
 			goto done;
 		}
@@ -2348,8 +2334,8 @@ nomem:
 	*ptr = NULL;
 	return -ENOMEM;
 bypass:
-	*ptr = NULL;
-	return 0;
+	*ptr = root_mem_cgroup;
+	return -EINTR;
 }
 
 /*
@@ -2457,6 +2443,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
+	WARN_ON_ONCE(PageLRU(page));
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2468,7 +2455,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
-			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
+			(1 << PCG_MIGRATION))
 /*
  * Because tail pages are not marked as "used", set it. We're under
  * zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2478,7 +2465,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
 	struct page_cgroup *head_pc = lookup_page_cgroup(head);
+	struct mem_cgroup_per_zone *mz;
 	struct page_cgroup *pc;
+	enum lru_list lru;
 	int i;
 
 	if (mem_cgroup_disabled())
@@ -2487,23 +2476,15 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 		pc = head_pc + i;
 		pc->mem_cgroup = head_pc->mem_cgroup;
 		smp_wmb();/* see __commit_charge() */
-		/*
-		 * LRU flags cannot be copied because we need to add tail
-		 * page to LRU by generic call and our hooks will be called.
-		 */
 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
 	}
-
-	if (PageCgroupAcctLRU(head_pc)) {
-		enum lru_list lru;
-		struct mem_cgroup_per_zone *mz;
-		/*
-		 * We hold lru_lock, then, reduce counter directly.
-		 */
-		lru = page_lru(head);
-		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
-		MEM_CGROUP_ZSTAT(mz, lru) -= HPAGE_PMD_NR - 1;
-	}
+	/*
+	 * Tail pages will be added to the LRU.
+	 * We hold lru_lock, so we can reduce the counter directly.
+	 */
+	lru = page_lru(head);
+	mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
+	MEM_CGROUP_ZSTAT(mz, lru) -= HPAGE_PMD_NR - 1;
 }
 #endif
 
@@ -2620,7 +2601,7 @@ static int mem_cgroup_move_parent(struct page *page,
 
 	parent = mem_cgroup_from_cont(pcg);
 	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
-	if (ret || !parent)
+	if (ret)
 		goto put_back;
 
 	if (nr_pages > 1)
@@ -2667,9 +2648,8 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 
 	pc = lookup_page_cgroup(page);
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
-	if (ret || !memcg)
+	if (ret == -ENOMEM)
 		return ret;
-
 	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
 	return 0;
 }
@@ -2736,10 +2716,9 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	if (!page_is_file_cache(page))
 		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
-	if (!PageSwapCache(page)) {
+	if (!PageSwapCache(page))
 		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
-		WARN_ON_ONCE(PageLRU(page));
-	} else { /* page is swapcache/shmem */
+	else { /* page is swapcache/shmem */
 		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
 		if (!ret)
 			__mem_cgroup_commit_charge_swapin(page, memcg, type);
@@ -2781,11 +2760,16 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	*memcgp = memcg;
 	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
 	css_put(&memcg->css);
+	if (ret == -EINTR)
+		ret = 0;
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
+	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
+	if (ret == -EINTR)
+		ret = 0;
+	return ret;
 }
 
 static void
@@ -3245,7 +3229,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		*memcgp = memcg;
 		ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
 		css_put(&memcg->css);/* drop extra refcnt */
-		if (ret || *memcgp == NULL) {
+		if (ret) {
 			if (PageAnon(page)) {
 				lock_page_cgroup(pc);
 				ClearPageCgroupMigration(pc);
@@ -3255,6 +3239,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 			 */
 			mem_cgroup_uncharge_page(page);
 		}
+		/* we'll need to revisit this error code (we have -EINTR) */
 		return -ENOMEM;
 	}
 	/*
@@ -3674,7 +3659,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 		pc = lookup_page_cgroup(page);
 
 		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
-		if (ret == -ENOMEM)
+		if (ret == -ENOMEM || ret == -EINTR)
 			break;
 
 		if (ret == -EBUSY || ret == -EINVAL) {
@@ -5065,9 +5050,9 @@ one_by_one:
 		}
 		ret = __mem_cgroup_try_charge(NULL,
 					      GFP_KERNEL, 1, &memcg, false);
-		if (ret || !memcg)
+		if (ret)
 			/* mem_cgroup_clear_mc() will do uncharge later */
-			return -ENOMEM;
+			return ret;
 		mc.precharge++;
 	}
 	return ret;