@@ -33,11 +33,11 @@
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
 #include <linux/mm_inline.h>
+#include <linux/page_cgroup.h>
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
-static struct kmem_cache *page_cgroup_cache __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 
 /*
@@ -135,79 +135,6 @@ struct mem_cgroup {
 };
 static struct mem_cgroup init_mem_cgroup;
 
-/*
- * We use the lower bit of the page->page_cgroup pointer as a bit spin
- * lock.  We need to ensure that page->page_cgroup is at least two
- * byte aligned (based on comments from Nick Piggin).  But since
- * bit_spin_lock doesn't actually set that lock bit in a non-debug
- * uniprocessor kernel, we should avoid setting it here too.
- */
-#define PAGE_CGROUP_LOCK_BIT	0x0
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
-#else
-#define PAGE_CGROUP_LOCK	0x0
-#endif
-
-/*
- * A page_cgroup page is associated with every page descriptor. The
- * page_cgroup helps us identify information about the cgroup
- */
-struct page_cgroup {
-	struct list_head lru;		/* per cgroup LRU list */
-	struct page *page;
-	struct mem_cgroup *mem_cgroup;
-	unsigned long flags;
-};
-
-enum {
-	/* flags for mem_cgroup */
-	PCG_CACHE, /* charged as cache */
-	/* flags for LRU placement */
-	PCG_ACTIVE, /* page is active in this cgroup */
-	PCG_FILE, /* page is file system backed */
-	PCG_UNEVICTABLE, /* page is unevictableable */
-};
-
-#define TESTPCGFLAG(uname, lname)			\
-static inline int PageCgroup##uname(struct page_cgroup *pc)	\
-	{ return test_bit(PCG_##lname, &pc->flags); }
-
-#define SETPCGFLAG(uname, lname)			\
-static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
-	{ set_bit(PCG_##lname, &pc->flags); }
-
-#define CLEARPCGFLAG(uname, lname)			\
-static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
-	{ clear_bit(PCG_##lname, &pc->flags); }
-
-
-/* Cache flag is set only once (at allocation) */
-TESTPCGFLAG(Cache, CACHE)
-
-/* LRU management flags (from global-lru definition) */
-TESTPCGFLAG(File, FILE)
-SETPCGFLAG(File, FILE)
-CLEARPCGFLAG(File, FILE)
-
-TESTPCGFLAG(Active, ACTIVE)
-SETPCGFLAG(Active, ACTIVE)
-CLEARPCGFLAG(Active, ACTIVE)
-
-TESTPCGFLAG(Unevictable, UNEVICTABLE)
-SETPCGFLAG(Unevictable, UNEVICTABLE)
-CLEARPCGFLAG(Unevictable, UNEVICTABLE)
-
-static int page_cgroup_nid(struct page_cgroup *pc)
-{
-	return page_to_nid(pc->page);
-}
-
-static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
-{
-	return page_zonenum(pc->page);
-}
-
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
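Note: this hunk deletes struct page_cgroup, its flag machinery and the page->page_cgroup pointer/lock tricks from memcontrol.c; later hunks instead rely on PCG_USED/PCG_LOCK bits, PageCgroupUsed(), trylock_page_cgroup() and lookup_page_cgroup(), which the patch assumes are provided by the new <linux/page_cgroup.h> included above. That header is not part of this excerpt, so the following is only an illustrative sketch of the declarations being assumed; field order and the exact helper set may differ.

#include <linux/bit_spinlock.h>
#include <linux/list.h>

/* Sketch: one page_cgroup per struct page, preallocated at boot. */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
	struct page *page;
	struct list_head lru;		/* per-cgroup LRU list */
};

enum {
	PCG_LOCK,	/* the page_cgroup itself is locked */
	PCG_CACHE,	/* charged as cache */
	PCG_USED,	/* this page_cgroup is in use (charged) */
	PCG_ACTIVE,	/* page is active in this cgroup */
	PCG_FILE,	/* page is file system backed */
	PCG_UNEVICTABLE,/* page is unevictable */
};

/* The lock now lives in pc->flags instead of in page->page_cgroup. */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline int trylock_page_cgroup(struct page_cgroup *pc)
{
	return bit_spin_trylock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}

/*
 * PageCgroupUsed(), SetPageCgroupUsed(), ClearPageCgroupUsed() and the
 * Cache/File/Active/Unevictable accessors are presumably generated the
 * same way the removed TESTPCGFLAG()/SETPCGFLAG()/CLEARPCGFLAG() macros
 * did it, just in the shared header instead of memcontrol.c.
 */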
@@ -216,12 +143,18 @@ enum charge_type {
 	NR_CHARGE_TYPE,
 };
 
+/* only for here (for easy reading.) */
+#define PCGF_CACHE	(1UL << PCG_CACHE)
+#define PCGF_USED	(1UL << PCG_USED)
+#define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
+#define PCGF_LOCK	(1UL << PCG_LOCK)
+#define PCGF_FILE	(1UL << PCG_FILE)
 static const unsigned long
 pcg_default_flags[NR_CHARGE_TYPE] = {
-	((1 << PCG_CACHE) | (1 << PCG_FILE)),
-	((1 << PCG_ACTIVE)),
-	((1 << PCG_ACTIVE) | (1 << PCG_CACHE)),
-	0,
+	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
+	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
+	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
+	0, /* FORCE */
 };
 
 /*
@@ -303,37 +236,6 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 				struct mem_cgroup, css);
 }
 
-static inline int page_cgroup_locked(struct page *page)
-{
-	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
-static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
-{
-	VM_BUG_ON(!page_cgroup_locked(page));
-	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
-}
-
-struct page_cgroup *page_get_page_cgroup(struct page *page)
-{
-	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
-}
-
-static void lock_page_cgroup(struct page *page)
-{
-	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
-static int try_lock_page_cgroup(struct page *page)
-{
-	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
-static void unlock_page_cgroup(struct page *page)
-{
-	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
-
 static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 					struct page_cgroup *pc)
 {
@@ -436,17 +338,16 @@ void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
 	 * safely get to page_cgroup without it, so just try_lock it:
 	 * mem_cgroup_isolate_pages allows for page left on wrong list.
 	 */
-	if (!try_lock_page_cgroup(page))
+	pc = lookup_page_cgroup(page);
+	if (!trylock_page_cgroup(pc))
 		return;
-
-	pc = page_get_page_cgroup(page);
-	if (pc) {
+	if (pc && PageCgroupUsed(pc)) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
 		__mem_cgroup_move_lists(pc, lru);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 	}
-	unlock_page_cgroup(page);
+	unlock_page_cgroup(pc);
 }
 
 /*
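For reference, lookup_page_cgroup() is what replaces the old kmem_cache allocation: every pfn gets a page_cgroup preallocated at boot, and charging merely looks it up. The function lives in the new mm/page_cgroup.c, which is not shown in this diff; the sketch below is a simplified flat-memory version, assuming a node_page_cgroup array hung off pglist_data (the real code presumably also has a CONFIG_SPARSEMEM per-section variant), so treat it as an approximation.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/page_cgroup.h>

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	pg_data_t *pgdat = NODE_DATA(page_to_nid(page));
	struct page_cgroup *base = pgdat->node_page_cgroup;	/* assumed field */

	/* Can be NULL very early in boot, before the table is set up. */
	if (unlikely(!base))
		return NULL;
	return base + (pfn - pgdat->node_start_pfn);
}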
@@ -533,6 +434,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
 		if (scan >= nr_to_scan)
 			break;
+		if (unlikely(!PageCgroupUsed(pc)))
+			continue;
 		page = pc->page;
 
 		if (unlikely(!PageLRU(page)))
@@ -576,26 +479,27 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
-	unsigned long flags;
 	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
 
-	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
-	if (unlikely(pc == NULL))
-		goto err;
-
+	pc = lookup_page_cgroup(page);
+	/* can happen at boot */
+	if (unlikely(!pc))
+		return 0;
+	prefetchw(pc);
 	/*
 	 * We always charge the cgroup the mm_struct belongs to.
 	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
+
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 		if (unlikely(!mem)) {
 			rcu_read_unlock();
-			kmem_cache_free(page_cgroup_cache, pc);
 			return 0;
 		}
 		/*
@@ -631,36 +535,33 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		}
 	}
 
+
+	lock_page_cgroup(pc);
+	if (unlikely(PageCgroupUsed(pc))) {
+		unlock_page_cgroup(pc);
+		res_counter_uncharge(&mem->res, PAGE_SIZE);
+		css_put(&mem->css);
+
+		goto done;
+	}
 	pc->mem_cgroup = mem;
-	pc->page = page;
 	/*
 	 * If a page is accounted as a page cache, insert to inactive list.
 	 * If anon, insert to active list.
 	 */
 	pc->flags = pcg_default_flags[ctype];
 
-	lock_page_cgroup(page);
-	if (unlikely(page_get_page_cgroup(page))) {
-		unlock_page_cgroup(page);
-		res_counter_uncharge(&mem->res, PAGE_SIZE);
-		css_put(&mem->css);
-		kmem_cache_free(page_cgroup_cache, pc);
-		goto done;
-	}
-	page_assign_page_cgroup(page, pc);
-
 	mz = page_cgroup_zoneinfo(pc);
+
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
+	unlock_page_cgroup(pc);
 
-	unlock_page_cgroup(page);
 done:
 	return 0;
 out:
 	css_put(&mem->css);
-	kmem_cache_free(page_cgroup_cache, pc);
-err:
 	return -ENOMEM;
 }
 
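Taken together, the hunks above turn charging from "allocate a page_cgroup and attach it to the page" into "look up the preallocated page_cgroup, lock it, and claim it by setting the Used bit". The condensed paraphrase below is only meant to make the resulting locking order easy to read; charge_commit() is a made-up name, the reclaim/retry and error paths are omitted, and it is not the literal function produced by the patch.

/*
 * Paraphrase of the commit step of mem_cgroup_charge_common() after this
 * patch. pc->flags = pcg_default_flags[ctype] includes PCGF_USED and
 * PCGF_LOCK, so a locked-but-unused page_cgroup becomes "charged" in a
 * single store while we still hold the PCG_LOCK bit.
 */
static int charge_commit(struct mem_cgroup *mem, struct page_cgroup *pc,
			 enum charge_type ctype)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		/* Someone else charged this pfn first; give our charge back. */
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		return 0;
	}
	pc->mem_cgroup = mem;
	pc->flags = pcg_default_flags[ctype];	/* sets PCG_USED, keeps PCG_LOCK */

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);
	unlock_page_cgroup(pc);
	return 0;
}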
@@ -668,7 +569,8 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
 	if (mem_cgroup_subsys.disabled)
 		return 0;
-
+	if (PageCompound(page))
+		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -689,7 +591,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 {
 	if (mem_cgroup_subsys.disabled)
 		return 0;
-
+	if (PageCompound(page))
+		return 0;
 	/*
 	 * Corner case handling. This is called from add_to_page_cache()
 	 * in usual. But some FS (shmem) precharges this page before calling it
@@ -702,15 +605,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
 
-		lock_page_cgroup(page);
-		pc = page_get_page_cgroup(page);
-		if (pc) {
-			VM_BUG_ON(pc->page != page);
-			VM_BUG_ON(!pc->mem_cgroup);
-			unlock_page_cgroup(page);
+
+		pc = lookup_page_cgroup(page);
+		if (!pc)
+			return 0;
+		lock_page_cgroup(pc);
+		if (PageCgroupUsed(pc)) {
+			unlock_page_cgroup(pc);
 			return 0;
 		}
-		unlock_page_cgroup(page);
+		unlock_page_cgroup(pc);
 	}
 
 	if (unlikely(!mm))
@@ -741,37 +645,39 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	/*
 	 * Check if our page_cgroup is valid
 	 */
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	if (unlikely(!pc))
-		goto unlock;
-
-	VM_BUG_ON(pc->page != page);
+	pc = lookup_page_cgroup(page);
+	if (unlikely(!pc || !PageCgroupUsed(pc)))
+		return;
 
-	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
-	    && ((PageCgroupCache(pc) || page_mapped(page))))
-		goto unlock;
+	lock_page_cgroup(pc);
+	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
+	     || !PageCgroupUsed(pc)) {
+		/* This happens at race in zap_pte_range() and do_swap_page()*/
+		unlock_page_cgroup(pc);
+		return;
+	}
+	ClearPageCgroupUsed(pc);
+	mem = pc->mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
+	unlock_page_cgroup(pc);
 
-	page_assign_page_cgroup(page, NULL);
-	unlock_page_cgroup(page);
-
-	mem = pc->mem_cgroup;
 	res_counter_uncharge(&mem->res, PAGE_SIZE);
 	css_put(&mem->css);
 
-	kmem_cache_free(page_cgroup_cache, pc);
 	return;
-unlock:
-	unlock_page_cgroup(page);
 }
 
 void mem_cgroup_uncharge_page(struct page *page)
 {
+	/* early check. */
+	if (page_mapped(page))
+		return;
+	if (page->mapping && !PageAnon(page))
+		return;
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
 }
 
@@ -795,9 +701,9 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 	if (mem_cgroup_subsys.disabled)
 		return 0;
 
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	if (pc) {
+	pc = lookup_page_cgroup(page);
+	lock_page_cgroup(pc);
+	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
 		if (PageCgroupCache(pc)) {
@@ -807,7 +713,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 			ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 		}
 	}
-	unlock_page_cgroup(page);
+	unlock_page_cgroup(pc);
 	if (mem) {
 		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
 			ctype, mem);
@@ -832,7 +738,7 @@ void mem_cgroup_end_migration(struct page *newpage)
 	 */
 	if (!newpage->mapping)
 		__mem_cgroup_uncharge_common(newpage,
-				MEM_CGROUP_CHARGE_TYPE_FORCE);
+			MEM_CGROUP_CHARGE_TYPE_FORCE);
 	else if (PageAnon(newpage))
 		mem_cgroup_uncharge_page(newpage);
 }
@@ -918,6 +824,8 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 	while (!list_empty(list)) {
 		pc = list_entry(list->prev, struct page_cgroup, lru);
 		page = pc->page;
+		if (!PageCgroupUsed(pc))
+			break;
 		get_page(page);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 		/*
@@ -932,8 +840,10 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 				count = FORCE_UNCHARGE_BATCH;
 				cond_resched();
 			}
-		} else
-			cond_resched();
+		} else {
+			spin_lock_irqsave(&mz->lru_lock, flags);
+			break;
+		}
 		spin_lock_irqsave(&mz->lru_lock, flags);
 	}
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
@@ -957,6 +867,8 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 	while (mem->res.usage > 0) {
 		if (atomic_read(&mem->css.cgroup->count) > 0)
 			goto out;
+		/* This is for making all *used* pages to be on LRU. */
+		lru_add_drain_all();
 		for_each_node_state(node, N_POSSIBLE)
 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 				struct mem_cgroup_per_zone *mz;
@@ -965,6 +877,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 				for_each_lru(l)
 					mem_cgroup_force_empty_list(mem, mz, l);
 			}
+		cond_resched();
 	}
 	ret = 0;
 out:
@@ -1175,8 +1088,8 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	int node;
 
 	if (unlikely((cont->parent) == NULL)) {
+		page_cgroup_init();
 		mem = &init_mem_cgroup;
-		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
 	} else {
 		mem = mem_cgroup_alloc();
 		if (!mem)
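The page_cgroup_init() call added in the last hunk comes from the new mm/page_cgroup.c, which is outside this diff. As a rough sketch of what it is assumed to do in the flat-memory case, it walks each node and bootmem-allocates one struct page_cgroup per pfn; the real implementation differs in detail (helper names, allocation flags) and also handles sparse memory sections, so the code below is illustrative only.

#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/page_cgroup.h>

/* Hypothetical per-node setup, roughly what page_cgroup_init() relies on. */
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long start_pfn, nr_pages, index, table_size;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = alloc_bootmem_node(NODE_DATA(nid), table_size);
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		/* Unused entries: flags clear, so PageCgroupUsed() is false. */
		base[index].flags = 0;
		base[index].mem_cgroup = NULL;
		base[index].page = pfn_to_page(start_pfn + index);
		INIT_LIST_HEAD(&base[index].lru);
	}
	NODE_DATA(nid)->node_page_cgroup = base;	/* assumed field */
	return 0;
}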