- /* memcontrol.c - Memory Controller
- *
- * Copyright IBM Corporation, 2007
- * Author Balbir Singh <balbir@linux.vnet.ibm.com>
- *
- * Copyright 2007 OpenVZ SWsoft Inc
- * Author: Pavel Emelianov <xemul@openvz.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
- #include <linux/res_counter.h>
- #include <linux/memcontrol.h>
- #include <linux/cgroup.h>
- #include <linux/mm.h>
- #include <linux/smp.h>
- #include <linux/page-flags.h>
- #include <linux/backing-dev.h>
- #include <linux/bit_spinlock.h>
- #include <linux/rcupdate.h>
- #include <linux/swap.h>
- #include <linux/spinlock.h>
- #include <linux/fs.h>
- #include <linux/seq_file.h>
- #include <asm/uaccess.h>
- struct cgroup_subsys mem_cgroup_subsys;
- static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
- /*
- * Statistics for memory cgroup.
- */
- enum mem_cgroup_stat_index {
- /*
- * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
- */
- MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
- MEM_CGROUP_STAT_NSTATS,
- };
- struct mem_cgroup_stat_cpu {
- s64 count[MEM_CGROUP_STAT_NSTATS];
- } ____cacheline_aligned_in_smp;
- struct mem_cgroup_stat {
- struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
- };
- /*
- * Accounting happens with irqs disabled, so there is no need to
- * bump the preempt count.
- */
- static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
- enum mem_cgroup_stat_index idx, int val)
- {
- int cpu = smp_processor_id();
- stat->cpustat[cpu].count[idx] += val;
- }
- static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
- enum mem_cgroup_stat_index idx)
- {
- int cpu;
- s64 ret = 0;
- for_each_possible_cpu(cpu)
- ret += stat->cpustat[cpu].count[idx];
- return ret;
- }
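- /*
- * Illustrative sketch (not part of the original file): how the two
- * helpers above cooperate. A writer bumps only its own CPU's slot
- * (with irqs already disabled, e.g. under the per-zone lru lock),
- * while a reader folds every possible CPU's slot into one total.
- */
- static s64 example_stat_usage(struct mem_cgroup_stat *stat)
- {
- /* fast path: per-cpu increment, no shared cacheline bouncing */
- __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, 1);
- /* slow path: sum all per-cpu counts for a global view */
- return mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
- }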
- /*
- * per-zone information in memory controller.
- */
- enum mem_cgroup_zstat_index {
- MEM_CGROUP_ZSTAT_ACTIVE,
- MEM_CGROUP_ZSTAT_INACTIVE,
- NR_MEM_CGROUP_ZSTAT,
- };
- struct mem_cgroup_per_zone {
- /*
- * spin_lock to protect the per cgroup LRU
- */
- spinlock_t lru_lock;
- struct list_head active_list;
- struct list_head inactive_list;
- unsigned long count[NR_MEM_CGROUP_ZSTAT];
- };
- /* Macro for accessing counter */
- #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
- struct mem_cgroup_per_node {
- struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
- };
- struct mem_cgroup_lru_info {
- struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
- };
- /*
- * The memory controller data structure. The memory controller controls both
- * page cache and RSS per cgroup. We would eventually like to provide
- * statistics based on the statistics developed by Rik van Riel for clock-pro,
- * to help the administrator determine what knobs to tune.
- *
- * TODO: Add a water mark for the memory controller. Reclaim will begin when
- * we hit the water mark. Maybe even add a low water mark, such that
- * no reclaim occurs from a cgroup at its low water mark; this is
- * a feature that will be implemented much later.
- */
- struct mem_cgroup {
- struct cgroup_subsys_state css;
- /*
- * the counter to account for memory usage
- */
- struct res_counter res;
- /*
- * Per cgroup active and inactive list, similar to the
- * per zone LRU lists.
- */
- struct mem_cgroup_lru_info info;
- int prev_priority; /* for recording reclaim priority */
- /*
- * statistics.
- */
- struct mem_cgroup_stat stat;
- };
- /*
- * We use the lower bit of the page->page_cgroup pointer as a bit spin
- * lock. We need to ensure that page->page_cgroup is at least two-byte
- * aligned (based on comments from Nick Piggin).
- */
- #define PAGE_CGROUP_LOCK_BIT 0x0
- #define PAGE_CGROUP_LOCK (1 << PAGE_CGROUP_LOCK_BIT)
- /*
- * A page_cgroup is associated with every page descriptor. It helps us
- * identify which cgroup a page is charged to.
- */
- struct page_cgroup {
- struct list_head lru; /* per cgroup LRU list */
- struct page *page;
- struct mem_cgroup *mem_cgroup;
- atomic_t ref_cnt; /* helpful when pages move between
- * mapped and cached states */
- int flags;
- };
- #define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
- #define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */
- static inline int page_cgroup_nid(struct page_cgroup *pc)
- {
- return page_to_nid(pc->page);
- }
- static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
- {
- return page_zonenum(pc->page);
- }
- enum {
- MEM_CGROUP_TYPE_UNSPEC = 0,
- MEM_CGROUP_TYPE_MAPPED,
- MEM_CGROUP_TYPE_CACHED,
- MEM_CGROUP_TYPE_ALL,
- MEM_CGROUP_TYPE_MAX,
- };
- enum charge_type {
- MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
- MEM_CGROUP_CHARGE_TYPE_MAPPED,
- };
- /*
- * Always modified under the lru lock, so preempt_disable() is not necessary.
- */
- static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
- bool charge)
- {
- int val = (charge)? 1 : -1;
- struct mem_cgroup_stat *stat = &mem->stat;
- VM_BUG_ON(!irqs_disabled());
- if (flags & PAGE_CGROUP_FLAG_CACHE)
- __mem_cgroup_stat_add_safe(stat,
- MEM_CGROUP_STAT_CACHE, val);
- else
- __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
- }
- static inline struct mem_cgroup_per_zone *
- mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
- {
- BUG_ON(!mem->info.nodeinfo[nid]);
- return &mem->info.nodeinfo[nid]->zoneinfo[zid];
- }
- static inline struct mem_cgroup_per_zone *
- page_cgroup_zoneinfo(struct page_cgroup *pc)
- {
- struct mem_cgroup *mem = pc->mem_cgroup;
- int nid = page_cgroup_nid(pc);
- int zid = page_cgroup_zid(pc);
- return mem_cgroup_zoneinfo(mem, nid, zid);
- }
- static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
- enum mem_cgroup_zstat_index idx)
- {
- int nid, zid;
- struct mem_cgroup_per_zone *mz;
- u64 total = 0;
- for_each_online_node(nid)
- for (zid = 0; zid < MAX_NR_ZONES; zid++) {
- mz = mem_cgroup_zoneinfo(mem, nid, zid);
- total += MEM_CGROUP_ZSTAT(mz, idx);
- }
- return total;
- }
- static struct mem_cgroup init_mem_cgroup;
- static inline
- struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
- {
- return container_of(cgroup_subsys_state(cont,
- mem_cgroup_subsys_id), struct mem_cgroup,
- css);
- }
- static inline
- struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
- {
- return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
- struct mem_cgroup, css);
- }
- void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
- {
- struct mem_cgroup *mem;
- mem = mem_cgroup_from_task(p);
- css_get(&mem->css);
- mm->mem_cgroup = mem;
- }
- void mm_free_cgroup(struct mm_struct *mm)
- {
- css_put(&mm->mem_cgroup->css);
- }
- static inline int page_cgroup_locked(struct page *page)
- {
- return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
- &page->page_cgroup);
- }
- void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
- {
- int locked;
- /*
- * While resetting the page_cgroup we might not hold the
- * page_cgroup lock. free_hot_cold_page() is an example
- * of such a scenario
- */
- if (pc)
- VM_BUG_ON(!page_cgroup_locked(page));
- locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
- page->page_cgroup = ((unsigned long)pc | locked);
- }
- struct page_cgroup *page_get_page_cgroup(struct page *page)
- {
- return (struct page_cgroup *)
- (page->page_cgroup & ~PAGE_CGROUP_LOCK);
- }
- static void __always_inline lock_page_cgroup(struct page *page)
- {
- bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
- VM_BUG_ON(!page_cgroup_locked(page));
- }
- static void __always_inline unlock_page_cgroup(struct page *page)
- {
- bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
- }
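- /*
- * Illustrative note (not part of the original file): page->page_cgroup
- * is a tagged word. Bit 0 is the bit-spinlock and the remaining bits
- * hold the (at least two-byte aligned) page_cgroup pointer, so the two
- * never collide:
- *
- * unsigned long word = page->page_cgroup;
- * int locked = word & PAGE_CGROUP_LOCK;
- * struct page_cgroup *pc = (struct page_cgroup *)(word & ~PAGE_CGROUP_LOCK);
- */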
- /*
- * Tie a new page_cgroup to the struct page under lock_page_cgroup().
- * This can fail if the page is already tied to another page_cgroup.
- * Returns 0 on success.
- */
- static int page_cgroup_assign_new_page_cgroup(struct page *page,
- struct page_cgroup *pc)
- {
- int ret = 0;
- lock_page_cgroup(page);
- if (!page_get_page_cgroup(page))
- page_assign_page_cgroup(page, pc);
- else /* A page is tied to other pc. */
- ret = 1;
- unlock_page_cgroup(page);
- return ret;
- }
- /*
- * Clear page->page_cgroup member under lock_page_cgroup().
- * If the given "pc" value is different from the current page->page_cgroup,
- * page->page_cgroup is not cleared.
- * Returns the value of page->page_cgroup at the time the lock was taken.
- * A caller can detect whether clearing succeeded by checking
- * clear_page_cgroup(page, pc) == pc
- */
- static struct page_cgroup *clear_page_cgroup(struct page *page,
- struct page_cgroup *pc)
- {
- struct page_cgroup *ret;
- /* lock and clear */
- lock_page_cgroup(page);
- ret = page_get_page_cgroup(page);
- if (likely(ret == pc))
- page_assign_page_cgroup(page, NULL);
- unlock_page_cgroup(page);
- return ret;
- }
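- /*
- * Illustrative usage sketch (not part of the original file): callers
- * treat clear_page_cgroup() as a compare-and-clear and only act when
- * their "pc" won the race:
- *
- * if (clear_page_cgroup(page, pc) == pc) {
- * ... pc is now detached from the page; safe to uncharge and free ...
- * }
- *
- * mem_cgroup_uncharge() and mem_cgroup_page_migration() below follow
- * exactly this pattern.
- */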
- static void __mem_cgroup_remove_list(struct page_cgroup *pc)
- {
- int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
- struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
- if (from)
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
- else
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
- mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
- list_del_init(&pc->lru);
- }
- static void __mem_cgroup_add_list(struct page_cgroup *pc)
- {
- int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
- struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
- if (!to) {
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
- list_add(&pc->lru, &mz->inactive_list);
- } else {
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
- list_add(&pc->lru, &mz->active_list);
- }
- mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
- }
- static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
- {
- int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
- struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
- if (from)
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
- else
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
- if (active) {
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
- pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
- list_move(&pc->lru, &mz->active_list);
- } else {
- MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
- pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
- list_move(&pc->lru, &mz->inactive_list);
- }
- }
- int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
- {
- int ret;
- task_lock(task);
- ret = task->mm && vm_match_cgroup(task->mm, mem);
- task_unlock(task);
- return ret;
- }
- /*
- * This routine assumes that the appropriate zone's lru lock is already held
- */
- void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
- {
- struct mem_cgroup_per_zone *mz;
- unsigned long flags;
- if (!pc)
- return;
- mz = page_cgroup_zoneinfo(pc);
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_move_lists(pc, active);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- }
- /*
- * Calculate the mapped_ratio under the memory controller. This will be used
- * in vmscan.c for determining whether we have to reclaim mapped pages.
- */
- int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
- {
- long total, rss;
- /*
- * usage is recorded in bytes, but here we assume the number of
- * physical pages can be represented by a "long" on any arch.
- */
- total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
- rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
- return (int)((rss * 100L) / total);
- }
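- /*
- * Worked example (illustrative, not part of the original file): with a
- * usage of 400 pages charged and rss = 100 pages, total = 400 + 1 (the
- * +1 avoids division by zero), so the ratio is (100 * 100) / 401 = 24,
- * i.e. roughly a quarter of the charged pages are mapped.
- */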
- /*
- * This function is called from vmscan.c. In the page reclaiming loop, the
- * balance between the active and inactive lists is calculated. For memory
- * controller page reclaiming, we should use the mem_cgroup's imbalance
- * rather than the zone's global LRU imbalance.
- */
- long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
- {
- unsigned long active, inactive;
- /* active and inactive are the number of pages. 'long' is ok.*/
- active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
- inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
- return (long) (active / (inactive + 1));
- }
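- /*
- * Worked example (illustrative, not part of the original file): with
- * active = 3000 and inactive = 999 pages, the imbalance is
- * 3000 / (999 + 1) = 3; the +1 guards against an empty inactive list.
- */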
- /*
- * prev_priority control: this will be used in the memory reclaim path.
- */
- int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
- {
- return mem->prev_priority;
- }
- void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
- {
- if (priority < mem->prev_priority)
- mem->prev_priority = priority;
- }
- void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
- {
- mem->prev_priority = priority;
- }
- /*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and is decremented in each loop.
- * (see include/linux/mmzone.h)
- */
- long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
- struct zone *zone, int priority)
- {
- long nr_active;
- int nid = zone->zone_pgdat->node_id;
- int zid = zone_idx(zone);
- struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
- nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
- return (nr_active >> priority);
- }
- long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
- struct zone *zone, int priority)
- {
- long nr_inactive;
- int nid = zone->zone_pgdat->node_id;
- int zid = zone_idx(zone);
- struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
- nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
- return (nr_inactive >> priority);
- }
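- /*
- * Worked example (illustrative, not part of the original file): with
- * 1024 inactive pages in this zone, the first reclaim pass at
- * priority 12 (DEF_PRIORITY) asks for 1024 >> 12 = 0 pages; by
- * priority 2 the request has grown to 1024 >> 2 = 256 pages.
- */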
- unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
- struct list_head *dst,
- unsigned long *scanned, int order,
- int mode, struct zone *z,
- struct mem_cgroup *mem_cont,
- int active)
- {
- unsigned long nr_taken = 0;
- struct page *page;
- unsigned long scan;
- LIST_HEAD(pc_list);
- struct list_head *src;
- struct page_cgroup *pc, *tmp;
- int nid = z->zone_pgdat->node_id;
- int zid = zone_idx(z);
- struct mem_cgroup_per_zone *mz;
- mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
- if (active)
- src = &mz->active_list;
- else
- src = &mz->inactive_list;
- spin_lock(&mz->lru_lock);
- scan = 0;
- list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
- if (scan >= nr_to_scan)
- break;
- page = pc->page;
- if (unlikely(!PageLRU(page)))
- continue;
- if (PageActive(page) && !active) {
- __mem_cgroup_move_lists(pc, true);
- continue;
- }
- if (!PageActive(page) && active) {
- __mem_cgroup_move_lists(pc, false);
- continue;
- }
- scan++;
- list_move(&pc->lru, &pc_list);
- if (__isolate_lru_page(page, mode) == 0) {
- list_move(&page->lru, dst);
- nr_taken++;
- }
- }
- list_splice(&pc_list, src);
- spin_unlock(&mz->lru_lock);
- *scanned = scan;
- return nr_taken;
- }
- /*
- * Charge the memory controller for page usage.
- * Return
- * 0 if the charge was successful
- * < 0 if the cgroup is over its limit
- */
- static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, enum charge_type ctype)
- {
- struct mem_cgroup *mem;
- struct page_cgroup *pc;
- unsigned long flags;
- unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
- struct mem_cgroup_per_zone *mz;
- /*
- * Should page_cgroups go in their own slab?
- * One could optimize the performance of the charging routine
- * by saving a bit in the page_flags and using it as a lock
- * to see if the cgroup page already has a page_cgroup associated
- * with it
- */
- retry:
- if (page) {
- lock_page_cgroup(page);
- pc = page_get_page_cgroup(page);
- /*
- * The page_cgroup exists and
- * the page has already been accounted.
- */
- if (pc) {
- if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
- /* is this page currently being uncharged? */
- unlock_page_cgroup(page);
- cpu_relax();
- goto retry;
- } else {
- unlock_page_cgroup(page);
- goto done;
- }
- }
- unlock_page_cgroup(page);
- }
- pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
- if (pc == NULL)
- goto err;
- /*
- * We always charge the cgroup the mm_struct belongs to.
- * The mm_struct's mem_cgroup changes on task migration if the
- * thread group leader migrates. It's possible that mm is not
- * set, if so charge the init_mm (happens for pagecache usage).
- */
- if (!mm)
- mm = &init_mm;
- rcu_read_lock();
- mem = rcu_dereference(mm->mem_cgroup);
- /*
- * For every charge from the cgroup, increment reference
- * count
- */
- css_get(&mem->css);
- rcu_read_unlock();
- /*
- * If we created the page_cgroup, we should free it on exceeding
- * the cgroup limit.
- */
- while (res_counter_charge(&mem->res, PAGE_SIZE)) {
- if (!(gfp_mask & __GFP_WAIT))
- goto out;
- if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
- continue;
- /*
- * try_to_free_mem_cgroup_pages() might not give us a full
- * picture of reclaim. Some pages are reclaimed and might be
- * moved to swap cache or just unmapped from the cgroup.
- * Check the limit again to see if the reclaim reduced the
- * current usage of the cgroup before giving up
- */
- if (res_counter_check_under_limit(&mem->res))
- continue;
- if (!nr_retries--) {
- mem_cgroup_out_of_memory(mem, gfp_mask);
- goto out;
- }
- congestion_wait(WRITE, HZ/10);
- }
- atomic_set(&pc->ref_cnt, 1);
- pc->mem_cgroup = mem;
- pc->page = page;
- pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
- if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
- pc->flags |= PAGE_CGROUP_FLAG_CACHE;
- if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
- /*
- * Another charge has been added to this page already.
- * Retrying is fine: we take lock_page_cgroup(page) again,
- * read page->page_cgroup, and increment the refcnt.
- */
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- css_put(&mem->css);
- kfree(pc);
- if (!page)
- goto done;
- goto retry;
- }
- mz = page_cgroup_zoneinfo(pc);
- spin_lock_irqsave(&mz->lru_lock, flags);
- /* Update statistics vector */
- __mem_cgroup_add_list(pc);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- done:
- return 0;
- out:
- css_put(&mem->css);
- kfree(pc);
- err:
- return -ENOMEM;
- }
- int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
- {
- return mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_MAPPED);
- }
- /*
- * See whether cached pages should be charged at all.
- */
- int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
- {
- int ret = 0;
- if (!mm)
- mm = &init_mm;
- ret = mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_CACHE);
- return ret;
- }
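- /*
- * Illustrative caller sketch (hypothetical, not part of the original
- * file): a fault path is expected to charge before mapping the page
- * and to roll the charge back if mapping fails.
- */
- static int example_fault_path(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
- {
- int err;
- if (mem_cgroup_charge(page, mm, gfp_mask))
- return -ENOMEM; /* over limit and reclaim failed */
- err = 0; /* ... map the page here ... */
- if (err)
- mem_cgroup_uncharge_page(page); /* roll back the charge */
- return err;
- }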
- /*
- * Uncharging is always a welcome operation; we never complain, we simply
- * uncharge. This routine should be called with lock_page_cgroup held.
- */
- void mem_cgroup_uncharge(struct page_cgroup *pc)
- {
- struct mem_cgroup *mem;
- struct mem_cgroup_per_zone *mz;
- struct page *page;
- unsigned long flags;
- /*
- * Check if our page_cgroup is valid
- */
- if (!pc)
- return;
- if (atomic_dec_and_test(&pc->ref_cnt)) {
- page = pc->page;
- mz = page_cgroup_zoneinfo(pc);
- /*
- * get page->cgroup and clear it under lock.
- * force_empty can drop page->cgroup without checking refcnt.
- */
- unlock_page_cgroup(page);
- if (clear_page_cgroup(page, pc) == pc) {
- mem = pc->mem_cgroup;
- css_put(&mem->css);
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_remove_list(pc);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- kfree(pc);
- }
- lock_page_cgroup(page);
- }
- }
- void mem_cgroup_uncharge_page(struct page *page)
- {
- lock_page_cgroup(page);
- mem_cgroup_uncharge(page_get_page_cgroup(page));
- unlock_page_cgroup(page);
- }
- /*
- * Returns non-zero if a page (under migration) has a valid page_cgroup
- * member. The page_cgroup's refcnt is incremented.
- */
- int mem_cgroup_prepare_migration(struct page *page)
- {
- struct page_cgroup *pc;
- int ret = 0;
- lock_page_cgroup(page);
- pc = page_get_page_cgroup(page);
- if (pc && atomic_inc_not_zero(&pc->ref_cnt))
- ret = 1;
- unlock_page_cgroup(page);
- return ret;
- }
- void mem_cgroup_end_migration(struct page *page)
- {
- struct page_cgroup *pc;
- lock_page_cgroup(page);
- pc = page_get_page_cgroup(page);
- mem_cgroup_uncharge(pc);
- unlock_page_cgroup(page);
- }
- /*
- * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
- * And there is no race with the uncharge() routines because the page_cgroup
- * for *page* holds one extra reference from mem_cgroup_prepare_migration.
- */
- void mem_cgroup_page_migration(struct page *page, struct page *newpage)
- {
- struct page_cgroup *pc;
- struct mem_cgroup *mem;
- unsigned long flags;
- struct mem_cgroup_per_zone *mz;
- retry:
- pc = page_get_page_cgroup(page);
- if (!pc)
- return;
- mem = pc->mem_cgroup;
- mz = page_cgroup_zoneinfo(pc);
- if (clear_page_cgroup(page, pc) != pc)
- goto retry;
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_remove_list(pc);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- pc->page = newpage;
- lock_page_cgroup(newpage);
- page_assign_page_cgroup(newpage, pc);
- unlock_page_cgroup(newpage);
- mz = page_cgroup_zoneinfo(pc);
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_add_list(pc);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- return;
- }
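- /*
- * Illustrative call sequence (not part of the original file): a
- * plausible migration-core usage of the three hooks above would be
- *
- * if (mem_cgroup_prepare_migration(page)) {
- * ... copy page contents to newpage ...
- * mem_cgroup_page_migration(page, newpage);
- * mem_cgroup_end_migration(newpage);
- * }
- *
- * The extra reference taken by prepare_migration() is what keeps the
- * transfer safe against a concurrent uncharge.
- */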
- /*
- * This routine traverses the page_cgroups on the given list and drops them
- * all. It ignores page_cgroup->ref_cnt.
- * *And* it doesn't reclaim the pages themselves; it just removes page_cgroups.
- */
- #define FORCE_UNCHARGE_BATCH (128)
- static void
- mem_cgroup_force_empty_list(struct mem_cgroup *mem,
- struct mem_cgroup_per_zone *mz,
- int active)
- {
- struct page_cgroup *pc;
- struct page *page;
- int count;
- unsigned long flags;
- struct list_head *list;
- if (active)
- list = &mz->active_list;
- else
- list = &mz->inactive_list;
- if (list_empty(list))
- return;
- retry:
- count = FORCE_UNCHARGE_BATCH;
- spin_lock_irqsave(&mz->lru_lock, flags);
- while (--count && !list_empty(list)) {
- pc = list_entry(list->prev, struct page_cgroup, lru);
- page = pc->page;
- /* Avoid race with charge */
- atomic_set(&pc->ref_cnt, 0);
- if (clear_page_cgroup(page, pc) == pc) {
- css_put(&mem->css);
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- __mem_cgroup_remove_list(pc);
- kfree(pc);
- } else /* being uncharged? ... relax */
- break;
- }
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- if (!list_empty(list)) {
- cond_resched();
- goto retry;
- }
- return;
- }
- /*
- * Make the mem_cgroup's charge 0 if there is no task,
- * which enables this mem_cgroup to be deleted.
- */
- int mem_cgroup_force_empty(struct mem_cgroup *mem)
- {
- int ret = -EBUSY;
- int node, zid;
- css_get(&mem->css);
- /*
- * page reclaim code (kswapd etc..) will move pages between
- * active_list <-> inactive_list while we don't take a lock.
- * So, we have to loop here until all lists are empty.
- */
- while (mem->res.usage > 0) {
- if (atomic_read(&mem->css.cgroup->count) > 0)
- goto out;
- for_each_node_state(node, N_POSSIBLE)
- for (zid = 0; zid < MAX_NR_ZONES; zid++) {
- struct mem_cgroup_per_zone *mz;
- mz = mem_cgroup_zoneinfo(mem, node, zid);
- /* drop all page_cgroup in active_list */
- mem_cgroup_force_empty_list(mem, mz, 1);
- /* drop all page_cgroup in inactive_list */
- mem_cgroup_force_empty_list(mem, mz, 0);
- }
- }
- ret = 0;
- out:
- css_put(&mem->css);
- return ret;
- }
- int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
- {
- *tmp = memparse(buf, &buf);
- if (*buf != '\0')
- return -EINVAL;
- /*
- * Round up the value to the next page boundary.
- */
- *tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
- return 0;
- }
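- /*
- * Worked example (illustrative, not part of the original file): writing
- * "4100K" makes memparse() return 4198400, which is already a multiple
- * of a 4KB page and is kept as-is, while a raw "4097" rounds up to
- * ((4097 + 4095) >> 12) << 12 = 8192.
- */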
- static ssize_t mem_cgroup_read(struct cgroup *cont,
- struct cftype *cft, struct file *file,
- char __user *userbuf, size_t nbytes, loff_t *ppos)
- {
- return res_counter_read(&mem_cgroup_from_cont(cont)->res,
- cft->private, userbuf, nbytes, ppos,
- NULL);
- }
- static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
- struct file *file, const char __user *userbuf,
- size_t nbytes, loff_t *ppos)
- {
- return res_counter_write(&mem_cgroup_from_cont(cont)->res,
- cft->private, userbuf, nbytes, ppos,
- mem_cgroup_write_strategy);
- }
- static ssize_t mem_force_empty_write(struct cgroup *cont,
- struct cftype *cft, struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *ppos)
- {
- struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- int ret;
- ret = mem_cgroup_force_empty(mem);
- if (!ret)
- ret = nbytes;
- return ret;
- }
- /*
- * Note: this should be removed once cgroup supports write-only files.
- */
- static ssize_t mem_force_empty_read(struct cgroup *cont,
- struct cftype *cft,
- struct file *file, char __user *userbuf,
- size_t nbytes, loff_t *ppos)
- {
- return -EINVAL;
- }
- static const struct mem_cgroup_stat_desc {
- const char *msg;
- u64 unit;
- } mem_cgroup_stat_desc[] = {
- [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
- [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
- };
- static int mem_control_stat_show(struct seq_file *m, void *arg)
- {
- struct cgroup *cont = m->private;
- struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
- struct mem_cgroup_stat *stat = &mem_cont->stat;
- int i;
- for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
- s64 val;
- val = mem_cgroup_read_stat(stat, i);
- val *= mem_cgroup_stat_desc[i].unit;
- seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
- (long long)val);
- }
- /* showing # of active pages */
- {
- unsigned long active, inactive;
- inactive = mem_cgroup_get_all_zonestat(mem_cont,
- MEM_CGROUP_ZSTAT_INACTIVE);
- active = mem_cgroup_get_all_zonestat(mem_cont,
- MEM_CGROUP_ZSTAT_ACTIVE);
- seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
- seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
- }
- return 0;
- }
- static const struct file_operations mem_control_stat_file_operations = {
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
- static int mem_control_stat_open(struct inode *unused, struct file *file)
- {
- /* XXX __d_cont */
- struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
- file->f_op = &mem_control_stat_file_operations;
- return single_open(file, mem_control_stat_show, cont);
- }
- static struct cftype mem_cgroup_files[] = {
- {
- .name = "usage_in_bytes",
- .private = RES_USAGE,
- .read = mem_cgroup_read,
- },
- {
- .name = "limit_in_bytes",
- .private = RES_LIMIT,
- .write = mem_cgroup_write,
- .read = mem_cgroup_read,
- },
- {
- .name = "failcnt",
- .private = RES_FAILCNT,
- .read = mem_cgroup_read,
- },
- {
- .name = "force_empty",
- .write = mem_force_empty_write,
- .read = mem_force_empty_read,
- },
- {
- .name = "stat",
- .open = mem_control_stat_open,
- },
- };
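- /*
- * Illustrative view (not part of the original file): once populated,
- * each cgroup directory exposes the entries above as regular files,
- * prefixed with the subsystem name, e.g. (assuming a hypothetical
- * mount point /cgroups and a group "0"):
- *
- * /cgroups/0/memory.usage_in_bytes
- * /cgroups/0/memory.limit_in_bytes
- * /cgroups/0/memory.failcnt
- * /cgroups/0/memory.force_empty
- * /cgroups/0/memory.stat
- */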
- static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
- {
- struct mem_cgroup_per_node *pn;
- struct mem_cgroup_per_zone *mz;
- int zone;
- /*
- * This routine is called against possible nodes.
- * But it's a BUG to call kmalloc() against an offline node.
- *
- * TODO: this routine can waste a lot of memory for nodes which will
- * never be onlined. It would be better to use a memory hotplug callback
- * function.
- */
- if (node_state(node, N_HIGH_MEMORY))
- pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
- else
- pn = kmalloc(sizeof(*pn), GFP_KERNEL);
- if (!pn)
- return 1;
- mem->info.nodeinfo[node] = pn;
- memset(pn, 0, sizeof(*pn));
- for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- mz = &pn->zoneinfo[zone];
- INIT_LIST_HEAD(&mz->active_list);
- INIT_LIST_HEAD(&mz->inactive_list);
- spin_lock_init(&mz->lru_lock);
- }
- return 0;
- }
- static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
- {
- kfree(mem->info.nodeinfo[node]);
- }
- static struct cgroup_subsys_state *
- mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
- {
- struct mem_cgroup *mem;
- int node;
- if (unlikely((cont->parent) == NULL)) {
- mem = &init_mem_cgroup;
- init_mm.mem_cgroup = mem;
- } else
- mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
- if (mem == NULL)
- return ERR_PTR(-ENOMEM);
- res_counter_init(&mem->res);
- memset(&mem->info, 0, sizeof(mem->info));
- for_each_node_state(node, N_POSSIBLE)
- if (alloc_mem_cgroup_per_zone_info(mem, node))
- goto free_out;
- return &mem->css;
- free_out:
- for_each_node_state(node, N_POSSIBLE)
- free_mem_cgroup_per_zone_info(mem, node);
- if (cont->parent != NULL)
- kfree(mem);
- return ERR_PTR(-ENOMEM);
- }
- static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
- {
- struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- mem_cgroup_force_empty(mem);
- }
- static void mem_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
- {
- int node;
- struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- for_each_node_state(node, N_POSSIBLE)
- free_mem_cgroup_per_zone_info(mem, node);
- kfree(mem_cgroup_from_cont(cont));
- }
- static int mem_cgroup_populate(struct cgroup_subsys *ss,
- struct cgroup *cont)
- {
- return cgroup_add_files(cont, ss, mem_cgroup_files,
- ARRAY_SIZE(mem_cgroup_files));
- }
- static void mem_cgroup_move_task(struct cgroup_subsys *ss,
- struct cgroup *cont,
- struct cgroup *old_cont,
- struct task_struct *p)
- {
- struct mm_struct *mm;
- struct mem_cgroup *mem, *old_mem;
- mm = get_task_mm(p);
- if (mm == NULL)
- return;
- mem = mem_cgroup_from_cont(cont);
- old_mem = mem_cgroup_from_cont(old_cont);
- if (mem == old_mem)
- goto out;
- /*
- * Only thread group leaders are allowed to migrate, the mm_struct is
- * in effect owned by the leader
- */
- if (p->tgid != p->pid)
- goto out;
- css_get(&mem->css);
- rcu_assign_pointer(mm->mem_cgroup, mem);
- css_put(&old_mem->css);
- out:
- mmput(mm);
- return;
- }
- struct cgroup_subsys mem_cgroup_subsys = {
- .name = "memory",
- .subsys_id = mem_cgroup_subsys_id,
- .create = mem_cgroup_create,
- .pre_destroy = mem_cgroup_pre_destroy,
- .destroy = mem_cgroup_destroy,
- .populate = mem_cgroup_populate,
- .attach = mem_cgroup_move_task,
- .early_init = 0,
- };