/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};
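/*
 * Per-CPU statistics.  Each CPU's counters live in their own cacheline
 * (____cacheline_aligned_in_smp) so that updates from different CPUs do not
 * bounce the same cacheline around.
 */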
struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, no need to increment the preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}
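/*
 * Sum one statistic over all possible CPUs.  Readers do not synchronize with
 * concurrent updaters, so the result is only an approximate snapshot.
 */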
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;

	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark.  Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
#define PCGF_LOCK	(1UL << PCG_LOCK)
#define PCGF_FILE	(1UL << PCG_FILE)

static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 struct page_cgroup *pc,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	struct mem_cgroup_stat_cpu *cpustat;

	VM_BUG_ON(!irqs_disabled());

	cpustat = &stat->cpustat[smp_processor_id()];
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}
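/*
 * Find the per-zone info for the node/zone that a page_cgroup's page
 * currently belongs to.
 */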
static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int lru = LRU_BASE;

	if (PageCgroupUnevictable(pc))
		lru = LRU_UNEVICTABLE;
	else {
		if (PageCgroupActive(pc))
			lru += LRU_ACTIVE;
		if (PageCgroupFile(pc))
			lru += LRU_FILE;
	}

	MEM_CGROUP_ZSTAT(mz, lru) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int lru = LRU_BASE;

	if (PageCgroupUnevictable(pc))
		lru = LRU_UNEVICTABLE;
	else {
		if (PageCgroupActive(pc))
			lru += LRU_ACTIVE;
		if (PageCgroupFile(pc))
			lru += LRU_FILE;
	}

	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_add(&pc->lru, &mz->lists[lru]);

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
}
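/*
 * Move a page_cgroup to the LRU list @lru, updating the per-zone counters
 * and the PCG_ACTIVE/PCG_UNEVICTABLE flags to match.  The caller must hold
 * the zone's mz->lru_lock.
 */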
static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
	int active = PageCgroupActive(pc);
	int file = PageCgroupFile(pc);
	int unevictable = PageCgroupUnevictable(pc);
	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
				(LRU_FILE * !!file + !!active);

	if (lru == from)
		return;

	MEM_CGROUP_ZSTAT(mz, from) -= 1;
	/*
	 * Although this is done under mz->lru_lock, other flags that are not
	 * related to the LRU may be modified without the lock held, so we
	 * have to use the atomic set/clear flag operations.
	 */
	if (is_unevictable_lru(lru)) {
		ClearPageCgroupActive(pc);
		SetPageCgroupUnevictable(pc);
	} else {
		if (is_active_lru(lru))
			SetPageCgroupActive(pc);
		else
			ClearPageCgroupActive(pc);
		ClearPageCgroupUnevictable(pc);
	}

	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_move(&pc->lru, &mz->lists[lru]);
}
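/*
 * Return non-zero if @task's mm is charged to the memory cgroup @mem.
 */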
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	pc = lookup_page_cgroup(page);
	if (!trylock_page_cgroup(pc))
		return;
	if (pc && PageCgroupUsed(pc)) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, lru);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(pc);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * prev_priority control...this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	long nr_pages;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

	return (nr_pages >> priority);
}
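/*
 * Scan up to @nr_to_scan pages from the tail of this cgroup's per-zone LRU
 * list @lru, move the isolatable ones onto @dst and return the number taken.
 * *@scanned is set to the number of pages actually examined.  This is the
 * cgroup-aware counterpart of isolate_lru_pages() used by vmscan.c.
 */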
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * !!file + !!active;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		/*
		 * TODO: play better with lumpy reclaim, grabbing anything.
		 */
		if (PageUnevictable(page) ||
		    (PageActive(page) && !active) ||
		    (!PageActive(page) && active)) {
			__mem_cgroup_move_lists(pc, page_lru(page));
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode, file) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	pc = lookup_page_cgroup(page);
	/* can happen at boot */
	if (unlikely(!pc))
		return 0;
	prefetchw(pc);
	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);

		goto done;
	}
	pc->mem_cgroup = mem;
	/*
	 * If a page is accounted as a page cache, insert to inactive list.
	 * If anon, insert to active list.
	 */
	pc->flags = pcg_default_flags[ctype];

	mz = page_cgroup_zoneinfo(pc);

	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);
	unlock_page_cgroup(pc);

done:
	return 0;
out:
	css_put(&mem->css);
	return -ENOMEM;
}
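/*
 * Charge an anonymous page when it is first mapped.  Pages that are already
 * mapped, and file-backed pages with a mapping (those are charged through
 * mem_cgroup_cache_charge() instead), are skipped here.
 */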
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect that with the PageAnon() check.  A newly-mapped anonymous
	 * page's page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache().  But some filesystems (shmem) precharge the
	 * page before calling it and then call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * In the GFP_NOWAIT case, the page may already be charged before
	 * add_to_page_cache() is reached (see shmem.c).  Check for that here
	 * and avoid charging twice.  (It works, but costs a bit more.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page))
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
	else
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return;

	lock_page_cgroup(pc);
	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
	     || !PageCgroupUsed(pc)) {
		/* This happens at race in zap_pte_range() and do_swap_page()*/
		unlock_page_cgroup(pc);
		return;
	}
	ClearPageCgroupUsed(pc);
	mem = pc->mem_cgroup;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);
	unlock_page_cgroup(pc);

	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	return;
}
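/*
 * Uncharge an anonymous page once it is no longer mapped anywhere.  Mapped
 * pages and file-backed pages with a mapping are skipped here; the latter
 * are uncharged via mem_cgroup_uncharge_cache_page().
 */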
void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (PageCgroupCache(pc)) {
			if (page_is_file_cache(page))
				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
			else
				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
		}
	}
	unlock_page_cgroup(pc);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* remove redundant charge if migration failed*/
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but was not remapped again because the
	 *    task exited and the newpage is obsolete.  In this case the new
	 *    page may be in the swap cache, so we just call
	 *    mem_cgroup_uncharge_page() to avoid a mess; the page_cgroup is
	 *    removed if it is unnecessary.  File cache pages are still on
	 *    the radix-tree, so we don't have to care about them.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * Try to shrink memory usage under the specified memory controller.
 * This is typically used to reclaim pages allocated on behalf of shmem,
 * reducing the side effects of such allocations on the owning mem_cgroup.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
		progress += res_counter_check_under_limit(&mem->res);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}
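/*
 * Set a new limit on the res_counter.  If current usage is above the new
 * limit, reclaim pages from the group; give up with -EBUSY after
 * MEM_CGROUP_RECLAIM_RETRIES reclaim passes that make no progress, or with
 * -EINTR if a signal is pending.
 */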
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	int ret = 0;

	while (res_counter_set_limit(&memcg->res, val)) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!retry_count) {
			ret = -EBUSY;
			break;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
		if (!progress)
			retry_count--;
	}
	return ret;
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all.  It does not reclaim the pages themselves, it only removes the
 * page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    enum lru_list lru)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	list = &mz->lists[lru];

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		if (!PageCgroupUsed(pc))
			break;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on LRU. !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else {
			spin_lock_irqsave(&mz->lru_lock, flags);
			break;
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task attached to it.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take any lock.
	 * So, we have to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				enum lru_list l;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				for_each_lru(l)
					mem_cgroup_force_empty_list(mem, mz, l);
			}
		cond_resched();
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	unsigned long long val;
	int ret;

	switch (cft->private) {
	case RES_LIMIT:
		/* This function does all the necessary parsing...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active_anon, inactive_anon;
		unsigned long active_file, inactive_file;
		unsigned long unevictable;

		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_ANON);
		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_ANON);
		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_FILE);
		active_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_FILE);
		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
							LRU_UNEVICTABLE);

		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};
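/*
 * The entries above appear in each memory cgroup directory with a "memory."
 * prefix.  For example (assuming the cgroup filesystem is mounted at
 * /cgroup with the memory subsystem enabled, and a group "g0" exists):
 *
 *	echo 64M > /cgroup/g0/memory.limit_in_bytes
 *	cat /cgroup/g0/memory.usage_in_bytes
 *	cat /cgroup/g0/memory.stat
 *
 * Writes to limit_in_bytes accept the usual memparse() suffixes (K, M, G)
 * via res_counter_memparse_write_strategy().
 */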
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		spin_lock_init(&mz->lru_lock);
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}
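/*
 * struct mem_cgroup embeds a per-CPU statistics array sized by NR_CPUS, so
 * on large configurations it can exceed a page.  Fall back to vmalloc() in
 * that case to avoid a high-order kmalloc() allocation.
 */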
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}
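/*
 * Called when a new cgroup directory is created.  The root cgroup uses the
 * statically allocated init_mem_cgroup; children are allocated dynamically.
 */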
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct is
	 * in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};