/*
 * memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};
/*
 * For accounting with IRQs disabled; no need to raise the preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;

	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
/*
 * per-zone information in memory controller.
 */
enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
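/*
 * Statically allocated descriptor used for the root cgroup
 * (see mem_cgroup_create()).
 */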
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif
/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
};
/*
 * Always called with the LRU lock held and IRQs disabled, so there is no
 * need for preempt_disable() around the per-CPU update.
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}
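/*
 * Sum one per-zone LRU counter (active or inactive) over every online node
 * and zone of the given memory cgroup.
 */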
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}
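/*
 * LRU list management helpers. Callers must hold mz->lru_lock;
 * __mem_cgroup_add_list() and __mem_cgroup_remove_list() also update the
 * charge statistics and therefore expect interrupts to be disabled
 * (see the VM_BUG_ON() in mem_cgroup_charge_statistics()).
 */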
static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
					struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}
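/*
 * Return non-zero if @task's mm is currently charged to the memory
 * cgroup @mem.
 */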
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}
/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}
/*
 * This function is called from vmscan.c. In the page reclaim loop, the
 * balance between the active and inactive lists is calculated. For page
 * reclaim driven by the memory controller we should use the mem_cgroup's
 * imbalance rather than the zone's global LRU imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}
/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}
/*
 * Calculate the number of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}
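/*
 * Scan up to @nr_to_scan page_cgroups on the cgroup's per-zone active or
 * inactive list, isolating pages that pass __isolate_lru_page() onto @dst.
 * The number of entries examined is returned through @scanned; the return
 * value is the number of pages actually taken. Called from the reclaim
 * code in vmscan.c when a memory cgroup is being targeted.
 */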
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
	if (unlikely(pc == NULL))
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->mem_cgroup = mem;
	pc->page = page;
	/*
	 * If a page is accounted as a page cache, insert to inactive list.
	 * If anon, insert to active list.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;
	else
		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

	lock_page_cgroup(page);
	if (unlikely(page_get_page_cgroup(page))) {
		unlock_page_cgroup(page);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto done;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}
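/*
 * Charge an anonymous page when it is first mapped. Pages that are already
 * mapped, and file-backed pages (which are charged via
 * mem_cgroup_cache_charge() when added to the page cache), are skipped.
 */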
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it by the PageAnon() check. A newly-mapped anon page's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;

	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(), but some filesystems (shmem) pre-charge the
	 * page before calling it and then call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * For the GFP_NOWAIT case, the page may already be charged before we
	 * get here (see shmem.c). Check for that and avoid charging twice.
	 * (It works, but costs a bit more.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		if (pc) {
			VM_BUG_ON(pc->page != page);
			VM_BUG_ON(!pc->mem_cgroup);
			unlock_page_cgroup(page);
			return 0;
		}
		unlock_page_cgroup(page);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}
/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (unlikely(!pc))
		goto unlock;

	VM_BUG_ON(pc->page != page);

	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
		|| page_mapped(page)))
		goto unlock;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	mem = pc->mem_cgroup;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
	unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
/*
 * Before starting migration, account against new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	}
	unlock_page_cgroup(page);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but not remapped again because the task
	 *    exits and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
	 *    always, to avoid a mess. The page_cgroup will be removed if
	 *    unnecessary. File cache pages are still on the radix-tree;
	 *    don't worry about them.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}
/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used to reclaim pages on behalf of shmem,
 * reducing the side effects that shmem page allocations have on the
 * mem_cgroup they are charged to.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}
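/*
 * Try to install a new hard limit. While current usage is above the new
 * value, reclaim pages from the cgroup; the retry counter is only decremented
 * when a reclaim pass makes no progress. Returns -EBUSY when the retries are
 * exhausted and -EINTR if a signal is pending.
 */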
int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	int ret = 0;

	while (res_counter_set_limit(&memcg->res, val)) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!retry_count) {
			ret = -EBUSY;
			break;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
		if (!progress)
			retry_count--;
	}
	return ret;
}
/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It does not reclaim the pages themselves; it only removes the
 * page_cgroup accounting.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on LRU. !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else
			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}
/*
 * Make the mem_cgroup's charge drop to 0 if no task is using it.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to do loop here until all lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}
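/*
 * Read handler for the usage_in_bytes, limit_in_bytes, max_usage_in_bytes
 * and failcnt control files; cft->private selects which res_counter member
 * is reported.
 */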
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	unsigned long long val;
	int ret;

	switch (cft->private) {
	case RES_LIMIT:
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}
static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};
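/*
 * Emit the per-cgroup counters for the memory.stat file. Byte-based values
 * (cache, rss, active, inactive) are scaled by PAGE_SIZE; the paging event
 * counters are reported as raw counts.
 */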
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes, but it is a BUG
	 * to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}
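/*
 * struct mem_cgroup embeds a per-CPU statistics array, so on configurations
 * with a large NR_CPUS it can be bigger than a page; fall back to vmalloc()
 * in that case to avoid a higher-order kmalloc() allocation.
 */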
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}
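/*
 * Create the css for a new memory cgroup. The root cgroup uses the
 * statically allocated init_mem_cgroup and also creates the page_cgroup
 * slab cache; child cgroups are allocated dynamically.
 */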
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};