memcontrol.c

  1. /* memcontrol.c - Memory Controller
  2. *
  3. * Copyright IBM Corporation, 2007
  4. * Author Balbir Singh <balbir@linux.vnet.ibm.com>
  5. *
  6. * Copyright 2007 OpenVZ SWsoft Inc
  7. * Author: Pavel Emelianov <xemul@openvz.org>
  8. *
  9. * Memory thresholds
  10. * Copyright (C) 2009 Nokia Corporation
  11. * Author: Kirill A. Shutemov
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2 of the License, or
  16. * (at your option) any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. */
  23. #include <linux/res_counter.h>
  24. #include <linux/memcontrol.h>
  25. #include <linux/cgroup.h>
  26. #include <linux/mm.h>
  27. #include <linux/hugetlb.h>
  28. #include <linux/pagemap.h>
  29. #include <linux/smp.h>
  30. #include <linux/page-flags.h>
  31. #include <linux/backing-dev.h>
  32. #include <linux/bit_spinlock.h>
  33. #include <linux/rcupdate.h>
  34. #include <linux/limits.h>
  35. #include <linux/mutex.h>
  36. #include <linux/rbtree.h>
  37. #include <linux/slab.h>
  38. #include <linux/swap.h>
  39. #include <linux/swapops.h>
  40. #include <linux/spinlock.h>
  41. #include <linux/eventfd.h>
  42. #include <linux/sort.h>
  43. #include <linux/fs.h>
  44. #include <linux/seq_file.h>
  45. #include <linux/vmalloc.h>
  46. #include <linux/mm_inline.h>
  47. #include <linux/page_cgroup.h>
  48. #include <linux/cpu.h>
  49. #include <linux/oom.h>
  50. #include "internal.h"
  51. #include <asm/uaccess.h>
  52. #include <trace/events/vmscan.h>
  53. struct cgroup_subsys mem_cgroup_subsys __read_mostly;
  54. #define MEM_CGROUP_RECLAIM_RETRIES 5
  55. struct mem_cgroup *root_mem_cgroup __read_mostly;
  56. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  57. /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
  58. int do_swap_account __read_mostly;
  59. static int really_do_swap_account __initdata = 1; /* to remember the boot option */
  60. #else
  61. #define do_swap_account (0)
  62. #endif
  63. /*
  64. * Per memcg event counter is incremented at every pagein/pageout. This counter
  65. * is used to trigger some periodic events. This is straightforward and better
  66. * than using jiffies etc. to handle periodic memcg events.
  67. *
  68. * These values will be used as !((event) & ((1 <<(thresh)) - 1))
  69. */
  70. #define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
  71. #define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
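/*
 * Worked example (editorial addition, not in the original source): the event
 * check is !((event) & ((1 << (thresh)) - 1)), so with
 * THRESHOLDS_EVENTS_THRESH = 7 the mask is 127 and the check fires only when
 * the per-cpu event count is a multiple of 128, e.g.
 *
 *	event == 256: !(256 & 127) -> true  (thresholds are re-evaluated)
 *	event == 257: !(257 & 127) -> false (nothing happens)
 *
 * With SOFTLIMIT_EVENTS_THRESH = 10 the same check fires once every 1024
 * events.
 */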
  72. /*
  73. * Statistics for memory cgroup.
  74. */
  75. enum mem_cgroup_stat_index {
  76. /*
  77. * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
  78. */
  79. MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
  80. MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
  81. MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
  82. MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
  83. MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
  84. MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
  85. MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
  86. /* incremented at every pagein/pageout */
  87. MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
  88. MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */
  89. MEM_CGROUP_STAT_NSTATS,
  90. };
  91. struct mem_cgroup_stat_cpu {
  92. s64 count[MEM_CGROUP_STAT_NSTATS];
  93. };
  94. /*
  95. * per-zone information in memory controller.
  96. */
  97. struct mem_cgroup_per_zone {
  98. /*
  99. * spin_lock to protect the per cgroup LRU
  100. */
  101. struct list_head lists[NR_LRU_LISTS];
  102. unsigned long count[NR_LRU_LISTS];
  103. struct zone_reclaim_stat reclaim_stat;
  104. struct rb_node tree_node; /* RB tree node */
  105. unsigned long long usage_in_excess;/* Set to the value by which */
  106. /* the soft limit is exceeded*/
  107. bool on_tree;
  108. struct mem_cgroup *mem; /* Back pointer, we cannot */
  109. /* use container_of */
  110. };
  111. /* Macro for accessing counter */
  112. #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
  113. struct mem_cgroup_per_node {
  114. struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
  115. };
  116. struct mem_cgroup_lru_info {
  117. struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
  118. };
  119. /*
  120. * Cgroups above their limits are maintained in an RB-tree, independent of
  121. * their hierarchy representation
  122. */
  123. struct mem_cgroup_tree_per_zone {
  124. struct rb_root rb_root;
  125. spinlock_t lock;
  126. };
  127. struct mem_cgroup_tree_per_node {
  128. struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
  129. };
  130. struct mem_cgroup_tree {
  131. struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
  132. };
  133. static struct mem_cgroup_tree soft_limit_tree __read_mostly;
  134. struct mem_cgroup_threshold {
  135. struct eventfd_ctx *eventfd;
  136. u64 threshold;
  137. };
  138. /* For threshold */
  139. struct mem_cgroup_threshold_ary {
  140. /* An array index points to threshold just below usage. */
  141. int current_threshold;
  142. /* Size of entries[] */
  143. unsigned int size;
  144. /* Array of thresholds */
  145. struct mem_cgroup_threshold entries[0];
  146. };
  147. struct mem_cgroup_thresholds {
  148. /* Primary thresholds array */
  149. struct mem_cgroup_threshold_ary *primary;
  150. /*
  151. * Spare threshold array.
  152. * This is needed to make mem_cgroup_unregister_event() "never fail".
  153. * It must be able to store at least primary->size - 1 entries.
  154. */
  155. struct mem_cgroup_threshold_ary *spare;
  156. };
  157. /* for OOM */
  158. struct mem_cgroup_eventfd_list {
  159. struct list_head list;
  160. struct eventfd_ctx *eventfd;
  161. };
  162. static void mem_cgroup_threshold(struct mem_cgroup *mem);
  163. static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
  164. /*
  165. * The memory controller data structure. The memory controller controls both
  166. * page cache and RSS per cgroup. We would eventually like to provide
  167. * statistics based on the statistics developed by Rik Van Riel for clock-pro,
  168. * to help the administrator determine what knobs to tune.
  169. *
  170. * TODO: Add a water mark for the memory controller. Reclaim will begin when
  171. * we hit the water mark. May be even add a low water mark, such that
  172. * no reclaim occurs from a cgroup at its low water mark; this is
  173. * a feature that will be implemented much later in the future.
  174. */
  175. struct mem_cgroup {
  176. struct cgroup_subsys_state css;
  177. /*
  178. * the counter to account for memory usage
  179. */
  180. struct res_counter res;
  181. /*
  182. * the counter to account for mem+swap usage.
  183. */
  184. struct res_counter memsw;
  185. /*
  186. * Per cgroup active and inactive list, similar to the
  187. * per zone LRU lists.
  188. */
  189. struct mem_cgroup_lru_info info;
  190. /*
  191. * protects reclaim-related members.
  192. */
  193. spinlock_t reclaim_param_lock;
  194. /*
  195. * While reclaiming in a hierarchy, we cache the last child we
  196. * reclaimed from.
  197. */
  198. int last_scanned_child;
  199. /*
  200. * Should the accounting and control be hierarchical, per subtree?
  201. */
  202. bool use_hierarchy;
  203. atomic_t oom_lock;
  204. atomic_t refcnt;
  205. unsigned int swappiness;
  206. /* OOM-Killer disable */
  207. int oom_kill_disable;
  208. /* set when res.limit == memsw.limit */
  209. bool memsw_is_minimum;
  210. /* protect arrays of thresholds */
  211. struct mutex thresholds_lock;
  212. /* thresholds for memory usage. RCU-protected */
  213. struct mem_cgroup_thresholds thresholds;
  214. /* thresholds for mem+swap usage. RCU-protected */
  215. struct mem_cgroup_thresholds memsw_thresholds;
  216. /* For oom notifier event fd */
  217. struct list_head oom_notify;
  218. /*
  219. * Should we move charges of a task when a task is moved into this
  220. * mem_cgroup? And what type of charges should we move?
  221. */
  222. unsigned long move_charge_at_immigrate;
  223. /*
  224. * percpu counter.
  225. */
  226. struct mem_cgroup_stat_cpu *stat;
  227. /*
  228. * used when a cpu is offlined and for other synchronization.
  229. * See mem_cgroup_read_stat().
  230. */
  231. struct mem_cgroup_stat_cpu nocpu_base;
  232. spinlock_t pcp_counter_lock;
  233. };
  234. /* Stuffs for move charges at task migration. */
  235. /*
  236. * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
  237. * left-shifted bitmap of these types.
  238. */
  239. enum move_type {
  240. MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
  241. MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
  242. NR_MOVE_TYPE,
  243. };
  244. /* "mc" and its members are protected by cgroup_mutex */
  245. static struct move_charge_struct {
  246. spinlock_t lock; /* for from, to, moving_task */
  247. struct mem_cgroup *from;
  248. struct mem_cgroup *to;
  249. unsigned long precharge;
  250. unsigned long moved_charge;
  251. unsigned long moved_swap;
  252. struct task_struct *moving_task; /* a task moving charges */
  253. wait_queue_head_t waitq; /* a waitq for other context */
  254. } mc = {
  255. .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
  256. .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
  257. };
  258. static bool move_anon(void)
  259. {
  260. return test_bit(MOVE_CHARGE_TYPE_ANON,
  261. &mc.to->move_charge_at_immigrate);
  262. }
  263. static bool move_file(void)
  264. {
  265. return test_bit(MOVE_CHARGE_TYPE_FILE,
  266. &mc.to->move_charge_at_immigrate);
  267. }
  268. /*
  269. * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  270. * limit reclaim to prevent infinite loops, if they ever occur.
  271. */
  272. #define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
  273. #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
  274. enum charge_type {
  275. MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
  276. MEM_CGROUP_CHARGE_TYPE_MAPPED,
  277. MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
  278. MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
  279. MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
  280. MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
  281. NR_CHARGE_TYPE,
  282. };
  283. /* only for here (for easy reading.) */
  284. #define PCGF_CACHE (1UL << PCG_CACHE)
  285. #define PCGF_USED (1UL << PCG_USED)
  286. #define PCGF_LOCK (1UL << PCG_LOCK)
  287. /* Not used, but added here for completeness */
  288. #define PCGF_ACCT (1UL << PCG_ACCT)
  289. /* for encoding cft->private value on file */
  290. #define _MEM (0)
  291. #define _MEMSWAP (1)
  292. #define _OOM_TYPE (2)
  293. #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
  294. #define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
  295. #define MEMFILE_ATTR(val) ((val) & 0xffff)
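/*
 * Worked example (editorial addition): cft->private packs the counter type in
 * the upper 16 bits and the res_counter attribute in the lower 16 bits, e.g.
 *
 *	priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);   becomes (1 << 16) | RES_LIMIT
 *	MEMFILE_TYPE(priv) == _MEMSWAP
 *	MEMFILE_ATTR(priv) == RES_LIMIT
 */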
  296. /* Used for the OOM notifier */
  297. #define OOM_CONTROL (0)
  298. /*
  299. * Reclaim flags for mem_cgroup_hierarchical_reclaim
  300. */
  301. #define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
  302. #define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
  303. #define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
  304. #define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
  305. #define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
  306. #define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
  307. static void mem_cgroup_get(struct mem_cgroup *mem);
  308. static void mem_cgroup_put(struct mem_cgroup *mem);
  309. static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
  310. static void drain_all_stock_async(void);
  311. static struct mem_cgroup_per_zone *
  312. mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
  313. {
  314. return &mem->info.nodeinfo[nid]->zoneinfo[zid];
  315. }
  316. struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
  317. {
  318. return &mem->css;
  319. }
  320. static struct mem_cgroup_per_zone *
  321. page_cgroup_zoneinfo(struct page_cgroup *pc)
  322. {
  323. struct mem_cgroup *mem = pc->mem_cgroup;
  324. int nid = page_cgroup_nid(pc);
  325. int zid = page_cgroup_zid(pc);
  326. if (!mem)
  327. return NULL;
  328. return mem_cgroup_zoneinfo(mem, nid, zid);
  329. }
  330. static struct mem_cgroup_tree_per_zone *
  331. soft_limit_tree_node_zone(int nid, int zid)
  332. {
  333. return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
  334. }
  335. static struct mem_cgroup_tree_per_zone *
  336. soft_limit_tree_from_page(struct page *page)
  337. {
  338. int nid = page_to_nid(page);
  339. int zid = page_zonenum(page);
  340. return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
  341. }
  342. static void
  343. __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
  344. struct mem_cgroup_per_zone *mz,
  345. struct mem_cgroup_tree_per_zone *mctz,
  346. unsigned long long new_usage_in_excess)
  347. {
  348. struct rb_node **p = &mctz->rb_root.rb_node;
  349. struct rb_node *parent = NULL;
  350. struct mem_cgroup_per_zone *mz_node;
  351. if (mz->on_tree)
  352. return;
  353. mz->usage_in_excess = new_usage_in_excess;
  354. if (!mz->usage_in_excess)
  355. return;
  356. while (*p) {
  357. parent = *p;
  358. mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
  359. tree_node);
  360. if (mz->usage_in_excess < mz_node->usage_in_excess)
  361. p = &(*p)->rb_left;
  362. /*
  363. * We can't avoid mem cgroups that are over their soft
  364. * limit by the same amount
  365. */
  366. else if (mz->usage_in_excess >= mz_node->usage_in_excess)
  367. p = &(*p)->rb_right;
  368. }
  369. rb_link_node(&mz->tree_node, parent, p);
  370. rb_insert_color(&mz->tree_node, &mctz->rb_root);
  371. mz->on_tree = true;
  372. }
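/*
 * Editorial note: since equal usage_in_excess keys descend to the right in
 * the loop above, an in-order walk keeps groups sorted by how far they exceed
 * their soft limit, and rb_last() (used by
 * __mem_cgroup_largest_soft_limit_node() below) returns a group with the
 * largest excess; ties effectively go to the most recently inserted group.
 */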
  373. static void
  374. __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
  375. struct mem_cgroup_per_zone *mz,
  376. struct mem_cgroup_tree_per_zone *mctz)
  377. {
  378. if (!mz->on_tree)
  379. return;
  380. rb_erase(&mz->tree_node, &mctz->rb_root);
  381. mz->on_tree = false;
  382. }
  383. static void
  384. mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
  385. struct mem_cgroup_per_zone *mz,
  386. struct mem_cgroup_tree_per_zone *mctz)
  387. {
  388. spin_lock(&mctz->lock);
  389. __mem_cgroup_remove_exceeded(mem, mz, mctz);
  390. spin_unlock(&mctz->lock);
  391. }
  392. static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
  393. {
  394. unsigned long long excess;
  395. struct mem_cgroup_per_zone *mz;
  396. struct mem_cgroup_tree_per_zone *mctz;
  397. int nid = page_to_nid(page);
  398. int zid = page_zonenum(page);
  399. mctz = soft_limit_tree_from_page(page);
  400. /*
  401. * Necessary to update all ancestors when hierarchy is used,
  402. * because their event counters are not touched.
  403. */
  404. for (; mem; mem = parent_mem_cgroup(mem)) {
  405. mz = mem_cgroup_zoneinfo(mem, nid, zid);
  406. excess = res_counter_soft_limit_excess(&mem->res);
  407. /*
  408. * We have to update the tree if mz is on the RB-tree or
  409. * mem is over its soft limit.
  410. */
  411. if (excess || mz->on_tree) {
  412. spin_lock(&mctz->lock);
  413. /* if on-tree, remove it */
  414. if (mz->on_tree)
  415. __mem_cgroup_remove_exceeded(mem, mz, mctz);
  416. /*
  417. * Insert again. mz->usage_in_excess will be updated.
  418. * If excess is 0, no tree ops.
  419. */
  420. __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
  421. spin_unlock(&mctz->lock);
  422. }
  423. }
  424. }
  425. static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
  426. {
  427. int node, zone;
  428. struct mem_cgroup_per_zone *mz;
  429. struct mem_cgroup_tree_per_zone *mctz;
  430. for_each_node_state(node, N_POSSIBLE) {
  431. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  432. mz = mem_cgroup_zoneinfo(mem, node, zone);
  433. mctz = soft_limit_tree_node_zone(node, zone);
  434. mem_cgroup_remove_exceeded(mem, mz, mctz);
  435. }
  436. }
  437. }
  438. static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
  439. {
  440. return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
  441. }
  442. static struct mem_cgroup_per_zone *
  443. __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  444. {
  445. struct rb_node *rightmost = NULL;
  446. struct mem_cgroup_per_zone *mz;
  447. retry:
  448. mz = NULL;
  449. rightmost = rb_last(&mctz->rb_root);
  450. if (!rightmost)
  451. goto done; /* Nothing to reclaim from */
  452. mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
  453. /*
  454. * Remove the node now but someone else can add it back,
  455. * we will add it back at the end of reclaim to its correct
  456. * position in the tree.
  457. */
  458. __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
  459. if (!res_counter_soft_limit_excess(&mz->mem->res) ||
  460. !css_tryget(&mz->mem->css))
  461. goto retry;
  462. done:
  463. return mz;
  464. }
  465. static struct mem_cgroup_per_zone *
  466. mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  467. {
  468. struct mem_cgroup_per_zone *mz;
  469. spin_lock(&mctz->lock);
  470. mz = __mem_cgroup_largest_soft_limit_node(mctz);
  471. spin_unlock(&mctz->lock);
  472. return mz;
  473. }
  474. /*
  475. * Implementation Note: reading percpu statistics for memcg.
  476. *
  477. * Both vmstat[] and percpu_counter use thresholds and do periodic
  478. * synchronization to implement a "quick" read. There is a trade-off between
  479. * reading cost and precision of the value, so we may eventually implement
  480. * periodic synchronization of the counters in memcg as well.
  481. *
  482. * But this _read() function is used for the user interface right now. Users
  483. * account memory usage by memory cgroup and _always_ require an exact value
  484. * because they account memory. Even if we provided a quick-and-fuzzy read,
  485. * we would still have to visit all online cpus and sum the values. So, for now,
  486. * the extra synchronization is not implemented (it exists only for cpu hotplug).
  487. *
  488. * If there are kernel-internal users which can make use of a not-exact value,
  489. * and reading every cpu's value becomes a performance bottleneck in some
  490. * common workload, thresholds and synchronization as in vmstat[] should be
  491. * implemented.
  492. */
  493. static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
  494. enum mem_cgroup_stat_index idx)
  495. {
  496. int cpu;
  497. s64 val = 0;
  498. get_online_cpus();
  499. for_each_online_cpu(cpu)
  500. val += per_cpu(mem->stat->count[idx], cpu);
  501. #ifdef CONFIG_HOTPLUG_CPU
  502. spin_lock(&mem->pcp_counter_lock);
  503. val += mem->nocpu_base.count[idx];
  504. spin_unlock(&mem->pcp_counter_lock);
  505. #endif
  506. put_online_cpus();
  507. return val;
  508. }
  509. static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
  510. {
  511. s64 ret;
  512. ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
  513. ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
  514. return ret;
  515. }
  516. static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
  517. bool charge)
  518. {
  519. int val = (charge) ? 1 : -1;
  520. this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
  521. }
  522. static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
  523. struct page_cgroup *pc,
  524. bool charge)
  525. {
  526. int val = (charge) ? 1 : -1;
  527. preempt_disable();
  528. if (PageCgroupCache(pc))
  529. __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
  530. else
  531. __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
  532. if (charge)
  533. __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
  534. else
  535. __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
  536. __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
  537. preempt_enable();
  538. }
  539. static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
  540. enum lru_list idx)
  541. {
  542. int nid, zid;
  543. struct mem_cgroup_per_zone *mz;
  544. u64 total = 0;
  545. for_each_online_node(nid)
  546. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  547. mz = mem_cgroup_zoneinfo(mem, nid, zid);
  548. total += MEM_CGROUP_ZSTAT(mz, idx);
  549. }
  550. return total;
  551. }
  552. static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
  553. {
  554. s64 val;
  555. val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
  556. return !(val & ((1 << event_mask_shift) - 1));
  557. }
  558. /*
  559. * Check events in order.
  560. *
  561. */
  562. static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
  563. {
  564. /* threshold event is triggered in finer grain than soft limit */
  565. if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
  566. mem_cgroup_threshold(mem);
  567. if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
  568. mem_cgroup_update_tree(mem, page);
  569. }
  570. }
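/*
 * Editorial note: the soft-limit check above is nested inside the threshold
 * check. That is safe because the soft-limit period (1024 events) is a
 * multiple of the threshold period (128 events), so every counter value that
 * should trigger a soft-limit tree update also passes the threshold check.
 */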
  571. static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
  572. {
  573. return container_of(cgroup_subsys_state(cont,
  574. mem_cgroup_subsys_id), struct mem_cgroup,
  575. css);
  576. }
  577. struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
  578. {
  579. /*
  580. * mm_update_next_owner() may clear mm->owner to NULL
  581. * if it races with swapoff, page migration, etc.
  582. * So this can be called with p == NULL.
  583. */
  584. if (unlikely(!p))
  585. return NULL;
  586. return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
  587. struct mem_cgroup, css);
  588. }
  589. static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
  590. {
  591. struct mem_cgroup *mem = NULL;
  592. if (!mm)
  593. return NULL;
  594. /*
  595. * Because we have no locks, mm->owner may be being moved to another
  596. * cgroup. We use css_tryget() here even if this looks
  597. * pessimistic (rather than adding locks here).
  598. */
  599. rcu_read_lock();
  600. do {
  601. mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
  602. if (unlikely(!mem))
  603. break;
  604. } while (!css_tryget(&mem->css));
  605. rcu_read_unlock();
  606. return mem;
  607. }
  608. /* The caller has to guarantee "mem" exists before calling this */
  609. static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
  610. {
  611. struct cgroup_subsys_state *css;
  612. int found;
  613. if (!mem) /* ROOT cgroup has the smallest ID */
  614. return root_mem_cgroup; /*css_put/get against root is ignored*/
  615. if (!mem->use_hierarchy) {
  616. if (css_tryget(&mem->css))
  617. return mem;
  618. return NULL;
  619. }
  620. rcu_read_lock();
  621. /*
  622. * search for the memory cgroup which has the smallest ID under the given
  623. * ROOT cgroup (ID >= 1).
  624. */
  625. css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
  626. if (css && css_tryget(css))
  627. mem = container_of(css, struct mem_cgroup, css);
  628. else
  629. mem = NULL;
  630. rcu_read_unlock();
  631. return mem;
  632. }
  633. static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
  634. struct mem_cgroup *root,
  635. bool cond)
  636. {
  637. int nextid = css_id(&iter->css) + 1;
  638. int found;
  639. int hierarchy_used;
  640. struct cgroup_subsys_state *css;
  641. hierarchy_used = iter->use_hierarchy;
  642. css_put(&iter->css);
  643. /* If no ROOT, walk all, ignore hierarchy */
  644. if (!cond || (root && !hierarchy_used))
  645. return NULL;
  646. if (!root)
  647. root = root_mem_cgroup;
  648. do {
  649. iter = NULL;
  650. rcu_read_lock();
  651. css = css_get_next(&mem_cgroup_subsys, nextid,
  652. &root->css, &found);
  653. if (css && css_tryget(css))
  654. iter = container_of(css, struct mem_cgroup, css);
  655. rcu_read_unlock();
  656. /* If css is NULL, no more cgroups will be found */
  657. nextid = found + 1;
  658. } while (css && !iter);
  659. return iter;
  660. }
  661. /*
  662. * for_each_mem_cgroup_tree() visits all cgroups under the tree. Be careful:
  663. * leaving the loop with "break" is not allowed, because we hold a reference
  664. * count. Instead, set "cond" to false and "continue" to exit (see the usage sketch after the macros below).
  665. */
  666. #define for_each_mem_cgroup_tree_cond(iter, root, cond) \
  667. for (iter = mem_cgroup_start_loop(root);\
  668. iter != NULL;\
  669. iter = mem_cgroup_get_next(iter, root, cond))
  670. #define for_each_mem_cgroup_tree(iter, root) \
  671. for_each_mem_cgroup_tree_cond(iter, root, true)
  672. #define for_each_mem_cgroup_all(iter) \
  673. for_each_mem_cgroup_tree_cond(iter, NULL, true)
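/*
 * Usage sketch (editorial addition; visit_memcg() is a hypothetical callback):
 * each step of the walk holds a css reference, so the loop must not be left
 * with "break". Clear the condition instead; the next call to
 * mem_cgroup_get_next() then drops the reference and ends the walk:
 *
 *	bool cond = true;
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond)
 *		if (visit_memcg(iter) < 0)
 *			cond = false;	(never "break" here)
 */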
  674. static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
  675. {
  676. return (mem == root_mem_cgroup);
  677. }
  678. /*
  679. * Following LRU functions are allowed to be used without PCG_LOCK.
  680. * Operations are called by the global LRU routines, independently of memcg.
  681. * What we have to take care of here is the validity of pc->mem_cgroup.
  682. *
  683. * Changes to pc->mem_cgroup happen when
  684. * 1. charge
  685. * 2. moving account
  686. * In the typical case, "charge" is done before add-to-lru. The exception is
  687. * SwapCache, which is added to the LRU before charge.
  688. * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
  689. * When moving an account, the page is not on the LRU. It's isolated.
  690. */
  691. void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
  692. {
  693. struct page_cgroup *pc;
  694. struct mem_cgroup_per_zone *mz;
  695. if (mem_cgroup_disabled())
  696. return;
  697. pc = lookup_page_cgroup(page);
  698. /* can happen while we handle swapcache. */
  699. if (!TestClearPageCgroupAcctLRU(pc))
  700. return;
  701. VM_BUG_ON(!pc->mem_cgroup);
  702. /*
  703. * We don't check PCG_USED bit. It's cleared when the "page" is finally
  704. * removed from global LRU.
  705. */
  706. mz = page_cgroup_zoneinfo(pc);
  707. MEM_CGROUP_ZSTAT(mz, lru) -= 1;
  708. if (mem_cgroup_is_root(pc->mem_cgroup))
  709. return;
  710. VM_BUG_ON(list_empty(&pc->lru));
  711. list_del_init(&pc->lru);
  712. return;
  713. }
  714. void mem_cgroup_del_lru(struct page *page)
  715. {
  716. mem_cgroup_del_lru_list(page, page_lru(page));
  717. }
  718. void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
  719. {
  720. struct mem_cgroup_per_zone *mz;
  721. struct page_cgroup *pc;
  722. if (mem_cgroup_disabled())
  723. return;
  724. pc = lookup_page_cgroup(page);
  725. /*
  726. * Used bit is set without atomic ops but after smp_wmb().
  727. * For making pc->mem_cgroup visible, insert smp_rmb() here.
  728. */
  729. smp_rmb();
  730. /* unused or root page is not rotated. */
  731. if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
  732. return;
  733. mz = page_cgroup_zoneinfo(pc);
  734. list_move(&pc->lru, &mz->lists[lru]);
  735. }
  736. void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
  737. {
  738. struct page_cgroup *pc;
  739. struct mem_cgroup_per_zone *mz;
  740. if (mem_cgroup_disabled())
  741. return;
  742. pc = lookup_page_cgroup(page);
  743. VM_BUG_ON(PageCgroupAcctLRU(pc));
  744. /*
  745. * Used bit is set without atomic ops but after smp_wmb().
  746. * For making pc->mem_cgroup visible, insert smp_rmb() here.
  747. */
  748. smp_rmb();
  749. if (!PageCgroupUsed(pc))
  750. return;
  751. mz = page_cgroup_zoneinfo(pc);
  752. MEM_CGROUP_ZSTAT(mz, lru) += 1;
  753. SetPageCgroupAcctLRU(pc);
  754. if (mem_cgroup_is_root(pc->mem_cgroup))
  755. return;
  756. list_add(&pc->lru, &mz->lists[lru]);
  757. }
  758. /*
  759. * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to the
  760. * lru because the page may be reused after it's fully uncharged (because of
  761. * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when we charge
  762. * it again. This function is only used to charge SwapCache. It's done under
  763. * lock_page and expected that zone->lru_lock is never held.
  764. */
  765. static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
  766. {
  767. unsigned long flags;
  768. struct zone *zone = page_zone(page);
  769. struct page_cgroup *pc = lookup_page_cgroup(page);
  770. spin_lock_irqsave(&zone->lru_lock, flags);
  771. /*
  772. * Forget old LRU when this page_cgroup is *not* used. This Used bit
  773. * is guarded by lock_page() because the page is SwapCache.
  774. */
  775. if (!PageCgroupUsed(pc))
  776. mem_cgroup_del_lru_list(page, page_lru(page));
  777. spin_unlock_irqrestore(&zone->lru_lock, flags);
  778. }
  779. static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
  780. {
  781. unsigned long flags;
  782. struct zone *zone = page_zone(page);
  783. struct page_cgroup *pc = lookup_page_cgroup(page);
  784. spin_lock_irqsave(&zone->lru_lock, flags);
  785. /* link when the page is linked to LRU but page_cgroup isn't */
  786. if (PageLRU(page) && !PageCgroupAcctLRU(pc))
  787. mem_cgroup_add_lru_list(page, page_lru(page));
  788. spin_unlock_irqrestore(&zone->lru_lock, flags);
  789. }
  790. void mem_cgroup_move_lists(struct page *page,
  791. enum lru_list from, enum lru_list to)
  792. {
  793. if (mem_cgroup_disabled())
  794. return;
  795. mem_cgroup_del_lru_list(page, from);
  796. mem_cgroup_add_lru_list(page, to);
  797. }
  798. int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
  799. {
  800. int ret;
  801. struct mem_cgroup *curr = NULL;
  802. struct task_struct *p;
  803. p = find_lock_task_mm(task);
  804. if (!p)
  805. return 0;
  806. curr = try_get_mem_cgroup_from_mm(p->mm);
  807. task_unlock(p);
  808. if (!curr)
  809. return 0;
  810. /*
  811. * We should check use_hierarchy of "mem", not "curr", because checking
  812. * use_hierarchy of "curr" here would make this function return true if
  813. * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the *cgroup*
  814. * hierarchy (even if use_hierarchy is disabled in "mem").
  815. */
  816. if (mem->use_hierarchy)
  817. ret = css_is_ancestor(&curr->css, &mem->css);
  818. else
  819. ret = (curr == mem);
  820. css_put(&curr->css);
  821. return ret;
  822. }
  823. static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
  824. {
  825. unsigned long active;
  826. unsigned long inactive;
  827. unsigned long gb;
  828. unsigned long inactive_ratio;
  829. inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
  830. active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
  831. gb = (inactive + active) >> (30 - PAGE_SHIFT);
  832. if (gb)
  833. inactive_ratio = int_sqrt(10 * gb);
  834. else
  835. inactive_ratio = 1;
  836. if (present_pages) {
  837. present_pages[0] = inactive;
  838. present_pages[1] = active;
  839. }
  840. return inactive_ratio;
  841. }
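/*
 * Worked example (editorial addition): with 4GB of anonymous pages in the
 * group, gb = 4 and inactive_ratio = int_sqrt(10 * 4) = 6, so
 * mem_cgroup_inactive_anon_is_low() reports "low" once
 * inactive * 6 < active, i.e. when less than roughly one seventh of the anon
 * pages sit on the inactive list.
 */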
  842. int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
  843. {
  844. unsigned long active;
  845. unsigned long inactive;
  846. unsigned long present_pages[2];
  847. unsigned long inactive_ratio;
  848. inactive_ratio = calc_inactive_ratio(memcg, present_pages);
  849. inactive = present_pages[0];
  850. active = present_pages[1];
  851. if (inactive * inactive_ratio < active)
  852. return 1;
  853. return 0;
  854. }
  855. int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
  856. {
  857. unsigned long active;
  858. unsigned long inactive;
  859. inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
  860. active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
  861. return (active > inactive);
  862. }
  863. unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
  864. struct zone *zone,
  865. enum lru_list lru)
  866. {
  867. int nid = zone_to_nid(zone);
  868. int zid = zone_idx(zone);
  869. struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  870. return MEM_CGROUP_ZSTAT(mz, lru);
  871. }
  872. struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
  873. struct zone *zone)
  874. {
  875. int nid = zone_to_nid(zone);
  876. int zid = zone_idx(zone);
  877. struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  878. return &mz->reclaim_stat;
  879. }
  880. struct zone_reclaim_stat *
  881. mem_cgroup_get_reclaim_stat_from_page(struct page *page)
  882. {
  883. struct page_cgroup *pc;
  884. struct mem_cgroup_per_zone *mz;
  885. if (mem_cgroup_disabled())
  886. return NULL;
  887. pc = lookup_page_cgroup(page);
  888. /*
  889. * Used bit is set without atomic ops but after smp_wmb().
  890. * For making pc->mem_cgroup visible, insert smp_rmb() here.
  891. */
  892. smp_rmb();
  893. if (!PageCgroupUsed(pc))
  894. return NULL;
  895. mz = page_cgroup_zoneinfo(pc);
  896. if (!mz)
  897. return NULL;
  898. return &mz->reclaim_stat;
  899. }
  900. unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  901. struct list_head *dst,
  902. unsigned long *scanned, int order,
  903. int mode, struct zone *z,
  904. struct mem_cgroup *mem_cont,
  905. int active, int file)
  906. {
  907. unsigned long nr_taken = 0;
  908. struct page *page;
  909. unsigned long scan;
  910. LIST_HEAD(pc_list);
  911. struct list_head *src;
  912. struct page_cgroup *pc, *tmp;
  913. int nid = zone_to_nid(z);
  914. int zid = zone_idx(z);
  915. struct mem_cgroup_per_zone *mz;
  916. int lru = LRU_FILE * file + active;
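/*
 * Editorial note: LRU_FILE * file + active maps the (file, active) pair onto
 * the enum lru_list index, e.g. file == 1, active == 0 selects
 * LRU_INACTIVE_FILE and file == 0, active == 1 selects LRU_ACTIVE_ANON.
 */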
  917. int ret;
  918. BUG_ON(!mem_cont);
  919. mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
  920. src = &mz->lists[lru];
  921. scan = 0;
  922. list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
  923. if (scan >= nr_to_scan)
  924. break;
  925. page = pc->page;
  926. if (unlikely(!PageCgroupUsed(pc)))
  927. continue;
  928. if (unlikely(!PageLRU(page)))
  929. continue;
  930. scan++;
  931. ret = __isolate_lru_page(page, mode, file);
  932. switch (ret) {
  933. case 0:
  934. list_move(&page->lru, dst);
  935. mem_cgroup_del_lru(page);
  936. nr_taken++;
  937. break;
  938. case -EBUSY:
  939. /* we don't affect global LRU but rotate in our LRU */
  940. mem_cgroup_rotate_lru_list(page, page_lru(page));
  941. break;
  942. default:
  943. break;
  944. }
  945. }
  946. *scanned = scan;
  947. trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
  948. 0, 0, 0, mode);
  949. return nr_taken;
  950. }
  951. #define mem_cgroup_from_res_counter(counter, member) \
  952. container_of(counter, struct mem_cgroup, member)
  953. static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
  954. {
  955. if (do_swap_account) {
  956. if (res_counter_check_under_limit(&mem->res) &&
  957. res_counter_check_under_limit(&mem->memsw))
  958. return true;
  959. } else
  960. if (res_counter_check_under_limit(&mem->res))
  961. return true;
  962. return false;
  963. }
  964. static unsigned int get_swappiness(struct mem_cgroup *memcg)
  965. {
  966. struct cgroup *cgrp = memcg->css.cgroup;
  967. unsigned int swappiness;
  968. /* root ? */
  969. if (cgrp->parent == NULL)
  970. return vm_swappiness;
  971. spin_lock(&memcg->reclaim_param_lock);
  972. swappiness = memcg->swappiness;
  973. spin_unlock(&memcg->reclaim_param_lock);
  974. return swappiness;
  975. }
  976. static void mem_cgroup_start_move(struct mem_cgroup *mem)
  977. {
  978. int cpu;
  979. get_online_cpus();
  980. spin_lock(&mem->pcp_counter_lock);
  981. for_each_online_cpu(cpu)
  982. per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
  983. mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
  984. spin_unlock(&mem->pcp_counter_lock);
  985. put_online_cpus();
  986. synchronize_rcu();
  987. }
  988. static void mem_cgroup_end_move(struct mem_cgroup *mem)
  989. {
  990. int cpu;
  991. if (!mem)
  992. return;
  993. get_online_cpus();
  994. spin_lock(&mem->pcp_counter_lock);
  995. for_each_online_cpu(cpu)
  996. per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
  997. mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
  998. spin_unlock(&mem->pcp_counter_lock);
  999. put_online_cpus();
  1000. }
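/*
 * Editorial note: mem_cgroup_start_move() raises MEM_CGROUP_ON_MOVE on every
 * cpu and then calls synchronize_rcu(), so once it returns, any reader that
 * checks mem_cgroup_stealed() inside a new rcu_read_lock() section is
 * guaranteed to see the move in progress; mem_cgroup_end_move() reverses the
 * counters when the move is finished.
 */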
  1001. /*
  1002. * Two routines for checking whether "mem" is under move_account() or not.
  1003. *
  1004. * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
  1005. * to avoid races in accounting. If true,
  1006. * pc->mem_cgroup may be overwritten.
  1007. *
  1008. * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to, or
  1009. * is under the hierarchy of the moving cgroups. This is used for
  1010. * waiting under high memory pressure caused by "move".
  1011. */
  1012. static bool mem_cgroup_stealed(struct mem_cgroup *mem)
  1013. {
  1014. VM_BUG_ON(!rcu_read_lock_held());
  1015. return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
  1016. }
  1017. static bool mem_cgroup_under_move(struct mem_cgroup *mem)
  1018. {
  1019. struct mem_cgroup *from;
  1020. struct mem_cgroup *to;
  1021. bool ret = false;
  1022. /*
  1023. * Unlike the task_move routines, we access mc.to and mc.from without the
  1024. * mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
  1025. */
  1026. spin_lock(&mc.lock);
  1027. from = mc.from;
  1028. to = mc.to;
  1029. if (!from)
  1030. goto unlock;
  1031. if (from == mem || to == mem
  1032. || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
  1033. || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
  1034. ret = true;
  1035. unlock:
  1036. spin_unlock(&mc.lock);
  1037. return ret;
  1038. }
  1039. static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
  1040. {
  1041. if (mc.moving_task && current != mc.moving_task) {
  1042. if (mem_cgroup_under_move(mem)) {
  1043. DEFINE_WAIT(wait);
  1044. prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
  1045. /* moving charge context might have finished. */
  1046. if (mc.moving_task)
  1047. schedule();
  1048. finish_wait(&mc.waitq, &wait);
  1049. return true;
  1050. }
  1051. }
  1052. return false;
  1053. }
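/*
 * A minimal sketch of how the move-account helpers above fit together
 * (illustrative only; the real users of mem_cgroup_start_move()/end_move()
 * live elsewhere in this file): the mover bumps the per-cpu
 * MEM_CGROUP_ON_MOVE counters, readers then see mem_cgroup_stealed() return
 * true and fall back to lock_page_cgroup(), and over-limit chargers may
 * block in mem_cgroup_wait_acct_move() until the move finishes.
 */
#if 0	/* illustrative only */
static void move_account_example(struct mem_cgroup *from)
{
	mem_cgroup_start_move(from);	/* readers now take the slow, locked path */
	/* ... move pc->mem_cgroup for the pages being migrated ... */
	mem_cgroup_end_move(from);	/* fast, lock-free stat updates resume */
}
#endif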
  1054. /**
  1055. * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
  1056. * @memcg: The memory cgroup that went over limit
  1057. * @p: Task that is going to be killed
  1058. *
  1059. * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  1060. * enabled
  1061. */
  1062. void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  1063. {
  1064. struct cgroup *task_cgrp;
  1065. struct cgroup *mem_cgrp;
  1066. /*
  1067. * Need a buffer in BSS, can't rely on allocations. The code relies
  1068. * on the assumption that OOM is serialized for memory controller.
  1069. * If this assumption is broken, revisit this code.
  1070. */
  1071. static char memcg_name[PATH_MAX];
  1072. int ret;
  1073. if (!memcg || !p)
  1074. return;
  1075. rcu_read_lock();
  1076. mem_cgrp = memcg->css.cgroup;
  1077. task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
  1078. ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
  1079. if (ret < 0) {
  1080. /*
  1081. * Unfortunately, we are unable to convert to a useful name
  1082. * But we'll still print out the usage information
  1083. */
  1084. rcu_read_unlock();
  1085. goto done;
  1086. }
  1087. rcu_read_unlock();
  1088. printk(KERN_INFO "Task in %s killed", memcg_name);
  1089. rcu_read_lock();
  1090. ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
  1091. if (ret < 0) {
  1092. rcu_read_unlock();
  1093. goto done;
  1094. }
  1095. rcu_read_unlock();
  1096. /*
1097. * Continues from above, so we don't need a KERN_ level
  1098. */
  1099. printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
  1100. done:
  1101. printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
  1102. res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
  1103. res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
  1104. res_counter_read_u64(&memcg->res, RES_FAILCNT));
  1105. printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
  1106. "failcnt %llu\n",
  1107. res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
  1108. res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
  1109. res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
  1110. }
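/*
 * For reference, the output produced above looks roughly like this
 * (paths and numbers are made up for illustration):
 *
 *   Task in /workload/batch killed as a result of limit of /workload
 *   memory: usage 524288kB, limit 524288kB, failcnt 1024
 *   memory+swap: usage 1048576kB, limit 9007199254740991kB, failcnt 0
 */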
  1111. /*
1112. * This function returns the number of memcgs under the hierarchy tree.
1113. * Returns 1 (self count) if there are no children.
  1114. */
  1115. static int mem_cgroup_count_children(struct mem_cgroup *mem)
  1116. {
  1117. int num = 0;
  1118. struct mem_cgroup *iter;
  1119. for_each_mem_cgroup_tree(iter, mem)
  1120. num++;
  1121. return num;
  1122. }
  1123. /*
  1124. * Return the memory (and swap, if configured) limit for a memcg.
  1125. */
  1126. u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
  1127. {
  1128. u64 limit;
  1129. u64 memsw;
  1130. limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
  1131. total_swap_pages;
  1132. memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  1133. /*
  1134. * If memsw is finite and limits the amount of swap space available
  1135. * to this memcg, return that limit.
  1136. */
  1137. return min(limit, memsw);
  1138. }
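/*
 * Worked example for the min() above (illustrative numbers): if
 * memory.limit_in_bytes is 512MB and no memsw limit is set (so the memsw
 * res_counter reads as its maximum), the first term is smaller and is
 * returned; if memsw.limit_in_bytes is set below that sum, min() returns
 * the memsw limit instead.
 */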
  1139. /*
  1140. * Visit the first child (need not be the first child as per the ordering
  1141. * of the cgroup list, since we track last_scanned_child) of @mem and use
  1142. * that to reclaim free pages from.
  1143. */
  1144. static struct mem_cgroup *
  1145. mem_cgroup_select_victim(struct mem_cgroup *root_mem)
  1146. {
  1147. struct mem_cgroup *ret = NULL;
  1148. struct cgroup_subsys_state *css;
  1149. int nextid, found;
  1150. if (!root_mem->use_hierarchy) {
  1151. css_get(&root_mem->css);
  1152. ret = root_mem;
  1153. }
  1154. while (!ret) {
  1155. rcu_read_lock();
  1156. nextid = root_mem->last_scanned_child + 1;
  1157. css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
  1158. &found);
  1159. if (css && css_tryget(css))
  1160. ret = container_of(css, struct mem_cgroup, css);
  1161. rcu_read_unlock();
  1162. /* Updates scanning parameter */
  1163. spin_lock(&root_mem->reclaim_param_lock);
  1164. if (!css) {
  1165. /* this means start scan from ID:1 */
  1166. root_mem->last_scanned_child = 0;
  1167. } else
  1168. root_mem->last_scanned_child = found;
  1169. spin_unlock(&root_mem->reclaim_param_lock);
  1170. }
  1171. return ret;
  1172. }
  1173. /*
  1174. * Scan the hierarchy if needed to reclaim memory. We remember the last child
  1175. * we reclaimed from, so that we don't end up penalizing one child extensively
  1176. * based on its position in the children list.
  1177. *
1178. * root_mem is the original ancestor that we've been reclaiming from.
  1179. *
  1180. * We give up and return to the caller when we visit root_mem twice.
  1181. * (other groups can be removed while we're walking....)
  1182. *
1183. * If shrink==true, to avoid freeing too much, this returns immediately.
  1184. */
  1185. static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
  1186. struct zone *zone,
  1187. gfp_t gfp_mask,
  1188. unsigned long reclaim_options)
  1189. {
  1190. struct mem_cgroup *victim;
  1191. int ret, total = 0;
  1192. int loop = 0;
  1193. bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
  1194. bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
  1195. bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
  1196. unsigned long excess = mem_cgroup_get_excess(root_mem);
1197. /* If memsw_is_minimum==1, swap-out is of no use. */
  1198. if (root_mem->memsw_is_minimum)
  1199. noswap = true;
  1200. while (1) {
  1201. victim = mem_cgroup_select_victim(root_mem);
  1202. if (victim == root_mem) {
  1203. loop++;
  1204. if (loop >= 1)
  1205. drain_all_stock_async();
  1206. if (loop >= 2) {
  1207. /*
1208. * If we have not been able to reclaim
1209. * anything, it might be because there are
1210. * no reclaimable pages under this hierarchy.
  1211. */
  1212. if (!check_soft || !total) {
  1213. css_put(&victim->css);
  1214. break;
  1215. }
  1216. /*
1217. * We want to do more targeted reclaim.
1218. * excess >> 2 is not too excessive, so we don't
1219. * reclaim too much, nor too little, so we don't keep
1220. * coming back to reclaim from this cgroup.
  1221. */
  1222. if (total >= (excess >> 2) ||
  1223. (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
  1224. css_put(&victim->css);
  1225. break;
  1226. }
  1227. }
  1228. }
  1229. if (!mem_cgroup_local_usage(victim)) {
  1230. /* this cgroup's local usage == 0 */
  1231. css_put(&victim->css);
  1232. continue;
  1233. }
  1234. /* we use swappiness of local cgroup */
  1235. if (check_soft)
  1236. ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
  1237. noswap, get_swappiness(victim), zone);
  1238. else
  1239. ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
  1240. noswap, get_swappiness(victim));
  1241. css_put(&victim->css);
  1242. /*
1243. * When shrinking usage, we can't check whether we should stop here or
1244. * reclaim more. It depends on callers. last_scanned_child
1245. * works well enough for keeping fairness under the tree.
  1246. */
  1247. if (shrink)
  1248. return ret;
  1249. total += ret;
  1250. if (check_soft) {
  1251. if (res_counter_check_under_soft_limit(&root_mem->res))
  1252. return total;
  1253. } else if (mem_cgroup_check_under_limit(root_mem))
  1254. return 1 + total;
  1255. }
  1256. return total;
  1257. }
  1258. /*
1259. * Check whether the OOM killer is already running under our hierarchy.
  1260. * If someone is running, return false.
  1261. */
  1262. static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
  1263. {
  1264. int x, lock_count = 0;
  1265. struct mem_cgroup *iter;
  1266. for_each_mem_cgroup_tree(iter, mem) {
  1267. x = atomic_inc_return(&iter->oom_lock);
  1268. lock_count = max(x, lock_count);
  1269. }
  1270. if (lock_count == 1)
  1271. return true;
  1272. return false;
  1273. }
  1274. static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
  1275. {
  1276. struct mem_cgroup *iter;
  1277. /*
  1278. * When a new child is created while the hierarchy is under oom,
  1279. * mem_cgroup_oom_lock() may not be called. We have to use
  1280. * atomic_add_unless() here.
  1281. */
  1282. for_each_mem_cgroup_tree(iter, mem)
  1283. atomic_add_unless(&iter->oom_lock, -1, 0);
  1284. return 0;
  1285. }
  1286. static DEFINE_MUTEX(memcg_oom_mutex);
  1287. static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
  1288. struct oom_wait_info {
  1289. struct mem_cgroup *mem;
  1290. wait_queue_t wait;
  1291. };
  1292. static int memcg_oom_wake_function(wait_queue_t *wait,
  1293. unsigned mode, int sync, void *arg)
  1294. {
  1295. struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
  1296. struct oom_wait_info *oom_wait_info;
  1297. oom_wait_info = container_of(wait, struct oom_wait_info, wait);
  1298. if (oom_wait_info->mem == wake_mem)
  1299. goto wakeup;
  1300. /* if no hierarchy, no match */
  1301. if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
  1302. return 0;
  1303. /*
  1304. * Both of oom_wait_info->mem and wake_mem are stable under us.
  1305. * Then we can use css_is_ancestor without taking care of RCU.
  1306. */
  1307. if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
  1308. !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
  1309. return 0;
  1310. wakeup:
  1311. return autoremove_wake_function(wait, mode, sync, arg);
  1312. }
  1313. static void memcg_wakeup_oom(struct mem_cgroup *mem)
  1314. {
  1315. /* for filtering, pass "mem" as argument. */
  1316. __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
  1317. }
  1318. static void memcg_oom_recover(struct mem_cgroup *mem)
  1319. {
  1320. if (mem && atomic_read(&mem->oom_lock))
  1321. memcg_wakeup_oom(mem);
  1322. }
  1323. /*
  1324. * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  1325. */
  1326. bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
  1327. {
  1328. struct oom_wait_info owait;
  1329. bool locked, need_to_kill;
  1330. owait.mem = mem;
  1331. owait.wait.flags = 0;
  1332. owait.wait.func = memcg_oom_wake_function;
  1333. owait.wait.private = current;
  1334. INIT_LIST_HEAD(&owait.wait.task_list);
  1335. need_to_kill = true;
  1336. /* At first, try to OOM lock hierarchy under mem.*/
  1337. mutex_lock(&memcg_oom_mutex);
  1338. locked = mem_cgroup_oom_lock(mem);
  1339. /*
  1340. * Even if signal_pending(), we can't quit charge() loop without
  1341. * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
  1342. * under OOM is always welcomed, use TASK_KILLABLE here.
  1343. */
  1344. prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
  1345. if (!locked || mem->oom_kill_disable)
  1346. need_to_kill = false;
  1347. if (locked)
  1348. mem_cgroup_oom_notify(mem);
  1349. mutex_unlock(&memcg_oom_mutex);
  1350. if (need_to_kill) {
  1351. finish_wait(&memcg_oom_waitq, &owait.wait);
  1352. mem_cgroup_out_of_memory(mem, mask);
  1353. } else {
  1354. schedule();
  1355. finish_wait(&memcg_oom_waitq, &owait.wait);
  1356. }
  1357. mutex_lock(&memcg_oom_mutex);
  1358. mem_cgroup_oom_unlock(mem);
  1359. memcg_wakeup_oom(mem);
  1360. mutex_unlock(&memcg_oom_mutex);
  1361. if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
  1362. return false;
  1363. /* Give chance to dying process */
  1364. schedule_timeout(1);
  1365. return true;
  1366. }
  1367. /*
  1368. * Currently used to update mapped file statistics, but the routine can be
  1369. * generalized to update other statistics as well.
  1370. *
  1371. * Notes: Race condition
  1372. *
1373. * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1374. * it tends to be costly. Under some conditions, however, we don't need
1375. * to do so _always_.
1376. *
1377. * Considering "charge", lock_page_cgroup() is not required because all
1378. * file-stat operations happen after a page is attached to the radix-tree.
1379. * There is no race with "charge".
1380. *
1381. * Considering "uncharge", we know that memcg intentionally doesn't clear
1382. * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup
1383. * even if there is a race with "uncharge". The statistics themselves are
1384. * properly handled by flags.
1385. *
1386. * Considering "move", this is the only case where we see a race. To make the
1387. * race window small, we check the MEM_CGROUP_ON_MOVE percpu value to detect
1388. * the possibility of a race. If there is one, we take a lock.
  1389. */
  1390. static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
  1391. {
  1392. struct mem_cgroup *mem;
  1393. struct page_cgroup *pc = lookup_page_cgroup(page);
  1394. bool need_unlock = false;
  1395. if (unlikely(!pc))
  1396. return;
  1397. rcu_read_lock();
  1398. mem = pc->mem_cgroup;
  1399. if (unlikely(!mem || !PageCgroupUsed(pc)))
  1400. goto out;
  1401. /* pc->mem_cgroup is unstable ? */
  1402. if (unlikely(mem_cgroup_stealed(mem))) {
  1403. /* take a lock against to access pc->mem_cgroup */
  1404. lock_page_cgroup(pc);
  1405. need_unlock = true;
  1406. mem = pc->mem_cgroup;
  1407. if (!mem || !PageCgroupUsed(pc))
  1408. goto out;
  1409. }
  1410. this_cpu_add(mem->stat->count[idx], val);
  1411. switch (idx) {
  1412. case MEM_CGROUP_STAT_FILE_MAPPED:
  1413. if (val > 0)
  1414. SetPageCgroupFileMapped(pc);
  1415. else if (!page_mapped(page))
  1416. ClearPageCgroupFileMapped(pc);
  1417. break;
  1418. default:
  1419. BUG();
  1420. }
  1421. out:
  1422. if (unlikely(need_unlock))
  1423. unlock_page_cgroup(pc);
  1424. rcu_read_unlock();
  1425. return;
  1426. }
  1427. void mem_cgroup_update_file_mapped(struct page *page, int val)
  1428. {
  1429. mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
  1430. }
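/*
 * Sketch of the intended call pattern for the helper above (illustrative
 * only; the real callers are the rmap paths that map and unmap file pages):
 * +1 when a file page gains a mapping, -1 when it loses one.
 */
#if 0	/* illustrative only */
static void file_rmap_example(struct page *page, bool gains_mapping)
{
	if (gains_mapping)
		mem_cgroup_update_file_mapped(page, 1);	 /* on the add-rmap side */
	else
		mem_cgroup_update_file_mapped(page, -1); /* on the remove-rmap side */
}
#endif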
  1431. /*
  1432. * size of first charge trial. "32" comes from vmscan.c's magic value.
1433. * TODO: it may be necessary to use bigger numbers on big iron.
  1434. */
  1435. #define CHARGE_SIZE (32 * PAGE_SIZE)
  1436. struct memcg_stock_pcp {
1437. struct mem_cgroup *cached; /* this is never the root cgroup */
  1438. int charge;
  1439. struct work_struct work;
  1440. };
  1441. static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
  1442. static atomic_t memcg_drain_count;
  1443. /*
1444. * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed
1445. * from the local stock and true is returned. If the stock is 0 or holds charges
1446. * from a cgroup which is not the current target, false is returned. The stock
1447. * will be refilled later.
  1448. */
  1449. static bool consume_stock(struct mem_cgroup *mem)
  1450. {
  1451. struct memcg_stock_pcp *stock;
  1452. bool ret = true;
  1453. stock = &get_cpu_var(memcg_stock);
  1454. if (mem == stock->cached && stock->charge)
  1455. stock->charge -= PAGE_SIZE;
  1456. else /* need to call res_counter_charge */
  1457. ret = false;
  1458. put_cpu_var(memcg_stock);
  1459. return ret;
  1460. }
  1461. /*
  1462. * Returns stocks cached in percpu to res_counter and reset cached information.
  1463. */
  1464. static void drain_stock(struct memcg_stock_pcp *stock)
  1465. {
  1466. struct mem_cgroup *old = stock->cached;
  1467. if (stock->charge) {
  1468. res_counter_uncharge(&old->res, stock->charge);
  1469. if (do_swap_account)
  1470. res_counter_uncharge(&old->memsw, stock->charge);
  1471. }
  1472. stock->cached = NULL;
  1473. stock->charge = 0;
  1474. }
  1475. /*
  1476. * This must be called under preempt disabled or must be called by
  1477. * a thread which is pinned to local cpu.
  1478. */
  1479. static void drain_local_stock(struct work_struct *dummy)
  1480. {
  1481. struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
  1482. drain_stock(stock);
  1483. }
  1484. /*
  1485. * Cache charges(val) which is from res_counter, to local per_cpu area.
  1486. * This will be consumed by consume_stock() function, later.
  1487. */
  1488. static void refill_stock(struct mem_cgroup *mem, int val)
  1489. {
  1490. struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
  1491. if (stock->cached != mem) { /* reset if necessary */
  1492. drain_stock(stock);
  1493. stock->cached = mem;
  1494. }
  1495. stock->charge += val;
  1496. put_cpu_var(memcg_stock);
  1497. }
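/*
 * A condensed sketch of the per-cpu stock protocol implemented by
 * consume_stock()/refill_stock() (illustrative only; __mem_cgroup_try_charge()
 * below is the real user): a charge first tries the local stock and, on a
 * miss, charges a whole CHARGE_SIZE batch from the res_counter, keeping the
 * unused remainder stocked for later PAGE_SIZE consumptions.
 */
#if 0	/* illustrative only */
static int charge_one_page_example(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	struct res_counter *fail_res;

	if (consume_stock(mem))			/* fast path: hit the cpu-local stock */
		return 0;
	if (res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res))
		return -ENOMEM;			/* real code reclaims and retries here */
	refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);	/* keep the rest stocked */
	return 0;
}
#endif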
  1498. /*
  1499. * Tries to drain stocked charges in other cpus. This function is asynchronous
1500. * and just queues a work item per cpu to drain locally on each cpu. The caller
1501. * can expect some charges to be returned to the res_counter later, but cannot
1502. * wait for that.
  1503. */
  1504. static void drain_all_stock_async(void)
  1505. {
  1506. int cpu;
1507. /* This function schedules "drain" in an asynchronous way.
1508. * The result of "drain" is not directly handled by callers. So,
1509. * if someone is already draining, we don't have to drain again.
1510. * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch it
1511. * if there is a race. We just do a loose check here.
  1512. */
  1513. if (atomic_read(&memcg_drain_count))
  1514. return;
  1515. /* Notify other cpus that system-wide "drain" is running */
  1516. atomic_inc(&memcg_drain_count);
  1517. get_online_cpus();
  1518. for_each_online_cpu(cpu) {
  1519. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  1520. schedule_work_on(cpu, &stock->work);
  1521. }
  1522. put_online_cpus();
  1523. atomic_dec(&memcg_drain_count);
  1524. /* We don't wait for flush_work */
  1525. }
  1526. /* This is a synchronous drain interface. */
  1527. static void drain_all_stock_sync(void)
  1528. {
  1529. /* called when force_empty is called */
  1530. atomic_inc(&memcg_drain_count);
  1531. schedule_on_each_cpu(drain_local_stock);
  1532. atomic_dec(&memcg_drain_count);
  1533. }
  1534. /*
  1535. * This function drains percpu counter value from DEAD cpu and
  1536. * move it to local cpu. Note that this function can be preempted.
  1537. */
  1538. static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
  1539. {
  1540. int i;
  1541. spin_lock(&mem->pcp_counter_lock);
  1542. for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
  1543. s64 x = per_cpu(mem->stat->count[i], cpu);
  1544. per_cpu(mem->stat->count[i], cpu) = 0;
  1545. mem->nocpu_base.count[i] += x;
  1546. }
  1547. /* need to clear ON_MOVE value, works as a kind of lock. */
  1548. per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
  1549. spin_unlock(&mem->pcp_counter_lock);
  1550. }
  1551. static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
  1552. {
  1553. int idx = MEM_CGROUP_ON_MOVE;
  1554. spin_lock(&mem->pcp_counter_lock);
  1555. per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
  1556. spin_unlock(&mem->pcp_counter_lock);
  1557. }
  1558. static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
  1559. unsigned long action,
  1560. void *hcpu)
  1561. {
  1562. int cpu = (unsigned long)hcpu;
  1563. struct memcg_stock_pcp *stock;
  1564. struct mem_cgroup *iter;
  1565. if ((action == CPU_ONLINE)) {
  1566. for_each_mem_cgroup_all(iter)
  1567. synchronize_mem_cgroup_on_move(iter, cpu);
  1568. return NOTIFY_OK;
  1569. }
1570. if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
  1571. return NOTIFY_OK;
  1572. for_each_mem_cgroup_all(iter)
  1573. mem_cgroup_drain_pcp_counter(iter, cpu);
  1574. stock = &per_cpu(memcg_stock, cpu);
  1575. drain_stock(stock);
  1576. return NOTIFY_OK;
  1577. }
  1578. /* See __mem_cgroup_try_charge() for details */
  1579. enum {
  1580. CHARGE_OK, /* success */
  1581. CHARGE_RETRY, /* need to retry but retry is not bad */
  1582. CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
1583. CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough res. */
  1584. CHARGE_OOM_DIE, /* the current is killed because of OOM */
  1585. };
  1586. static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
  1587. int csize, bool oom_check)
  1588. {
  1589. struct mem_cgroup *mem_over_limit;
  1590. struct res_counter *fail_res;
  1591. unsigned long flags = 0;
  1592. int ret;
  1593. ret = res_counter_charge(&mem->res, csize, &fail_res);
  1594. if (likely(!ret)) {
  1595. if (!do_swap_account)
  1596. return CHARGE_OK;
  1597. ret = res_counter_charge(&mem->memsw, csize, &fail_res);
  1598. if (likely(!ret))
  1599. return CHARGE_OK;
  1600. mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
  1601. flags |= MEM_CGROUP_RECLAIM_NOSWAP;
  1602. } else
  1603. mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
  1604. if (csize > PAGE_SIZE) /* change csize and retry */
  1605. return CHARGE_RETRY;
  1606. if (!(gfp_mask & __GFP_WAIT))
  1607. return CHARGE_WOULDBLOCK;
  1608. ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
  1609. gfp_mask, flags);
  1610. /*
  1611. * try_to_free_mem_cgroup_pages() might not give us a full
  1612. * picture of reclaim. Some pages are reclaimed and might be
  1613. * moved to swap cache or just unmapped from the cgroup.
  1614. * Check the limit again to see if the reclaim reduced the
  1615. * current usage of the cgroup before giving up
  1616. */
  1617. if (ret || mem_cgroup_check_under_limit(mem_over_limit))
  1618. return CHARGE_RETRY;
  1619. /*
  1620. * At task move, charge accounts can be doubly counted. So, it's
  1621. * better to wait until the end of task_move if something is going on.
  1622. */
  1623. if (mem_cgroup_wait_acct_move(mem_over_limit))
  1624. return CHARGE_RETRY;
1625. /* If we don't need to call the oom-killer at all, return immediately */
  1626. if (!oom_check)
  1627. return CHARGE_NOMEM;
  1628. /* check OOM */
  1629. if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
  1630. return CHARGE_OOM_DIE;
  1631. return CHARGE_RETRY;
  1632. }
  1633. /*
  1634. * Unlike exported interface, "oom" parameter is added. if oom==true,
  1635. * oom-killer can be invoked.
  1636. */
  1637. static int __mem_cgroup_try_charge(struct mm_struct *mm,
  1638. gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
  1639. {
  1640. int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  1641. struct mem_cgroup *mem = NULL;
  1642. int ret;
  1643. int csize = CHARGE_SIZE;
  1644. /*
1645. * Unlike the global VM's OOM kill, we're not under a system-level
1646. * memory shortage. So, allow a dying process to go ahead, in addition
1647. * to MEMDIE processes.
  1648. */
  1649. if (unlikely(test_thread_flag(TIF_MEMDIE)
  1650. || fatal_signal_pending(current)))
  1651. goto bypass;
  1652. /*
  1653. * We always charge the cgroup the mm_struct belongs to.
  1654. * The mm_struct's mem_cgroup changes on task migration if the
  1655. * thread group leader migrates. It's possible that mm is not
  1656. * set, if so charge the init_mm (happens for pagecache usage).
  1657. */
  1658. if (!*memcg && !mm)
  1659. goto bypass;
  1660. again:
  1661. if (*memcg) { /* css should be a valid one */
  1662. mem = *memcg;
  1663. VM_BUG_ON(css_is_removed(&mem->css));
  1664. if (mem_cgroup_is_root(mem))
  1665. goto done;
  1666. if (consume_stock(mem))
  1667. goto done;
  1668. css_get(&mem->css);
  1669. } else {
  1670. struct task_struct *p;
  1671. rcu_read_lock();
  1672. p = rcu_dereference(mm->owner);
  1673. VM_BUG_ON(!p);
  1674. /*
1675. * Because we don't have task_lock(), "p" can exit while
1676. * we're here. In that case, "mem" can point to the root
1677. * cgroup but is never NULL. (task_struct itself is freed
1678. * by RCU, and cgroup itself is RCU safe.) So we run a small
1679. * risk here of getting the wrong cgroup. But that kind of mis-accounting
1680. * due to races always happens because we don't hold cgroup_mutex().
1681. * Taking it would be overkill, so we allow that small race here.
  1682. */
  1683. mem = mem_cgroup_from_task(p);
  1684. VM_BUG_ON(!mem);
  1685. if (mem_cgroup_is_root(mem)) {
  1686. rcu_read_unlock();
  1687. goto done;
  1688. }
  1689. if (consume_stock(mem)) {
  1690. /*
1691. * It seems dangerous to access memcg without css_get().
1692. * But considering how consume_stock works, it's not
1693. * necessary. If consume_stock succeeds, some charges
  1694. * from this memcg are cached on this cpu. So, we
  1695. * don't need to call css_get()/css_tryget() before
  1696. * calling consume_stock().
  1697. */
  1698. rcu_read_unlock();
  1699. goto done;
  1700. }
  1701. /* after here, we may be blocked. we need to get refcnt */
  1702. if (!css_tryget(&mem->css)) {
  1703. rcu_read_unlock();
  1704. goto again;
  1705. }
  1706. rcu_read_unlock();
  1707. }
  1708. do {
  1709. bool oom_check;
  1710. /* If killed, bypass charge */
  1711. if (fatal_signal_pending(current)) {
  1712. css_put(&mem->css);
  1713. goto bypass;
  1714. }
  1715. oom_check = false;
  1716. if (oom && !nr_oom_retries) {
  1717. oom_check = true;
  1718. nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  1719. }
  1720. ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
  1721. switch (ret) {
  1722. case CHARGE_OK:
  1723. break;
  1724. case CHARGE_RETRY: /* not in OOM situation but retry */
  1725. csize = PAGE_SIZE;
  1726. css_put(&mem->css);
  1727. mem = NULL;
  1728. goto again;
  1729. case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
  1730. css_put(&mem->css);
  1731. goto nomem;
  1732. case CHARGE_NOMEM: /* OOM routine works */
  1733. if (!oom) {
  1734. css_put(&mem->css);
  1735. goto nomem;
  1736. }
  1737. /* If oom, we never return -ENOMEM */
  1738. nr_oom_retries--;
  1739. break;
  1740. case CHARGE_OOM_DIE: /* Killed by OOM Killer */
  1741. css_put(&mem->css);
  1742. goto bypass;
  1743. }
  1744. } while (ret != CHARGE_OK);
  1745. if (csize > PAGE_SIZE)
  1746. refill_stock(mem, csize - PAGE_SIZE);
  1747. css_put(&mem->css);
  1748. done:
  1749. *memcg = mem;
  1750. return 0;
  1751. nomem:
  1752. *memcg = NULL;
  1753. return -ENOMEM;
  1754. bypass:
  1755. *memcg = NULL;
  1756. return 0;
  1757. }
  1758. /*
1759. * Sometimes we have to undo a charge we got by try_charge().
1760. * This function does the uncharge and puts the css refcount
1761. * obtained by try_charge().
  1762. */
  1763. static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
  1764. unsigned long count)
  1765. {
  1766. if (!mem_cgroup_is_root(mem)) {
  1767. res_counter_uncharge(&mem->res, PAGE_SIZE * count);
  1768. if (do_swap_account)
  1769. res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
  1770. }
  1771. }
  1772. static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
  1773. {
  1774. __mem_cgroup_cancel_charge(mem, 1);
  1775. }
  1776. /*
1777. * A helper function to get a mem_cgroup from an ID. Must be called under
1778. * rcu_read_lock(). The caller must check css_is_removed() or similar if
1779. * that is a concern. (Dropping a refcnt from swap can be called against a
1780. * removed memcg.)
  1781. */
  1782. static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
  1783. {
  1784. struct cgroup_subsys_state *css;
  1785. /* ID 0 is unused ID */
  1786. if (!id)
  1787. return NULL;
  1788. css = css_lookup(&mem_cgroup_subsys, id);
  1789. if (!css)
  1790. return NULL;
  1791. return container_of(css, struct mem_cgroup, css);
  1792. }
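/*
 * Typical usage sketch for mem_cgroup_lookup() (illustrative only; compare
 * try_get_mem_cgroup_from_page() just below): look the memcg up under RCU
 * and take a reference with css_tryget() before using it outside the RCU
 * section, since the ID may belong to an already-removed memcg.
 */
#if 0	/* illustrative only */
static struct mem_cgroup *memcg_from_swap_id_example(unsigned short id)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_lookup(id);
	if (mem && !css_tryget(&mem->css))
		mem = NULL;		/* memcg is going away; treat as not found */
	rcu_read_unlock();
	return mem;			/* caller must css_put() when done */
}
#endif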
  1793. struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  1794. {
  1795. struct mem_cgroup *mem = NULL;
  1796. struct page_cgroup *pc;
  1797. unsigned short id;
  1798. swp_entry_t ent;
  1799. VM_BUG_ON(!PageLocked(page));
  1800. pc = lookup_page_cgroup(page);
  1801. lock_page_cgroup(pc);
  1802. if (PageCgroupUsed(pc)) {
  1803. mem = pc->mem_cgroup;
  1804. if (mem && !css_tryget(&mem->css))
  1805. mem = NULL;
  1806. } else if (PageSwapCache(page)) {
  1807. ent.val = page_private(page);
  1808. id = lookup_swap_cgroup(ent);
  1809. rcu_read_lock();
  1810. mem = mem_cgroup_lookup(id);
  1811. if (mem && !css_tryget(&mem->css))
  1812. mem = NULL;
  1813. rcu_read_unlock();
  1814. }
  1815. unlock_page_cgroup(pc);
  1816. return mem;
  1817. }
  1818. /*
  1819. * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
  1820. * USED state. If already USED, uncharge and return.
  1821. */
  1822. static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
  1823. struct page_cgroup *pc,
  1824. enum charge_type ctype)
  1825. {
1826. /* try_charge() can return NULL to *memcg; take care of that. */
  1827. if (!mem)
  1828. return;
  1829. lock_page_cgroup(pc);
  1830. if (unlikely(PageCgroupUsed(pc))) {
  1831. unlock_page_cgroup(pc);
  1832. mem_cgroup_cancel_charge(mem);
  1833. return;
  1834. }
  1835. pc->mem_cgroup = mem;
  1836. /*
  1837. * We access a page_cgroup asynchronously without lock_page_cgroup().
  1838. * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
  1839. * is accessed after testing USED bit. To make pc->mem_cgroup visible
  1840. * before USED bit, we need memory barrier here.
  1841. * See mem_cgroup_add_lru_list(), etc.
  1842. */
  1843. smp_wmb();
  1844. switch (ctype) {
  1845. case MEM_CGROUP_CHARGE_TYPE_CACHE:
  1846. case MEM_CGROUP_CHARGE_TYPE_SHMEM:
  1847. SetPageCgroupCache(pc);
  1848. SetPageCgroupUsed(pc);
  1849. break;
  1850. case MEM_CGROUP_CHARGE_TYPE_MAPPED:
  1851. ClearPageCgroupCache(pc);
  1852. SetPageCgroupUsed(pc);
  1853. break;
  1854. default:
  1855. break;
  1856. }
  1857. mem_cgroup_charge_statistics(mem, pc, true);
  1858. unlock_page_cgroup(pc);
  1859. /*
1860. * "charge_statistics" updated the event counter. Then, check it.
1861. * Insert the ancestor (and the ancestor's ancestors) into the softlimit
1862. * RB-tree if they exceed their soft limit.
  1863. */
  1864. memcg_check_events(mem, pc->page);
  1865. }
  1866. /**
  1867. * __mem_cgroup_move_account - move account of the page
  1868. * @pc: page_cgroup of the page.
  1869. * @from: mem_cgroup which the page is moved from.
  1870. * @to: mem_cgroup which the page is moved to. @from != @to.
  1871. * @uncharge: whether we should call uncharge and css_put against @from.
  1872. *
  1873. * The caller must confirm following.
  1874. * - page is not on LRU (isolate_page() is useful.)
  1875. * - the pc is locked, used, and ->mem_cgroup points to @from.
  1876. *
1877. * This function doesn't do "charge" nor css_get to the new cgroup. That should
1878. * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
1879. * is true, this function does "uncharge" from the old cgroup; if @uncharge is
1880. * false it doesn't, so the caller should do the "uncharge" itself.
  1881. */
  1882. static void __mem_cgroup_move_account(struct page_cgroup *pc,
  1883. struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
  1884. {
  1885. VM_BUG_ON(from == to);
  1886. VM_BUG_ON(PageLRU(pc->page));
  1887. VM_BUG_ON(!PageCgroupLocked(pc));
  1888. VM_BUG_ON(!PageCgroupUsed(pc));
  1889. VM_BUG_ON(pc->mem_cgroup != from);
  1890. if (PageCgroupFileMapped(pc)) {
  1891. /* Update mapped_file data for mem_cgroup */
  1892. preempt_disable();
  1893. __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  1894. __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  1895. preempt_enable();
  1896. }
  1897. mem_cgroup_charge_statistics(from, pc, false);
  1898. if (uncharge)
  1899. /* This is not "cancel", but cancel_charge does all we need. */
  1900. mem_cgroup_cancel_charge(from);
  1901. /* caller should have done css_get */
  1902. pc->mem_cgroup = to;
  1903. mem_cgroup_charge_statistics(to, pc, true);
  1904. /*
1905. * We charge against "to", which may not have any tasks. Then, "to"
1906. * can be under rmdir(). But in the current implementation, the callers of
1907. * this function are just force_empty() and move charge, so it's
1908. * guaranteed that "to" is never removed. So, we don't check rmdir
1909. * status here.
  1910. */
  1911. }
  1912. /*
  1913. * check whether the @pc is valid for moving account and call
  1914. * __mem_cgroup_move_account()
  1915. */
  1916. static int mem_cgroup_move_account(struct page_cgroup *pc,
  1917. struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
  1918. {
  1919. int ret = -EINVAL;
  1920. lock_page_cgroup(pc);
  1921. if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
  1922. __mem_cgroup_move_account(pc, from, to, uncharge);
  1923. ret = 0;
  1924. }
  1925. unlock_page_cgroup(pc);
  1926. /*
  1927. * check events
  1928. */
  1929. memcg_check_events(to, pc->page);
  1930. memcg_check_events(from, pc->page);
  1931. return ret;
  1932. }
  1933. /*
  1934. * move charges to its parent.
  1935. */
  1936. static int mem_cgroup_move_parent(struct page_cgroup *pc,
  1937. struct mem_cgroup *child,
  1938. gfp_t gfp_mask)
  1939. {
  1940. struct page *page = pc->page;
  1941. struct cgroup *cg = child->css.cgroup;
  1942. struct cgroup *pcg = cg->parent;
  1943. struct mem_cgroup *parent;
  1944. int ret;
  1945. /* Is ROOT ? */
  1946. if (!pcg)
  1947. return -EINVAL;
  1948. ret = -EBUSY;
  1949. if (!get_page_unless_zero(page))
  1950. goto out;
  1951. if (isolate_lru_page(page))
  1952. goto put;
  1953. parent = mem_cgroup_from_cont(pcg);
  1954. ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
  1955. if (ret || !parent)
  1956. goto put_back;
  1957. ret = mem_cgroup_move_account(pc, child, parent, true);
  1958. if (ret)
  1959. mem_cgroup_cancel_charge(parent);
  1960. put_back:
  1961. putback_lru_page(page);
  1962. put:
  1963. put_page(page);
  1964. out:
  1965. return ret;
  1966. }
  1967. /*
  1968. * Charge the memory controller for page usage.
  1969. * Return
  1970. * 0 if the charge was successful
  1971. * < 0 if the cgroup is over its limit
  1972. */
  1973. static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
  1974. gfp_t gfp_mask, enum charge_type ctype)
  1975. {
  1976. struct mem_cgroup *mem = NULL;
  1977. struct page_cgroup *pc;
  1978. int ret;
  1979. pc = lookup_page_cgroup(page);
  1980. /* can happen at boot */
  1981. if (unlikely(!pc))
  1982. return 0;
  1983. prefetchw(pc);
  1984. ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
  1985. if (ret || !mem)
  1986. return ret;
  1987. __mem_cgroup_commit_charge(mem, pc, ctype);
  1988. return 0;
  1989. }
  1990. int mem_cgroup_newpage_charge(struct page *page,
  1991. struct mm_struct *mm, gfp_t gfp_mask)
  1992. {
  1993. if (mem_cgroup_disabled())
  1994. return 0;
  1995. if (PageCompound(page))
  1996. return 0;
  1997. /*
  1998. * If already mapped, we don't have to account.
  1999. * If page cache, page->mapping has address_space.
2000. * But page->mapping may hold a stale anon_vma pointer;
2001. * detect that with a PageAnon() check. A newly-mapped anon page's
2002. * page->mapping is NULL.
  2003. */
  2004. if (page_mapped(page) || (page->mapping && !PageAnon(page)))
  2005. return 0;
  2006. if (unlikely(!mm))
  2007. mm = &init_mm;
  2008. return mem_cgroup_charge_common(page, mm, gfp_mask,
  2009. MEM_CGROUP_CHARGE_TYPE_MAPPED);
  2010. }
  2011. static void
  2012. __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
  2013. enum charge_type ctype);
  2014. int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  2015. gfp_t gfp_mask)
  2016. {
  2017. int ret;
  2018. if (mem_cgroup_disabled())
  2019. return 0;
  2020. if (PageCompound(page))
  2021. return 0;
  2022. /*
2023. * Corner case handling. This is usually called from add_to_page_cache().
2024. * But some filesystems (shmem) precharge this page before calling it
2025. * and call add_to_page_cache() with GFP_NOWAIT.
2026. *
2027. * In the GFP_NOWAIT case, the page may be pre-charged before calling
2028. * add_to_page_cache(). (See shmem.c.) Check for that here and avoid
2029. * charging twice. (It works, but at a slightly larger cost.)
2030. * And when the page is SwapCache, the swap information should be taken
2031. * into account. This is under lock_page() now.
  2032. */
  2033. if (!(gfp_mask & __GFP_WAIT)) {
  2034. struct page_cgroup *pc;
  2035. pc = lookup_page_cgroup(page);
  2036. if (!pc)
  2037. return 0;
  2038. lock_page_cgroup(pc);
  2039. if (PageCgroupUsed(pc)) {
  2040. unlock_page_cgroup(pc);
  2041. return 0;
  2042. }
  2043. unlock_page_cgroup(pc);
  2044. }
  2045. if (unlikely(!mm))
  2046. mm = &init_mm;
  2047. if (page_is_file_cache(page))
  2048. return mem_cgroup_charge_common(page, mm, gfp_mask,
  2049. MEM_CGROUP_CHARGE_TYPE_CACHE);
  2050. /* shmem */
  2051. if (PageSwapCache(page)) {
  2052. struct mem_cgroup *mem = NULL;
  2053. ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
  2054. if (!ret)
  2055. __mem_cgroup_commit_charge_swapin(page, mem,
  2056. MEM_CGROUP_CHARGE_TYPE_SHMEM);
  2057. } else
  2058. ret = mem_cgroup_charge_common(page, mm, gfp_mask,
  2059. MEM_CGROUP_CHARGE_TYPE_SHMEM);
  2060. return ret;
  2061. }
  2062. /*
  2063. * While swap-in, try_charge -> commit or cancel, the page is locked.
  2064. * And when try_charge() successfully returns, one refcnt to memcg without
  2065. * struct page_cgroup is acquired. This refcnt will be consumed by
  2066. * "commit()" or removed by "cancel()"
  2067. */
  2068. int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
  2069. struct page *page,
  2070. gfp_t mask, struct mem_cgroup **ptr)
  2071. {
  2072. struct mem_cgroup *mem;
  2073. int ret;
  2074. if (mem_cgroup_disabled())
  2075. return 0;
  2076. if (!do_swap_account)
  2077. goto charge_cur_mm;
  2078. /*
  2079. * A racing thread's fault, or swapoff, may have already updated
  2080. * the pte, and even removed page from swap cache: in those cases
  2081. * do_swap_page()'s pte_same() test will fail; but there's also a
  2082. * KSM case which does need to charge the page.
  2083. */
  2084. if (!PageSwapCache(page))
  2085. goto charge_cur_mm;
  2086. mem = try_get_mem_cgroup_from_page(page);
  2087. if (!mem)
  2088. goto charge_cur_mm;
  2089. *ptr = mem;
  2090. ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
  2091. css_put(&mem->css);
  2092. return ret;
  2093. charge_cur_mm:
  2094. if (unlikely(!mm))
  2095. mm = &init_mm;
  2096. return __mem_cgroup_try_charge(mm, mask, ptr, true);
  2097. }
  2098. static void
  2099. __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
  2100. enum charge_type ctype)
  2101. {
  2102. struct page_cgroup *pc;
  2103. if (mem_cgroup_disabled())
  2104. return;
  2105. if (!ptr)
  2106. return;
  2107. cgroup_exclude_rmdir(&ptr->css);
  2108. pc = lookup_page_cgroup(page);
  2109. mem_cgroup_lru_del_before_commit_swapcache(page);
  2110. __mem_cgroup_commit_charge(ptr, pc, ctype);
  2111. mem_cgroup_lru_add_after_commit_swapcache(page);
  2112. /*
  2113. * Now swap is on-memory. This means this page may be
  2114. * counted both as mem and swap....double count.
  2115. * Fix it by uncharging from memsw. Basically, this SwapCache is stable
  2116. * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
  2117. * may call delete_from_swap_cache() before reach here.
  2118. */
  2119. if (do_swap_account && PageSwapCache(page)) {
  2120. swp_entry_t ent = {.val = page_private(page)};
  2121. unsigned short id;
  2122. struct mem_cgroup *memcg;
  2123. id = swap_cgroup_record(ent, 0);
  2124. rcu_read_lock();
  2125. memcg = mem_cgroup_lookup(id);
  2126. if (memcg) {
  2127. /*
2128. * This recorded memcg can be an obsolete one. So, avoid
  2129. * calling css_tryget
  2130. */
  2131. if (!mem_cgroup_is_root(memcg))
  2132. res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
  2133. mem_cgroup_swap_statistics(memcg, false);
  2134. mem_cgroup_put(memcg);
  2135. }
  2136. rcu_read_unlock();
  2137. }
  2138. /*
2139. * At swapin, we may charge against a cgroup which has no tasks.
2140. * So, rmdir()->pre_destroy() can be called while we do this charge.
2141. * In that case, we need to call pre_destroy() again. Check it here.
  2142. */
  2143. cgroup_release_and_wakeup_rmdir(&ptr->css);
  2144. }
  2145. void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
  2146. {
  2147. __mem_cgroup_commit_charge_swapin(page, ptr,
  2148. MEM_CGROUP_CHARGE_TYPE_MAPPED);
  2149. }
  2150. void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
  2151. {
  2152. if (mem_cgroup_disabled())
  2153. return;
  2154. if (!mem)
  2155. return;
  2156. mem_cgroup_cancel_charge(mem);
  2157. }
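/*
 * The three entry points above form a try/commit/cancel protocol for
 * swapped-in pages. A condensed sketch of a caller (illustrative only;
 * mem_cgroup_cache_charge() above uses the same pattern for shmem):
 */
#if 0	/* illustrative only */
static int swapin_charge_example(struct mm_struct *mm, struct page *page,
				 gfp_t gfp_mask, bool map_succeeded)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
	if (ret)
		return ret;			/* nothing charged, nothing to undo */
	if (map_succeeded)
		mem_cgroup_commit_charge_swapin(page, mem);
	else
		mem_cgroup_cancel_charge_swapin(mem);	/* handles mem == NULL */
	return 0;
}
#endif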
  2158. static void
  2159. __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
  2160. {
  2161. struct memcg_batch_info *batch = NULL;
  2162. bool uncharge_memsw = true;
  2163. /* If swapout, usage of swap doesn't decrease */
  2164. if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
  2165. uncharge_memsw = false;
  2166. batch = &current->memcg_batch;
  2167. /*
2168. * Usually, we do css_get() when we remember a memcg pointer.
2169. * But in this case, we keep res->usage until the end of a series of
2170. * uncharges. Then, it's OK to ignore memcg's refcnt.
  2171. */
  2172. if (!batch->memcg)
  2173. batch->memcg = mem;
  2174. /*
  2175. * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2176. * In those cases, all pages freed continuously can be expected to be in
2177. * the same cgroup, and we have a chance to coalesce uncharges.
2178. * But we uncharge one by one if the task is being killed by OOM (TIF_MEMDIE)
2179. * because we want to do the uncharge as soon as possible.
  2180. */
  2181. if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
  2182. goto direct_uncharge;
  2183. /*
2184. * In the typical case, batch->memcg == mem. This means we can
2185. * merge a series of uncharges into one uncharge of the res_counter.
2186. * If not, we uncharge the res_counter one by one.
  2187. */
  2188. if (batch->memcg != mem)
  2189. goto direct_uncharge;
  2190. /* remember freed charge and uncharge it later */
  2191. batch->bytes += PAGE_SIZE;
  2192. if (uncharge_memsw)
  2193. batch->memsw_bytes += PAGE_SIZE;
  2194. return;
  2195. direct_uncharge:
  2196. res_counter_uncharge(&mem->res, PAGE_SIZE);
  2197. if (uncharge_memsw)
  2198. res_counter_uncharge(&mem->memsw, PAGE_SIZE);
  2199. if (unlikely(batch->memcg != mem))
  2200. memcg_oom_recover(mem);
  2201. return;
  2202. }
  2203. /*
  2204. * uncharge if !page_mapped(page)
  2205. */
  2206. static struct mem_cgroup *
  2207. __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
  2208. {
  2209. struct page_cgroup *pc;
  2210. struct mem_cgroup *mem = NULL;
  2211. if (mem_cgroup_disabled())
  2212. return NULL;
  2213. if (PageSwapCache(page))
  2214. return NULL;
  2215. /*
  2216. * Check if our page_cgroup is valid
  2217. */
  2218. pc = lookup_page_cgroup(page);
  2219. if (unlikely(!pc || !PageCgroupUsed(pc)))
  2220. return NULL;
  2221. lock_page_cgroup(pc);
  2222. mem = pc->mem_cgroup;
  2223. if (!PageCgroupUsed(pc))
  2224. goto unlock_out;
  2225. switch (ctype) {
  2226. case MEM_CGROUP_CHARGE_TYPE_MAPPED:
  2227. case MEM_CGROUP_CHARGE_TYPE_DROP:
  2228. /* See mem_cgroup_prepare_migration() */
  2229. if (page_mapped(page) || PageCgroupMigration(pc))
  2230. goto unlock_out;
  2231. break;
  2232. case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
  2233. if (!PageAnon(page)) { /* Shared memory */
  2234. if (page->mapping && !page_is_file_cache(page))
  2235. goto unlock_out;
  2236. } else if (page_mapped(page)) /* Anon */
  2237. goto unlock_out;
  2238. break;
  2239. default:
  2240. break;
  2241. }
  2242. mem_cgroup_charge_statistics(mem, pc, false);
  2243. ClearPageCgroupUsed(pc);
  2244. /*
  2245. * pc->mem_cgroup is not cleared here. It will be accessed when it's
2246. * freed from the LRU. This is safe because an uncharged page is expected not
2247. * to be reused (it will be freed soon). The exception is SwapCache, which is
2248. * handled by special functions.
  2249. */
  2250. unlock_page_cgroup(pc);
  2251. /*
  2252. * even after unlock, we have mem->res.usage here and this memcg
  2253. * will never be freed.
  2254. */
  2255. memcg_check_events(mem, page);
  2256. if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
  2257. mem_cgroup_swap_statistics(mem, true);
  2258. mem_cgroup_get(mem);
  2259. }
  2260. if (!mem_cgroup_is_root(mem))
  2261. __do_uncharge(mem, ctype);
  2262. return mem;
  2263. unlock_out:
  2264. unlock_page_cgroup(pc);
  2265. return NULL;
  2266. }
  2267. void mem_cgroup_uncharge_page(struct page *page)
  2268. {
  2269. /* early check. */
  2270. if (page_mapped(page))
  2271. return;
  2272. if (page->mapping && !PageAnon(page))
  2273. return;
  2274. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
  2275. }
  2276. void mem_cgroup_uncharge_cache_page(struct page *page)
  2277. {
  2278. VM_BUG_ON(page_mapped(page));
  2279. VM_BUG_ON(page->mapping);
  2280. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
  2281. }
  2282. /*
2283. * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
2284. * In those cases, pages are freed continuously and we can expect the pages
2285. * to be in the same memcg. Each of these calls itself limits the number of
2286. * pages freed at once, so uncharge_start/end() is called properly.
2287. * This may be called more than once (typically twice) in a context.
  2288. */
  2289. void mem_cgroup_uncharge_start(void)
  2290. {
  2291. current->memcg_batch.do_batch++;
  2292. /* We can do nest. */
  2293. if (current->memcg_batch.do_batch == 1) {
  2294. current->memcg_batch.memcg = NULL;
  2295. current->memcg_batch.bytes = 0;
  2296. current->memcg_batch.memsw_bytes = 0;
  2297. }
  2298. }
  2299. void mem_cgroup_uncharge_end(void)
  2300. {
  2301. struct memcg_batch_info *batch = &current->memcg_batch;
  2302. if (!batch->do_batch)
  2303. return;
  2304. batch->do_batch--;
  2305. if (batch->do_batch) /* If stacked, do nothing. */
  2306. return;
  2307. if (!batch->memcg)
  2308. return;
  2309. /*
  2310. * This "batch->memcg" is valid without any css_get/put etc...
2311. * because we hide charges behind us.
  2312. */
  2313. if (batch->bytes)
  2314. res_counter_uncharge(&batch->memcg->res, batch->bytes);
  2315. if (batch->memsw_bytes)
  2316. res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
  2317. memcg_oom_recover(batch->memcg);
  2318. /* forget this pointer (for sanity check) */
  2319. batch->memcg = NULL;
  2320. }
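/*
 * Sketch of the batching protocol above (illustrative only; the real users
 * are the unmap/truncate paths): bracket a run of per-page uncharges with
 * start/end so the res_counter is hit once instead of once per page.
 */
#if 0	/* illustrative only */
static void uncharge_many_example(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();		/* begin coalescing into current->memcg_batch */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);
	mem_cgroup_uncharge_end();		/* one res_counter_uncharge() for the whole run */
}
#endif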
  2321. #ifdef CONFIG_SWAP
  2322. /*
  2323. * called after __delete_from_swap_cache() and drop "page" account.
  2324. * memcg information is recorded to swap_cgroup of "ent"
  2325. */
  2326. void
  2327. mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
  2328. {
  2329. struct mem_cgroup *memcg;
  2330. int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
  2331. if (!swapout) /* this was a swap cache but the swap is unused ! */
  2332. ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
  2333. memcg = __mem_cgroup_uncharge_common(page, ctype);
  2334. /*
  2335. * record memcg information, if swapout && memcg != NULL,
  2336. * mem_cgroup_get() was called in uncharge().
  2337. */
  2338. if (do_swap_account && swapout && memcg)
  2339. swap_cgroup_record(ent, css_id(&memcg->css));
  2340. }
  2341. #endif
  2342. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  2343. /*
  2344. * called from swap_entry_free(). remove record in swap_cgroup and
  2345. * uncharge "memsw" account.
  2346. */
  2347. void mem_cgroup_uncharge_swap(swp_entry_t ent)
  2348. {
  2349. struct mem_cgroup *memcg;
  2350. unsigned short id;
  2351. if (!do_swap_account)
  2352. return;
  2353. id = swap_cgroup_record(ent, 0);
  2354. rcu_read_lock();
  2355. memcg = mem_cgroup_lookup(id);
  2356. if (memcg) {
  2357. /*
  2358. * We uncharge this because swap is freed.
2359. * This memcg can be an obsolete one. We avoid calling css_tryget
  2360. */
  2361. if (!mem_cgroup_is_root(memcg))
  2362. res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
  2363. mem_cgroup_swap_statistics(memcg, false);
  2364. mem_cgroup_put(memcg);
  2365. }
  2366. rcu_read_unlock();
  2367. }
  2368. /**
  2369. * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
  2370. * @entry: swap entry to be moved
  2371. * @from: mem_cgroup which the entry is moved from
  2372. * @to: mem_cgroup which the entry is moved to
  2373. * @need_fixup: whether we should fixup res_counters and refcounts.
  2374. *
  2375. * It succeeds only when the swap_cgroup's record for this entry is the same
  2376. * as the mem_cgroup's id of @from.
  2377. *
  2378. * Returns 0 on success, -EINVAL on failure.
  2379. *
  2380. * The caller must have charged to @to, IOW, called res_counter_charge() about
  2381. * both res and memsw, and called css_get().
  2382. */
  2383. static int mem_cgroup_move_swap_account(swp_entry_t entry,
  2384. struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
  2385. {
  2386. unsigned short old_id, new_id;
  2387. old_id = css_id(&from->css);
  2388. new_id = css_id(&to->css);
  2389. if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
  2390. mem_cgroup_swap_statistics(from, false);
  2391. mem_cgroup_swap_statistics(to, true);
  2392. /*
  2393. * This function is only called from task migration context now.
  2394. * It postpones res_counter and refcount handling till the end
  2395. * of task migration(mem_cgroup_clear_mc()) for performance
  2396. * improvement. But we cannot postpone mem_cgroup_get(to)
  2397. * because if the process that has been moved to @to does
  2398. * swap-in, the refcount of @to might be decreased to 0.
  2399. */
  2400. mem_cgroup_get(to);
  2401. if (need_fixup) {
  2402. if (!mem_cgroup_is_root(from))
  2403. res_counter_uncharge(&from->memsw, PAGE_SIZE);
  2404. mem_cgroup_put(from);
  2405. /*
  2406. * we charged both to->res and to->memsw, so we should
  2407. * uncharge to->res.
  2408. */
  2409. if (!mem_cgroup_is_root(to))
  2410. res_counter_uncharge(&to->res, PAGE_SIZE);
  2411. }
  2412. return 0;
  2413. }
  2414. return -EINVAL;
  2415. }
  2416. #else
  2417. static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
  2418. struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
  2419. {
  2420. return -EINVAL;
  2421. }
  2422. #endif
  2423. /*
  2424. * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  2425. * page belongs to.
  2426. */
  2427. int mem_cgroup_prepare_migration(struct page *page,
  2428. struct page *newpage, struct mem_cgroup **ptr)
  2429. {
  2430. struct page_cgroup *pc;
  2431. struct mem_cgroup *mem = NULL;
  2432. enum charge_type ctype;
  2433. int ret = 0;
  2434. if (mem_cgroup_disabled())
  2435. return 0;
  2436. pc = lookup_page_cgroup(page);
  2437. lock_page_cgroup(pc);
  2438. if (PageCgroupUsed(pc)) {
  2439. mem = pc->mem_cgroup;
  2440. css_get(&mem->css);
  2441. /*
  2442. * At migrating an anonymous page, its mapcount goes down
  2443. * to 0 and uncharge() will be called. But, even if it's fully
  2444. * unmapped, migration may fail and this page has to be
  2445. * charged again. We set MIGRATION flag here and delay uncharge
  2446. * until end_migration() is called
  2447. *
  2448. * Corner Case Thinking
  2449. * A)
2450. * When the old page was mapped as Anon and it's unmapped and freed
2451. * while migration is ongoing:
2452. * If unmap finds the old page, uncharge() of it will be delayed
2453. * until end_migration(). If unmap finds a new page, it's
2454. * uncharged when its mapcount goes from 1 to 0. If the unmap code
2455. * finds a swap_migration_entry, the new page will not be mapped
2456. * and end_migration() will find it (mapcount==0).
2457. *
2458. * B)
2459. * When the old page was mapped but migration fails, the kernel
2460. * remaps it. A charge for it is kept by the MIGRATION flag even
2461. * if the mapcount goes down to 0. We can remap successfully
2462. * without charging it again.
2463. *
2464. * C)
2465. * The "old" page is under lock_page() until the end of
2466. * migration, so the old page itself will not be swapped out.
2467. * If the new page is swapped out before end_migration, our
2468. * hook into the usual swap-out path will catch the event.
  2469. */
  2470. if (PageAnon(page))
  2471. SetPageCgroupMigration(pc);
  2472. }
  2473. unlock_page_cgroup(pc);
  2474. /*
  2475. * If the page is not charged at this point,
  2476. * we return here.
  2477. */
  2478. if (!mem)
  2479. return 0;
  2480. *ptr = mem;
  2481. ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
  2482. css_put(&mem->css);/* drop extra refcnt */
  2483. if (ret || *ptr == NULL) {
  2484. if (PageAnon(page)) {
  2485. lock_page_cgroup(pc);
  2486. ClearPageCgroupMigration(pc);
  2487. unlock_page_cgroup(pc);
  2488. /*
  2489. * The old page may be fully unmapped while we kept it.
  2490. */
  2491. mem_cgroup_uncharge_page(page);
  2492. }
  2493. return -ENOMEM;
  2494. }
  2495. /*
2496. * We charge the new page before it's used/mapped. So, even if unlock_page()
2497. * is called before end_migration, we can catch all events on this new
2498. * page. In the case the new page is migrated but not remapped, the new page's
2499. * mapcount will finally be 0 and we call uncharge in end_migration().
  2500. */
  2501. pc = lookup_page_cgroup(newpage);
  2502. if (PageAnon(page))
  2503. ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
  2504. else if (page_is_file_cache(page))
  2505. ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
  2506. else
  2507. ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
  2508. __mem_cgroup_commit_charge(mem, pc, ctype);
  2509. return ret;
  2510. }
  2511. /* remove redundant charge if migration failed*/
  2512. void mem_cgroup_end_migration(struct mem_cgroup *mem,
  2513. struct page *oldpage, struct page *newpage)
  2514. {
  2515. struct page *used, *unused;
  2516. struct page_cgroup *pc;
  2517. if (!mem)
  2518. return;
  2519. /* blocks rmdir() */
  2520. cgroup_exclude_rmdir(&mem->css);
  2521. /* at migration success, oldpage->mapping is NULL. */
  2522. if (oldpage->mapping) {
  2523. used = oldpage;
  2524. unused = newpage;
  2525. } else {
  2526. used = newpage;
  2527. unused = oldpage;
  2528. }
  2529. /*
  2530. * We disallowed uncharge of pages under migration because mapcount
2531. * of the page temporarily goes down to zero.
2532. * Clear the flag and check whether the page should be charged.
  2533. */
  2534. pc = lookup_page_cgroup(oldpage);
  2535. lock_page_cgroup(pc);
  2536. ClearPageCgroupMigration(pc);
  2537. unlock_page_cgroup(pc);
  2538. __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
  2539. /*
2540. * If a page is a file cache, the radix-tree replacement is atomic
2541. * and we can skip this check. When it was an Anon page, its mapcount
2542. * goes down to 0. But because we added the MIGRATION flag, it's not
2543. * uncharged yet. There are several cases, but the page->mapcount check
2544. * and the USED bit check in mem_cgroup_uncharge_page() do enough
2545. * checking. (See prepare_charge() also.)
  2546. */
  2547. if (PageAnon(used))
  2548. mem_cgroup_uncharge_page(used);
  2549. /*
  2550. * At migration, we may charge account against cgroup which has no
  2551. * tasks.
  2552. * So, rmdir()->pre_destroy() can be called while we do this charge.
  2553. * In that case, we need to call pre_destroy() again. check it here.
  2554. */
  2555. cgroup_release_and_wakeup_rmdir(&mem->css);
  2556. }
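/*
 * How the two migration hooks above pair up, in condensed form (illustrative
 * only; the real caller is the page migration code): prepare charges the new
 * page and marks the old one, end_migration() settles whichever copy lost.
 */
#if 0	/* illustrative only */
static int migrate_one_page_example(struct page *old, struct page *new)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	ret = mem_cgroup_prepare_migration(old, new, &mem);
	if (ret)
		return ret;	/* charge failed; the caller would abort migration */
	/* ... copy contents and switch mappings; may fail and fall back to "old" ... */
	mem_cgroup_end_migration(mem, old, new);
	return 0;
}
#endif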
  2557. /*
  2558. * A call to try to shrink memory usage on charge failure at shmem's swapin.
  2559. * Calling hierarchical_reclaim is not enough because we should update
  2560. * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2561. * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
2562. * not from the memcg which this page would be charged to.
2563. * try_charge_swapin does all of this work properly.
  2564. */
  2565. int mem_cgroup_shmem_charge_fallback(struct page *page,
  2566. struct mm_struct *mm,
  2567. gfp_t gfp_mask)
  2568. {
  2569. struct mem_cgroup *mem = NULL;
  2570. int ret;
  2571. if (mem_cgroup_disabled())
  2572. return 0;
  2573. ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
  2574. if (!ret)
  2575. mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
  2576. return ret;
  2577. }
  2578. static DEFINE_MUTEX(set_limit_mutex);
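/*
 * Shrink or grow memory.limit_in_bytes. Under set_limit_mutex the new
 * limit is only accepted if it does not exceed the mem+swap limit; if
 * setting it fails because usage is still above the new value, reclaim
 * from the hierarchy and retry until the per-child retry budget runs out.
 */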
  2579. static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
  2580. unsigned long long val)
  2581. {
  2582. int retry_count;
  2583. u64 memswlimit, memlimit;
  2584. int ret = 0;
  2585. int children = mem_cgroup_count_children(memcg);
  2586. u64 curusage, oldusage;
  2587. int enlarge;
  2588. /*
  2589. * For keeping hierarchical_reclaim simple, how long we should retry
  2590. * depends on the caller. We set our retry count to be a function
  2591. * of the number of children which we should visit in this loop.
  2592. */
  2593. retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
  2594. oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  2595. enlarge = 0;
  2596. while (retry_count) {
  2597. if (signal_pending(current)) {
  2598. ret = -EINTR;
  2599. break;
  2600. }
  2601. /*
  2602. * Rather than hiding all of this in some function, do it in an
  2603. * open-coded manner so you can see what it really does.
  2604. * We have to guarantee mem->res.limit <= mem->memsw.limit.
  2605. */
  2606. mutex_lock(&set_limit_mutex);
  2607. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  2608. if (memswlimit < val) {
  2609. ret = -EINVAL;
  2610. mutex_unlock(&set_limit_mutex);
  2611. break;
  2612. }
  2613. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  2614. if (memlimit < val)
  2615. enlarge = 1;
  2616. ret = res_counter_set_limit(&memcg->res, val);
  2617. if (!ret) {
  2618. if (memswlimit == val)
  2619. memcg->memsw_is_minimum = true;
  2620. else
  2621. memcg->memsw_is_minimum = false;
  2622. }
  2623. mutex_unlock(&set_limit_mutex);
  2624. if (!ret)
  2625. break;
  2626. mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
  2627. MEM_CGROUP_RECLAIM_SHRINK);
  2628. curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  2629. /* Usage is reduced ? */
  2630. if (curusage >= oldusage)
  2631. retry_count--;
  2632. else
  2633. oldusage = curusage;
  2634. }
  2635. if (!ret && enlarge)
  2636. memcg_oom_recover(memcg);
  2637. return ret;
  2638. }
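/*
 * Counterpart of mem_cgroup_resize_limit() for memsw.limit_in_bytes.
 * The mem+swap limit may never be smaller than the memory limit, and
 * reclaim here uses MEM_CGROUP_RECLAIM_NOSWAP because swapping pages
 * out cannot reduce mem+swap usage.
 */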
  2639. static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
  2640. unsigned long long val)
  2641. {
  2642. int retry_count;
  2643. u64 memlimit, memswlimit, oldusage, curusage;
  2644. int children = mem_cgroup_count_children(memcg);
  2645. int ret = -EBUSY;
  2646. int enlarge = 0;
  2647. /* see mem_cgroup_resize_limit() */
  2648. retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
  2649. oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  2650. while (retry_count) {
  2651. if (signal_pending(current)) {
  2652. ret = -EINTR;
  2653. break;
  2654. }
  2655. /*
  2656. * Rather than hiding all of this in some function, do it in an
  2657. * open-coded manner so you can see what it really does.
  2658. * We have to guarantee mem->res.limit <= mem->memsw.limit.
  2659. */
  2660. mutex_lock(&set_limit_mutex);
  2661. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  2662. if (memlimit > val) {
  2663. ret = -EINVAL;
  2664. mutex_unlock(&set_limit_mutex);
  2665. break;
  2666. }
  2667. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  2668. if (memswlimit < val)
  2669. enlarge = 1;
  2670. ret = res_counter_set_limit(&memcg->memsw, val);
  2671. if (!ret) {
  2672. if (memlimit == val)
  2673. memcg->memsw_is_minimum = true;
  2674. else
  2675. memcg->memsw_is_minimum = false;
  2676. }
  2677. mutex_unlock(&set_limit_mutex);
  2678. if (!ret)
  2679. break;
  2680. mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
  2681. MEM_CGROUP_RECLAIM_NOSWAP |
  2682. MEM_CGROUP_RECLAIM_SHRINK);
  2683. curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  2684. /* Usage is reduced ? */
  2685. if (curusage >= oldusage)
  2686. retry_count--;
  2687. else
  2688. oldusage = curusage;
  2689. }
  2690. if (!ret && enlarge)
  2691. memcg_oom_recover(memcg);
  2692. return ret;
  2693. }
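/*
 * Soft limit reclaim for one zone, used only for order-0 allocations.
 * Repeatedly pick the memcg with the largest soft-limit excess from the
 * per-zone RB tree, reclaim from its hierarchy, and re-insert it keyed
 * by its remaining excess (no tree ops when the excess drops to 0).
 */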
  2694. unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
  2695. gfp_t gfp_mask)
  2696. {
  2697. unsigned long nr_reclaimed = 0;
  2698. struct mem_cgroup_per_zone *mz, *next_mz = NULL;
  2699. unsigned long reclaimed;
  2700. int loop = 0;
  2701. struct mem_cgroup_tree_per_zone *mctz;
  2702. unsigned long long excess;
  2703. if (order > 0)
  2704. return 0;
  2705. mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
  2706. /*
  2707. * This loop can run for a while, especially if mem_cgroups continuously
  2708. * keep exceeding their soft limit and putting the system under
  2709. * pressure
  2710. */
  2711. do {
  2712. if (next_mz)
  2713. mz = next_mz;
  2714. else
  2715. mz = mem_cgroup_largest_soft_limit_node(mctz);
  2716. if (!mz)
  2717. break;
  2718. reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
  2719. gfp_mask,
  2720. MEM_CGROUP_RECLAIM_SOFT);
  2721. nr_reclaimed += reclaimed;
  2722. spin_lock(&mctz->lock);
  2723. /*
  2724. * If we failed to reclaim anything from this memory cgroup
  2725. * it is time to move on to the next cgroup
  2726. */
  2727. next_mz = NULL;
  2728. if (!reclaimed) {
  2729. do {
  2730. /*
  2731. * Loop until we find yet another one.
  2732. *
  2733. * By the time we get the soft_limit lock
  2734. * again, someone might have added the
  2735. * group back on the RB tree. Iterate to
  2736. * make sure we get a different mem.
  2737. * mem_cgroup_largest_soft_limit_node returns
  2738. * NULL if no other cgroup is present on
  2739. * the tree
  2740. */
  2741. next_mz =
  2742. __mem_cgroup_largest_soft_limit_node(mctz);
  2743. if (next_mz == mz) {
  2744. css_put(&next_mz->mem->css);
  2745. next_mz = NULL;
  2746. } else /* next_mz == NULL or other memcg */
  2747. break;
  2748. } while (1);
  2749. }
  2750. __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
  2751. excess = res_counter_soft_limit_excess(&mz->mem->res);
  2752. /*
  2753. * One school of thought says that we should not add
  2754. * back the node to the tree if reclaim returns 0.
  2755. * But our reclaim could return 0 simply because, due
  2756. * to priority, we are exposing a smaller subset of
  2757. * memory to reclaim from. Consider this as a longer
  2758. * term TODO.
  2759. */
  2760. /* If excess == 0, no tree ops */
  2761. __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
  2762. spin_unlock(&mctz->lock);
  2763. css_put(&mz->mem->css);
  2764. loop++;
  2765. /*
  2766. * Could not reclaim anything and there are no more
  2767. * mem cgroups to try or we seem to be looping without
  2768. * reclaiming anything.
  2769. */
  2770. if (!nr_reclaimed &&
  2771. (next_mz == NULL ||
  2772. loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
  2773. break;
  2774. } while (!nr_reclaimed);
  2775. if (next_mz)
  2776. css_put(&next_mz->mem->css);
  2777. return nr_reclaimed;
  2778. }
  2779. /*
  2780. * This routine traverses the page_cgroups on the given list and drops them all.
  2781. * *And* this routine doesn't reclaim the pages themselves; it just removes the page_cgroups.
  2782. */
  2783. static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
  2784. int node, int zid, enum lru_list lru)
  2785. {
  2786. struct zone *zone;
  2787. struct mem_cgroup_per_zone *mz;
  2788. struct page_cgroup *pc, *busy;
  2789. unsigned long flags, loop;
  2790. struct list_head *list;
  2791. int ret = 0;
  2792. zone = &NODE_DATA(node)->node_zones[zid];
  2793. mz = mem_cgroup_zoneinfo(mem, node, zid);
  2794. list = &mz->lists[lru];
  2795. loop = MEM_CGROUP_ZSTAT(mz, lru);
  2796. /* give some margin against EBUSY etc...*/
  2797. loop += 256;
  2798. busy = NULL;
  2799. while (loop--) {
  2800. ret = 0;
  2801. spin_lock_irqsave(&zone->lru_lock, flags);
  2802. if (list_empty(list)) {
  2803. spin_unlock_irqrestore(&zone->lru_lock, flags);
  2804. break;
  2805. }
  2806. pc = list_entry(list->prev, struct page_cgroup, lru);
  2807. if (busy == pc) {
  2808. list_move(&pc->lru, list);
  2809. busy = NULL;
  2810. spin_unlock_irqrestore(&zone->lru_lock, flags);
  2811. continue;
  2812. }
  2813. spin_unlock_irqrestore(&zone->lru_lock, flags);
  2814. ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
  2815. if (ret == -ENOMEM)
  2816. break;
  2817. if (ret == -EBUSY || ret == -EINVAL) {
  2818. /* found lock contention or "pc" is obsolete. */
  2819. busy = pc;
  2820. cond_resched();
  2821. } else
  2822. busy = NULL;
  2823. }
  2824. if (!ret && !list_empty(list))
  2825. return -EBUSY;
  2826. return ret;
  2827. }
  2828. /*
  2829. * Make the mem_cgroup's charge 0 if there is no task.
  2830. * This enables deleting this mem_cgroup.
  2831. */
  2832. static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
  2833. {
  2834. int ret;
  2835. int node, zid, shrink;
  2836. int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2837. struct cgroup *cgrp = mem->css.cgroup;
  2838. css_get(&mem->css);
  2839. shrink = 0;
  2840. /* should free all ? */
  2841. if (free_all)
  2842. goto try_to_free;
  2843. move_account:
  2844. do {
  2845. ret = -EBUSY;
  2846. if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
  2847. goto out;
  2848. ret = -EINTR;
  2849. if (signal_pending(current))
  2850. goto out;
  2851. /* This is for making all *used* pages be on the LRU. */
  2852. lru_add_drain_all();
  2853. drain_all_stock_sync();
  2854. ret = 0;
  2855. mem_cgroup_start_move(mem);
  2856. for_each_node_state(node, N_HIGH_MEMORY) {
  2857. for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
  2858. enum lru_list l;
  2859. for_each_lru(l) {
  2860. ret = mem_cgroup_force_empty_list(mem,
  2861. node, zid, l);
  2862. if (ret)
  2863. break;
  2864. }
  2865. }
  2866. if (ret)
  2867. break;
  2868. }
  2869. mem_cgroup_end_move(mem);
  2870. memcg_oom_recover(mem);
  2871. /* it seems parent cgroup doesn't have enough mem */
  2872. if (ret == -ENOMEM)
  2873. goto try_to_free;
  2874. cond_resched();
  2875. /* "ret" should also be checked to ensure all lists are empty. */
  2876. } while (mem->res.usage > 0 || ret);
  2877. out:
  2878. css_put(&mem->css);
  2879. return ret;
  2880. try_to_free:
  2881. /* returns EBUSY if there is a task or if we come here twice. */
  2882. if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
  2883. ret = -EBUSY;
  2884. goto out;
  2885. }
  2886. /* we call try-to-free pages to make this cgroup empty */
  2887. lru_add_drain_all();
  2888. /* try to free all pages in this cgroup */
  2889. shrink = 1;
  2890. while (nr_retries && mem->res.usage > 0) {
  2891. int progress;
  2892. if (signal_pending(current)) {
  2893. ret = -EINTR;
  2894. goto out;
  2895. }
  2896. progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
  2897. false, get_swappiness(mem));
  2898. if (!progress) {
  2899. nr_retries--;
  2900. /* maybe some writeback is necessary */
  2901. congestion_wait(BLK_RW_ASYNC, HZ/10);
  2902. }
  2903. }
  2904. lru_add_drain();
  2905. /* try move_account...there may be some *locked* pages. */
  2906. goto move_account;
  2907. }
  2908. int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
  2909. {
  2910. return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
  2911. }
  2912. static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
  2913. {
  2914. return mem_cgroup_from_cont(cont)->use_hierarchy;
  2915. }
  2916. static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
  2917. u64 val)
  2918. {
  2919. int retval = 0;
  2920. struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
  2921. struct cgroup *parent = cont->parent;
  2922. struct mem_cgroup *parent_mem = NULL;
  2923. if (parent)
  2924. parent_mem = mem_cgroup_from_cont(parent);
  2925. cgroup_lock();
  2926. /*
  2927. * If parent's use_hierarchy is set, we can't make any modifications
  2928. * in the child subtrees. If it is unset, then the change can
  2929. * occur, provided the current cgroup has no children.
  2930. *
  2931. * For the root cgroup, parent_mem is NULL; we allow the value to be
  2932. * set if there are no children.
  2933. */
  2934. if ((!parent_mem || !parent_mem->use_hierarchy) &&
  2935. (val == 1 || val == 0)) {
  2936. if (list_empty(&cont->children))
  2937. mem->use_hierarchy = val;
  2938. else
  2939. retval = -EBUSY;
  2940. } else
  2941. retval = -EINVAL;
  2942. cgroup_unlock();
  2943. return retval;
  2944. }
  2945. static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
  2946. enum mem_cgroup_stat_index idx)
  2947. {
  2948. struct mem_cgroup *iter;
  2949. s64 val = 0;
  2950. /* each per-cpu value can be negative, so use s64 */
  2951. for_each_mem_cgroup_tree(iter, mem)
  2952. val += mem_cgroup_read_stat(iter, idx);
  2953. if (val < 0) /* race ? */
  2954. val = 0;
  2955. return val;
  2956. }
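/*
 * Usage in bytes. Non-root memcgs read it from the res_counter; the root
 * memcg bypasses the counters, so sum the CACHE and RSS statistics over
 * the whole tree instead (plus SWAPOUT when swap is included).
 */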
  2957. static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
  2958. {
  2959. u64 val;
  2960. if (!mem_cgroup_is_root(mem)) {
  2961. if (!swap)
  2962. return res_counter_read_u64(&mem->res, RES_USAGE);
  2963. else
  2964. return res_counter_read_u64(&mem->memsw, RES_USAGE);
  2965. }
  2966. val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
  2967. val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
  2968. if (swap)
  2969. val += mem_cgroup_get_recursive_idx_stat(mem,
  2970. MEM_CGROUP_STAT_SWAPOUT);
  2971. return val << PAGE_SHIFT;
  2972. }
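/* Read handler for the memory.* and memory.memsw.* res_counter files. */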
  2973. static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
  2974. {
  2975. struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
  2976. u64 val;
  2977. int type, name;
  2978. type = MEMFILE_TYPE(cft->private);
  2979. name = MEMFILE_ATTR(cft->private);
  2980. switch (type) {
  2981. case _MEM:
  2982. if (name == RES_USAGE)
  2983. val = mem_cgroup_usage(mem, false);
  2984. else
  2985. val = res_counter_read_u64(&mem->res, name);
  2986. break;
  2987. case _MEMSWAP:
  2988. if (name == RES_USAGE)
  2989. val = mem_cgroup_usage(mem, true);
  2990. else
  2991. val = res_counter_read_u64(&mem->memsw, name);
  2992. break;
  2993. default:
  2994. BUG();
  2995. break;
  2996. }
  2997. return val;
  2998. }
  2999. /*
  3000. * The user of this function is...
  3001. * RES_LIMIT.
  3002. */
  3003. static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
  3004. const char *buffer)
  3005. {
  3006. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  3007. int type, name;
  3008. unsigned long long val;
  3009. int ret;
  3010. type = MEMFILE_TYPE(cft->private);
  3011. name = MEMFILE_ATTR(cft->private);
  3012. switch (name) {
  3013. case RES_LIMIT:
  3014. if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
  3015. ret = -EINVAL;
  3016. break;
  3017. }
  3018. /* This function does all the necessary parsing...reuse it */
  3019. ret = res_counter_memparse_write_strategy(buffer, &val);
  3020. if (ret)
  3021. break;
  3022. if (type == _MEM)
  3023. ret = mem_cgroup_resize_limit(memcg, val);
  3024. else
  3025. ret = mem_cgroup_resize_memsw_limit(memcg, val);
  3026. break;
  3027. case RES_SOFT_LIMIT:
  3028. ret = res_counter_memparse_write_strategy(buffer, &val);
  3029. if (ret)
  3030. break;
  3031. /*
  3032. * For memsw, soft limits are hard to implement in terms
  3033. * of semantics; for now, we support soft limits only for
  3034. * memory control without swap
  3035. */
  3036. if (type == _MEM)
  3037. ret = res_counter_set_soft_limit(&memcg->res, val);
  3038. else
  3039. ret = -EINVAL;
  3040. break;
  3041. default:
  3042. ret = -EINVAL; /* should be BUG() ? */
  3043. break;
  3044. }
  3045. return ret;
  3046. }
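/*
 * Compute the effective limits seen by this memcg: the minimum memory
 * and mem+swap limits along the chain of ancestors that have
 * use_hierarchy enabled.
 */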
  3047. static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
  3048. unsigned long long *mem_limit, unsigned long long *memsw_limit)
  3049. {
  3050. struct cgroup *cgroup;
  3051. unsigned long long min_limit, min_memsw_limit, tmp;
  3052. min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3053. min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3054. cgroup = memcg->css.cgroup;
  3055. if (!memcg->use_hierarchy)
  3056. goto out;
  3057. while (cgroup->parent) {
  3058. cgroup = cgroup->parent;
  3059. memcg = mem_cgroup_from_cont(cgroup);
  3060. if (!memcg->use_hierarchy)
  3061. break;
  3062. tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3063. min_limit = min(min_limit, tmp);
  3064. tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3065. min_memsw_limit = min(min_memsw_limit, tmp);
  3066. }
  3067. out:
  3068. *mem_limit = min_limit;
  3069. *memsw_limit = min_memsw_limit;
  3070. return;
  3071. }
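/* Trigger handler: reset max_usage or failcnt on the memory or memsw counter. */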
  3072. static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
  3073. {
  3074. struct mem_cgroup *mem;
  3075. int type, name;
  3076. mem = mem_cgroup_from_cont(cont);
  3077. type = MEMFILE_TYPE(event);
  3078. name = MEMFILE_ATTR(event);
  3079. switch (name) {
  3080. case RES_MAX_USAGE:
  3081. if (type == _MEM)
  3082. res_counter_reset_max(&mem->res);
  3083. else
  3084. res_counter_reset_max(&mem->memsw);
  3085. break;
  3086. case RES_FAILCNT:
  3087. if (type == _MEM)
  3088. res_counter_reset_failcnt(&mem->res);
  3089. else
  3090. res_counter_reset_failcnt(&mem->memsw);
  3091. break;
  3092. }
  3093. return 0;
  3094. }
  3095. static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
  3096. struct cftype *cft)
  3097. {
  3098. return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
  3099. }
  3100. #ifdef CONFIG_MMU
  3101. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  3102. struct cftype *cft, u64 val)
  3103. {
  3104. struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  3105. if (val >= (1 << NR_MOVE_TYPE))
  3106. return -EINVAL;
  3107. /*
  3108. * We check this value several times in both can_attach() and
  3109. * attach(), so we need the cgroup lock to prevent this value from
  3110. * becoming inconsistent.
  3111. */
  3112. cgroup_lock();
  3113. mem->move_charge_at_immigrate = val;
  3114. cgroup_unlock();
  3115. return 0;
  3116. }
  3117. #else
  3118. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  3119. struct cftype *cft, u64 val)
  3120. {
  3121. return -ENOSYS;
  3122. }
  3123. #endif
  3124. /* For read statistics */
  3125. enum {
  3126. MCS_CACHE,
  3127. MCS_RSS,
  3128. MCS_FILE_MAPPED,
  3129. MCS_PGPGIN,
  3130. MCS_PGPGOUT,
  3131. MCS_SWAP,
  3132. MCS_INACTIVE_ANON,
  3133. MCS_ACTIVE_ANON,
  3134. MCS_INACTIVE_FILE,
  3135. MCS_ACTIVE_FILE,
  3136. MCS_UNEVICTABLE,
  3137. NR_MCS_STAT,
  3138. };
  3139. struct mcs_total_stat {
  3140. s64 stat[NR_MCS_STAT];
  3141. };
  3142. struct {
  3143. char *local_name;
  3144. char *total_name;
  3145. } memcg_stat_strings[NR_MCS_STAT] = {
  3146. {"cache", "total_cache"},
  3147. {"rss", "total_rss"},
  3148. {"mapped_file", "total_mapped_file"},
  3149. {"pgpgin", "total_pgpgin"},
  3150. {"pgpgout", "total_pgpgout"},
  3151. {"swap", "total_swap"},
  3152. {"inactive_anon", "total_inactive_anon"},
  3153. {"active_anon", "total_active_anon"},
  3154. {"inactive_file", "total_inactive_file"},
  3155. {"active_file", "total_active_file"},
  3156. {"unevictable", "total_unevictable"}
  3157. };
  3158. static void
  3159. mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
  3160. {
  3161. s64 val;
  3162. /* per cpu stat */
  3163. val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
  3164. s->stat[MCS_CACHE] += val * PAGE_SIZE;
  3165. val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
  3166. s->stat[MCS_RSS] += val * PAGE_SIZE;
  3167. val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
  3168. s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
  3169. val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
  3170. s->stat[MCS_PGPGIN] += val;
  3171. val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
  3172. s->stat[MCS_PGPGOUT] += val;
  3173. if (do_swap_account) {
  3174. val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
  3175. s->stat[MCS_SWAP] += val * PAGE_SIZE;
  3176. }
  3177. /* per zone stat */
  3178. val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
  3179. s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
  3180. val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
  3181. s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
  3182. val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
  3183. s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
  3184. val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
  3185. s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
  3186. val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
  3187. s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
  3188. }
  3189. static void
  3190. mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
  3191. {
  3192. struct mem_cgroup *iter;
  3193. for_each_mem_cgroup_tree(iter, mem)
  3194. mem_cgroup_get_local_stat(iter, s);
  3195. }
  3196. static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
  3197. struct cgroup_map_cb *cb)
  3198. {
  3199. struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
  3200. struct mcs_total_stat mystat;
  3201. int i;
  3202. memset(&mystat, 0, sizeof(mystat));
  3203. mem_cgroup_get_local_stat(mem_cont, &mystat);
  3204. for (i = 0; i < NR_MCS_STAT; i++) {
  3205. if (i == MCS_SWAP && !do_swap_account)
  3206. continue;
  3207. cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
  3208. }
  3209. /* Hierarchical information */
  3210. {
  3211. unsigned long long limit, memsw_limit;
  3212. memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
  3213. cb->fill(cb, "hierarchical_memory_limit", limit);
  3214. if (do_swap_account)
  3215. cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
  3216. }
  3217. memset(&mystat, 0, sizeof(mystat));
  3218. mem_cgroup_get_total_stat(mem_cont, &mystat);
  3219. for (i = 0; i < NR_MCS_STAT; i++) {
  3220. if (i == MCS_SWAP && !do_swap_account)
  3221. continue;
  3222. cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
  3223. }
  3224. #ifdef CONFIG_DEBUG_VM
  3225. cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
  3226. {
  3227. int nid, zid;
  3228. struct mem_cgroup_per_zone *mz;
  3229. unsigned long recent_rotated[2] = {0, 0};
  3230. unsigned long recent_scanned[2] = {0, 0};
  3231. for_each_online_node(nid)
  3232. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  3233. mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
  3234. recent_rotated[0] +=
  3235. mz->reclaim_stat.recent_rotated[0];
  3236. recent_rotated[1] +=
  3237. mz->reclaim_stat.recent_rotated[1];
  3238. recent_scanned[0] +=
  3239. mz->reclaim_stat.recent_scanned[0];
  3240. recent_scanned[1] +=
  3241. mz->reclaim_stat.recent_scanned[1];
  3242. }
  3243. cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
  3244. cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
  3245. cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
  3246. cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
  3247. }
  3248. #endif
  3249. return 0;
  3250. }
  3251. static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
  3252. {
  3253. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3254. return get_swappiness(memcg);
  3255. }
  3256. static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
  3257. u64 val)
  3258. {
  3259. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3260. struct mem_cgroup *parent;
  3261. if (val > 100)
  3262. return -EINVAL;
  3263. if (cgrp->parent == NULL)
  3264. return -EINVAL;
  3265. parent = mem_cgroup_from_cont(cgrp->parent);
  3266. cgroup_lock();
  3267. /* If under hierarchy, only empty-root can set this value */
  3268. if ((parent->use_hierarchy) ||
  3269. (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
  3270. cgroup_unlock();
  3271. return -EINVAL;
  3272. }
  3273. spin_lock(&memcg->reclaim_param_lock);
  3274. memcg->swappiness = val;
  3275. spin_unlock(&memcg->reclaim_param_lock);
  3276. cgroup_unlock();
  3277. return 0;
  3278. }
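/*
 * Signal every registered eventfd whose threshold lies between the last
 * recorded position (current_threshold) and the current usage, walking
 * the sorted array in whichever direction usage has moved, then record
 * the new position.
 */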
  3279. static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
  3280. {
  3281. struct mem_cgroup_threshold_ary *t;
  3282. u64 usage;
  3283. int i;
  3284. rcu_read_lock();
  3285. if (!swap)
  3286. t = rcu_dereference(memcg->thresholds.primary);
  3287. else
  3288. t = rcu_dereference(memcg->memsw_thresholds.primary);
  3289. if (!t)
  3290. goto unlock;
  3291. usage = mem_cgroup_usage(memcg, swap);
  3292. /*
  3293. * current_threshold points to the threshold just below or equal to the usage.
  3294. * If that is not true, a threshold was crossed after the last
  3295. * call of __mem_cgroup_threshold().
  3296. */
  3297. i = t->current_threshold;
  3298. /*
  3299. * Iterate backward over array of thresholds starting from
  3300. * current_threshold and check if a threshold is crossed.
  3301. * If none of thresholds below usage is crossed, we read
  3302. * only one element of the array here.
  3303. */
  3304. for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
  3305. eventfd_signal(t->entries[i].eventfd, 1);
  3306. /* i = current_threshold + 1 */
  3307. i++;
  3308. /*
  3309. * Iterate forward over array of thresholds starting from
  3310. * current_threshold+1 and check if a threshold is crossed.
  3311. * If none of thresholds above usage is crossed, we read
  3312. * only one element of the array here.
  3313. */
  3314. for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
  3315. eventfd_signal(t->entries[i].eventfd, 1);
  3316. /* Update current_threshold */
  3317. t->current_threshold = i - 1;
  3318. unlock:
  3319. rcu_read_unlock();
  3320. }
  3321. static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  3322. {
  3323. while (memcg) {
  3324. __mem_cgroup_threshold(memcg, false);
  3325. if (do_swap_account)
  3326. __mem_cgroup_threshold(memcg, true);
  3327. memcg = parent_mem_cgroup(memcg);
  3328. }
  3329. }
  3330. static int compare_thresholds(const void *a, const void *b)
  3331. {
  3332. const struct mem_cgroup_threshold *_a = a;
  3333. const struct mem_cgroup_threshold *_b = b;
  3334. /* thresholds are u64; a plain subtraction could overflow when cast to int */
  if (_a->threshold > _b->threshold)
  return 1;
  if (_a->threshold < _b->threshold)
  return -1;
  return 0;
  3335. }
  3336. static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
  3337. {
  3338. struct mem_cgroup_eventfd_list *ev;
  3339. list_for_each_entry(ev, &mem->oom_notify, list)
  3340. eventfd_signal(ev->eventfd, 1);
  3341. return 0;
  3342. }
  3343. static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
  3344. {
  3345. struct mem_cgroup *iter;
  3346. for_each_mem_cgroup_tree(iter, mem)
  3347. mem_cgroup_oom_notify_cb(iter);
  3348. }
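/*
 * Register an eventfd to be signalled when memory (or mem+swap) usage
 * crosses the given threshold. Thresholds live in a sorted,
 * RCU-protected array; registration builds a new array containing the
 * extra entry and publishes it with rcu_assign_pointer().
 */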
  3349. static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
  3350. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  3351. {
  3352. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3353. struct mem_cgroup_thresholds *thresholds;
  3354. struct mem_cgroup_threshold_ary *new;
  3355. int type = MEMFILE_TYPE(cft->private);
  3356. u64 threshold, usage;
  3357. int i, size, ret;
  3358. ret = res_counter_memparse_write_strategy(args, &threshold);
  3359. if (ret)
  3360. return ret;
  3361. mutex_lock(&memcg->thresholds_lock);
  3362. if (type == _MEM)
  3363. thresholds = &memcg->thresholds;
  3364. else if (type == _MEMSWAP)
  3365. thresholds = &memcg->memsw_thresholds;
  3366. else
  3367. BUG();
  3368. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  3369. /* Check if a threshold crossed before adding a new one */
  3370. if (thresholds->primary)
  3371. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
  3372. size = thresholds->primary ? thresholds->primary->size + 1 : 1;
  3373. /* Allocate memory for new array of thresholds */
  3374. new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
  3375. GFP_KERNEL);
  3376. if (!new) {
  3377. ret = -ENOMEM;
  3378. goto unlock;
  3379. }
  3380. new->size = size;
  3381. /* Copy thresholds (if any) to new array */
  3382. if (thresholds->primary) {
  3383. memcpy(new->entries, thresholds->primary->entries, (size - 1) *
  3384. sizeof(struct mem_cgroup_threshold));
  3385. }
  3386. /* Add new threshold */
  3387. new->entries[size - 1].eventfd = eventfd;
  3388. new->entries[size - 1].threshold = threshold;
  3389. /* Sort thresholds. Registering of new threshold isn't time-critical */
  3390. sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
  3391. compare_thresholds, NULL);
  3392. /* Find current threshold */
  3393. new->current_threshold = -1;
  3394. for (i = 0; i < size; i++) {
  3395. if (new->entries[i].threshold < usage) {
  3396. /*
  3397. * new->current_threshold will not be used until
  3398. * rcu_assign_pointer(), so it's safe to increment
  3399. * it here.
  3400. */
  3401. ++new->current_threshold;
  3402. }
  3403. }
  3404. /* Free old spare buffer and save old primary buffer as spare */
  3405. kfree(thresholds->spare);
  3406. thresholds->spare = thresholds->primary;
  3407. rcu_assign_pointer(thresholds->primary, new);
  3408. /* To be sure that nobody uses thresholds */
  3409. synchronize_rcu();
  3410. unlock:
  3411. mutex_unlock(&memcg->thresholds_lock);
  3412. return ret;
  3413. }
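/*
 * Unregister a usage threshold: rebuild the array into the spare buffer
 * without the entries matching this eventfd and swap it in under RCU.
 */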
  3414. static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
  3415. struct cftype *cft, struct eventfd_ctx *eventfd)
  3416. {
  3417. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3418. struct mem_cgroup_thresholds *thresholds;
  3419. struct mem_cgroup_threshold_ary *new;
  3420. int type = MEMFILE_TYPE(cft->private);
  3421. u64 usage;
  3422. int i, j, size;
  3423. mutex_lock(&memcg->thresholds_lock);
  3424. if (type == _MEM)
  3425. thresholds = &memcg->thresholds;
  3426. else if (type == _MEMSWAP)
  3427. thresholds = &memcg->memsw_thresholds;
  3428. else
  3429. BUG();
  3430. /*
  3431. * Something went wrong if we are trying to unregister a threshold
  3432. * when we don't have any thresholds
  3433. */
  3434. BUG_ON(!thresholds);
  3435. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  3436. /* Check if a threshold crossed before removing */
  3437. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
  3438. /* Calculate the new number of thresholds */
  3439. size = 0;
  3440. for (i = 0; i < thresholds->primary->size; i++) {
  3441. if (thresholds->primary->entries[i].eventfd != eventfd)
  3442. size++;
  3443. }
  3444. new = thresholds->spare;
  3445. /* Set thresholds array to NULL if we don't have thresholds */
  3446. if (!size) {
  3447. kfree(new);
  3448. new = NULL;
  3449. goto swap_buffers;
  3450. }
  3451. new->size = size;
  3452. /* Copy thresholds and find current threshold */
  3453. new->current_threshold = -1;
  3454. for (i = 0, j = 0; i < thresholds->primary->size; i++) {
  3455. if (thresholds->primary->entries[i].eventfd == eventfd)
  3456. continue;
  3457. new->entries[j] = thresholds->primary->entries[i];
  3458. if (new->entries[j].threshold < usage) {
  3459. /*
  3460. * new->current_threshold will not be used
  3461. * until rcu_assign_pointer(), so it's safe to increment
  3462. * it here.
  3463. */
  3464. ++new->current_threshold;
  3465. }
  3466. j++;
  3467. }
  3468. swap_buffers:
  3469. /* Swap primary and spare array */
  3470. thresholds->spare = thresholds->primary;
  3471. rcu_assign_pointer(thresholds->primary, new);
  3472. /* To be sure that nobody uses thresholds */
  3473. synchronize_rcu();
  3474. mutex_unlock(&memcg->thresholds_lock);
  3475. }
  3476. static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
  3477. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  3478. {
  3479. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3480. struct mem_cgroup_eventfd_list *event;
  3481. int type = MEMFILE_TYPE(cft->private);
  3482. BUG_ON(type != _OOM_TYPE);
  3483. event = kmalloc(sizeof(*event), GFP_KERNEL);
  3484. if (!event)
  3485. return -ENOMEM;
  3486. mutex_lock(&memcg_oom_mutex);
  3487. event->eventfd = eventfd;
  3488. list_add(&event->list, &memcg->oom_notify);
  3489. /* already in OOM ? */
  3490. if (atomic_read(&memcg->oom_lock))
  3491. eventfd_signal(eventfd, 1);
  3492. mutex_unlock(&memcg_oom_mutex);
  3493. return 0;
  3494. }
  3495. static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
  3496. struct cftype *cft, struct eventfd_ctx *eventfd)
  3497. {
  3498. struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  3499. struct mem_cgroup_eventfd_list *ev, *tmp;
  3500. int type = MEMFILE_TYPE(cft->private);
  3501. BUG_ON(type != _OOM_TYPE);
  3502. mutex_lock(&memcg_oom_mutex);
  3503. list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
  3504. if (ev->eventfd == eventfd) {
  3505. list_del(&ev->list);
  3506. kfree(ev);
  3507. }
  3508. }
  3509. mutex_unlock(&memcg_oom_mutex);
  3510. }
  3511. static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
  3512. struct cftype *cft, struct cgroup_map_cb *cb)
  3513. {
  3514. struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  3515. cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
  3516. if (atomic_read(&mem->oom_lock))
  3517. cb->fill(cb, "under_oom", 1);
  3518. else
  3519. cb->fill(cb, "under_oom", 0);
  3520. return 0;
  3521. }
  3522. static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
  3523. struct cftype *cft, u64 val)
  3524. {
  3525. struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  3526. struct mem_cgroup *parent;
  3527. /* cannot set to root cgroup and only 0 and 1 are allowed */
  3528. if (!cgrp->parent || !((val == 0) || (val == 1)))
  3529. return -EINVAL;
  3530. parent = mem_cgroup_from_cont(cgrp->parent);
  3531. cgroup_lock();
  3532. /* oom-kill-disable is a flag for subhierarchy. */
  3533. if ((parent->use_hierarchy) ||
  3534. (mem->use_hierarchy && !list_empty(&cgrp->children))) {
  3535. cgroup_unlock();
  3536. return -EINVAL;
  3537. }
  3538. mem->oom_kill_disable = val;
  3539. if (!val)
  3540. memcg_oom_recover(mem);
  3541. cgroup_unlock();
  3542. return 0;
  3543. }
  3544. static struct cftype mem_cgroup_files[] = {
  3545. {
  3546. .name = "usage_in_bytes",
  3547. .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
  3548. .read_u64 = mem_cgroup_read,
  3549. .register_event = mem_cgroup_usage_register_event,
  3550. .unregister_event = mem_cgroup_usage_unregister_event,
  3551. },
  3552. {
  3553. .name = "max_usage_in_bytes",
  3554. .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
  3555. .trigger = mem_cgroup_reset,
  3556. .read_u64 = mem_cgroup_read,
  3557. },
  3558. {
  3559. .name = "limit_in_bytes",
  3560. .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
  3561. .write_string = mem_cgroup_write,
  3562. .read_u64 = mem_cgroup_read,
  3563. },
  3564. {
  3565. .name = "soft_limit_in_bytes",
  3566. .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
  3567. .write_string = mem_cgroup_write,
  3568. .read_u64 = mem_cgroup_read,
  3569. },
  3570. {
  3571. .name = "failcnt",
  3572. .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
  3573. .trigger = mem_cgroup_reset,
  3574. .read_u64 = mem_cgroup_read,
  3575. },
  3576. {
  3577. .name = "stat",
  3578. .read_map = mem_control_stat_show,
  3579. },
  3580. {
  3581. .name = "force_empty",
  3582. .trigger = mem_cgroup_force_empty_write,
  3583. },
  3584. {
  3585. .name = "use_hierarchy",
  3586. .write_u64 = mem_cgroup_hierarchy_write,
  3587. .read_u64 = mem_cgroup_hierarchy_read,
  3588. },
  3589. {
  3590. .name = "swappiness",
  3591. .read_u64 = mem_cgroup_swappiness_read,
  3592. .write_u64 = mem_cgroup_swappiness_write,
  3593. },
  3594. {
  3595. .name = "move_charge_at_immigrate",
  3596. .read_u64 = mem_cgroup_move_charge_read,
  3597. .write_u64 = mem_cgroup_move_charge_write,
  3598. },
  3599. {
  3600. .name = "oom_control",
  3601. .read_map = mem_cgroup_oom_control_read,
  3602. .write_u64 = mem_cgroup_oom_control_write,
  3603. .register_event = mem_cgroup_oom_register_event,
  3604. .unregister_event = mem_cgroup_oom_unregister_event,
  3605. .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
  3606. },
  3607. };
  3608. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  3609. static struct cftype memsw_cgroup_files[] = {
  3610. {
  3611. .name = "memsw.usage_in_bytes",
  3612. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
  3613. .read_u64 = mem_cgroup_read,
  3614. .register_event = mem_cgroup_usage_register_event,
  3615. .unregister_event = mem_cgroup_usage_unregister_event,
  3616. },
  3617. {
  3618. .name = "memsw.max_usage_in_bytes",
  3619. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
  3620. .trigger = mem_cgroup_reset,
  3621. .read_u64 = mem_cgroup_read,
  3622. },
  3623. {
  3624. .name = "memsw.limit_in_bytes",
  3625. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
  3626. .write_string = mem_cgroup_write,
  3627. .read_u64 = mem_cgroup_read,
  3628. },
  3629. {
  3630. .name = "memsw.failcnt",
  3631. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
  3632. .trigger = mem_cgroup_reset,
  3633. .read_u64 = mem_cgroup_read,
  3634. },
  3635. };
  3636. static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
  3637. {
  3638. if (!do_swap_account)
  3639. return 0;
  3640. return cgroup_add_files(cont, ss, memsw_cgroup_files,
  3641. ARRAY_SIZE(memsw_cgroup_files));
  3642. };
  3643. #else
  3644. static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
  3645. {
  3646. return 0;
  3647. }
  3648. #endif
  3649. static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
  3650. {
  3651. struct mem_cgroup_per_node *pn;
  3652. struct mem_cgroup_per_zone *mz;
  3653. enum lru_list l;
  3654. int zone, tmp = node;
  3655. /*
  3656. * This routine is called against possible nodes.
  3657. * But it's a BUG to call kmalloc() against an offline node.
  3658. *
  3659. * TODO: this routine can waste a lot of memory for nodes which will
  3660. * never be onlined. It's better to use a memory hotplug callback
  3661. * function.
  3662. */
  3663. if (!node_state(node, N_NORMAL_MEMORY))
  3664. tmp = -1;
  3665. pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
  3666. if (!pn)
  3667. return 1;
  3668. mem->info.nodeinfo[node] = pn;
  3669. memset(pn, 0, sizeof(*pn));
  3670. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  3671. mz = &pn->zoneinfo[zone];
  3672. for_each_lru(l)
  3673. INIT_LIST_HEAD(&mz->lists[l]);
  3674. mz->usage_in_excess = 0;
  3675. mz->on_tree = false;
  3676. mz->mem = mem;
  3677. }
  3678. return 0;
  3679. }
  3680. static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
  3681. {
  3682. kfree(mem->info.nodeinfo[node]);
  3683. }
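/*
 * Allocate a zeroed struct mem_cgroup and its per-cpu statistics. The
 * structure can be large when MAX_NUMNODES is big, so fall back to
 * vmalloc() when it does not fit into a single page.
 */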
  3684. static struct mem_cgroup *mem_cgroup_alloc(void)
  3685. {
  3686. struct mem_cgroup *mem;
  3687. int size = sizeof(struct mem_cgroup);
  3688. /* Can be very big if MAX_NUMNODES is very big */
  3689. if (size < PAGE_SIZE)
  3690. mem = kmalloc(size, GFP_KERNEL);
  3691. else
  3692. mem = vmalloc(size);
  3693. if (!mem)
  3694. return NULL;
  3695. memset(mem, 0, size);
  3696. mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
  3697. if (!mem->stat)
  3698. goto out_free;
  3699. spin_lock_init(&mem->pcp_counter_lock);
  3700. return mem;
  3701. out_free:
  3702. if (size < PAGE_SIZE)
  3703. kfree(mem);
  3704. else
  3705. vfree(mem);
  3706. return NULL;
  3707. }
  3708. /*
  3709. * When destroying a mem_cgroup, references from swap_cgroup can remain.
  3710. * (scanning them all at force_empty is too costly...)
  3711. *
  3712. * Instead of clearing all references at force_empty, we remember
  3713. * the number of references from swap_cgroup and free the mem_cgroup when
  3714. * it goes down to 0.
  3715. *
  3716. * Removal of the cgroup itself succeeds regardless of refs from swap.
  3717. */
  3718. static void __mem_cgroup_free(struct mem_cgroup *mem)
  3719. {
  3720. int node;
  3721. mem_cgroup_remove_from_trees(mem);
  3722. free_css_id(&mem_cgroup_subsys, &mem->css);
  3723. for_each_node_state(node, N_POSSIBLE)
  3724. free_mem_cgroup_per_zone_info(mem, node);
  3725. free_percpu(mem->stat);
  3726. if (sizeof(struct mem_cgroup) < PAGE_SIZE)
  3727. kfree(mem);
  3728. else
  3729. vfree(mem);
  3730. }
  3731. static void mem_cgroup_get(struct mem_cgroup *mem)
  3732. {
  3733. atomic_inc(&mem->refcnt);
  3734. }
  3735. static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
  3736. {
  3737. if (atomic_sub_and_test(count, &mem->refcnt)) {
  3738. struct mem_cgroup *parent = parent_mem_cgroup(mem);
  3739. __mem_cgroup_free(mem);
  3740. if (parent)
  3741. mem_cgroup_put(parent);
  3742. }
  3743. }
  3744. static void mem_cgroup_put(struct mem_cgroup *mem)
  3745. {
  3746. __mem_cgroup_put(mem, 1);
  3747. }
  3748. /*
  3749. * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled.
  3750. */
  3751. static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
  3752. {
  3753. if (!mem->res.parent)
  3754. return NULL;
  3755. return mem_cgroup_from_res_counter(mem->res.parent, res);
  3756. }
  3757. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  3758. static void __init enable_swap_cgroup(void)
  3759. {
  3760. if (!mem_cgroup_disabled() && really_do_swap_account)
  3761. do_swap_account = 1;
  3762. }
  3763. #else
  3764. static void __init enable_swap_cgroup(void)
  3765. {
  3766. }
  3767. #endif
  3768. static int mem_cgroup_soft_limit_tree_init(void)
  3769. {
  3770. struct mem_cgroup_tree_per_node *rtpn;
  3771. struct mem_cgroup_tree_per_zone *rtpz;
  3772. int tmp, node, zone;
  3773. for_each_node_state(node, N_POSSIBLE) {
  3774. tmp = node;
  3775. if (!node_state(node, N_NORMAL_MEMORY))
  3776. tmp = -1;
  3777. rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
  3778. if (!rtpn)
  3779. return 1;
  3780. soft_limit_tree.rb_tree_per_node[node] = rtpn;
  3781. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  3782. rtpz = &rtpn->rb_tree_per_zone[zone];
  3783. rtpz->rb_root = RB_ROOT;
  3784. spin_lock_init(&rtpz->lock);
  3785. }
  3786. }
  3787. return 0;
  3788. }
  3789. static struct cgroup_subsys_state * __ref
  3790. mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
  3791. {
  3792. struct mem_cgroup *mem, *parent;
  3793. long error = -ENOMEM;
  3794. int node;
  3795. mem = mem_cgroup_alloc();
  3796. if (!mem)
  3797. return ERR_PTR(error);
  3798. for_each_node_state(node, N_POSSIBLE)
  3799. if (alloc_mem_cgroup_per_zone_info(mem, node))
  3800. goto free_out;
  3801. /* root ? */
  3802. if (cont->parent == NULL) {
  3803. int cpu;
  3804. enable_swap_cgroup();
  3805. parent = NULL;
  3806. root_mem_cgroup = mem;
  3807. if (mem_cgroup_soft_limit_tree_init())
  3808. goto free_out;
  3809. for_each_possible_cpu(cpu) {
  3810. struct memcg_stock_pcp *stock =
  3811. &per_cpu(memcg_stock, cpu);
  3812. INIT_WORK(&stock->work, drain_local_stock);
  3813. }
  3814. hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
  3815. } else {
  3816. parent = mem_cgroup_from_cont(cont->parent);
  3817. mem->use_hierarchy = parent->use_hierarchy;
  3818. mem->oom_kill_disable = parent->oom_kill_disable;
  3819. }
  3820. if (parent && parent->use_hierarchy) {
  3821. res_counter_init(&mem->res, &parent->res);
  3822. res_counter_init(&mem->memsw, &parent->memsw);
  3823. /*
  3824. * We increment refcnt of the parent to ensure that we can
  3825. * safely access it on res_counter_charge/uncharge.
  3826. * This refcnt will be decremented when freeing this
  3827. * mem_cgroup(see mem_cgroup_put).
  3828. */
  3829. mem_cgroup_get(parent);
  3830. } else {
  3831. res_counter_init(&mem->res, NULL);
  3832. res_counter_init(&mem->memsw, NULL);
  3833. }
  3834. mem->last_scanned_child = 0;
  3835. spin_lock_init(&mem->reclaim_param_lock);
  3836. INIT_LIST_HEAD(&mem->oom_notify);
  3837. if (parent)
  3838. mem->swappiness = get_swappiness(parent);
  3839. atomic_set(&mem->refcnt, 1);
  3840. mem->move_charge_at_immigrate = 0;
  3841. mutex_init(&mem->thresholds_lock);
  3842. return &mem->css;
  3843. free_out:
  3844. __mem_cgroup_free(mem);
  3845. root_mem_cgroup = NULL;
  3846. return ERR_PTR(error);
  3847. }
  3848. static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
  3849. struct cgroup *cont)
  3850. {
  3851. struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
  3852. return mem_cgroup_force_empty(mem, false);
  3853. }
  3854. static void mem_cgroup_destroy(struct cgroup_subsys *ss,
  3855. struct cgroup *cont)
  3856. {
  3857. struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
  3858. mem_cgroup_put(mem);
  3859. }
  3860. static int mem_cgroup_populate(struct cgroup_subsys *ss,
  3861. struct cgroup *cont)
  3862. {
  3863. int ret;
  3864. ret = cgroup_add_files(cont, ss, mem_cgroup_files,
  3865. ARRAY_SIZE(mem_cgroup_files));
  3866. if (!ret)
  3867. ret = register_memsw_files(cont, ss);
  3868. return ret;
  3869. }
  3870. #ifdef CONFIG_MMU
  3871. /* Handlers for move charge at task migration. */
  3872. #define PRECHARGE_COUNT_AT_ONCE 256
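/*
 * Pre-charge 'count' pages to mc.to before moving them. Try one bulk
 * res_counter charge first; on failure fall back to charging page by
 * page, rescheduling every PRECHARGE_COUNT_AT_ONCE iterations.
 */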
  3873. static int mem_cgroup_do_precharge(unsigned long count)
  3874. {
  3875. int ret = 0;
  3876. int batch_count = PRECHARGE_COUNT_AT_ONCE;
  3877. struct mem_cgroup *mem = mc.to;
  3878. if (mem_cgroup_is_root(mem)) {
  3879. mc.precharge += count;
  3880. /* we don't need css_get for root */
  3881. return ret;
  3882. }
  3883. /* try to charge at once */
  3884. if (count > 1) {
  3885. struct res_counter *dummy;
  3886. /*
  3887. * "mem" cannot be under rmdir() because we've already checked
  3888. * by cgroup_lock_live_cgroup() that it is not removed and we
  3889. * are still under the same cgroup_mutex. So we can postpone
  3890. * css_get().
  3891. */
  3892. if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
  3893. goto one_by_one;
  3894. if (do_swap_account && res_counter_charge(&mem->memsw,
  3895. PAGE_SIZE * count, &dummy)) {
  3896. res_counter_uncharge(&mem->res, PAGE_SIZE * count);
  3897. goto one_by_one;
  3898. }
  3899. mc.precharge += count;
  3900. return ret;
  3901. }
  3902. one_by_one:
  3903. /* fall back to one by one charge */
  3904. while (count--) {
  3905. if (signal_pending(current)) {
  3906. ret = -EINTR;
  3907. break;
  3908. }
  3909. if (!batch_count--) {
  3910. batch_count = PRECHARGE_COUNT_AT_ONCE;
  3911. cond_resched();
  3912. }
  3913. ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
  3914. if (ret || !mem)
  3915. /* mem_cgroup_clear_mc() will do uncharge later */
  3916. return -ENOMEM;
  3917. mc.precharge++;
  3918. }
  3919. return ret;
  3920. }
  3921. /**
  3922. * is_target_pte_for_mc - check a pte whether it is valid for move charge
  3923. * @vma: the vma the pte to be checked belongs to
  3924. * @addr: the address corresponding to the pte to be checked
  3925. * @ptent: the pte to be checked
  3926. * @target: the pointer where the target page or swap entry will be stored (can be NULL)
  3927. *
  3928. * Returns
  3929. * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
  3930. * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
  3931. * move charge. if @target is not NULL, the page is stored in target->page
  3932. * with an extra refcount taken (callers should handle it).
  3933. * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
  3934. * target for charge migration. if @target is not NULL, the entry is stored
  3935. * in target->ent.
  3936. *
  3937. * Called with pte lock held.
  3938. */
  3939. union mc_target {
  3940. struct page *page;
  3941. swp_entry_t ent;
  3942. };
  3943. enum mc_target_type {
  3944. MC_TARGET_NONE, /* not used */
  3945. MC_TARGET_PAGE,
  3946. MC_TARGET_SWAP,
  3947. };
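/* Return the page behind a present pte if it is eligible for charge moving. */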
  3948. static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
  3949. unsigned long addr, pte_t ptent)
  3950. {
  3951. struct page *page = vm_normal_page(vma, addr, ptent);
  3952. if (!page || !page_mapped(page))
  3953. return NULL;
  3954. if (PageAnon(page)) {
  3955. /* we don't move shared anon */
  3956. if (!move_anon() || page_mapcount(page) > 2)
  3957. return NULL;
  3958. } else if (!move_file())
  3959. /* we ignore mapcount for file pages */
  3960. return NULL;
  3961. if (!get_page_unless_zero(page))
  3962. return NULL;
  3963. return page;
  3964. }
  3965. static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
  3966. unsigned long addr, pte_t ptent, swp_entry_t *entry)
  3967. {
  3968. int usage_count;
  3969. struct page *page = NULL;
  3970. swp_entry_t ent = pte_to_swp_entry(ptent);
  3971. if (!move_anon() || non_swap_entry(ent))
  3972. return NULL;
  3973. usage_count = mem_cgroup_count_swap_user(ent, &page);
  3974. if (usage_count > 1) { /* we don't move shared anon */
  3975. if (page)
  3976. put_page(page);
  3977. return NULL;
  3978. }
  3979. if (do_swap_account)
  3980. entry->val = ent.val;
  3981. return page;
  3982. }
  3983. static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
  3984. unsigned long addr, pte_t ptent, swp_entry_t *entry)
  3985. {
  3986. struct page *page = NULL;
  3987. struct inode *inode;
  3988. struct address_space *mapping;
  3989. pgoff_t pgoff;
  3990. if (!vma->vm_file) /* anonymous vma */
  3991. return NULL;
  3992. if (!move_file())
  3993. return NULL;
  3994. inode = vma->vm_file->f_path.dentry->d_inode;
  3995. mapping = vma->vm_file->f_mapping;
  3996. if (pte_none(ptent))
  3997. pgoff = linear_page_index(vma, addr);
  3998. else /* pte_file(ptent) is true */
  3999. pgoff = pte_to_pgoff(ptent);
  4000. /* the page is moved even if it's not in this task's RSS (not page-faulted yet). */
  4001. if (!mapping_cap_swap_backed(mapping)) { /* normal file */
  4002. page = find_get_page(mapping, pgoff);
  4003. } else { /* shmem/tmpfs file. we should take account of swap too. */
  4004. swp_entry_t ent;
  4005. mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
  4006. if (do_swap_account)
  4007. entry->val = ent.val;
  4008. }
  4009. return page;
  4010. }
  4011. static int is_target_pte_for_mc(struct vm_area_struct *vma,
  4012. unsigned long addr, pte_t ptent, union mc_target *target)
  4013. {
  4014. struct page *page = NULL;
  4015. struct page_cgroup *pc;
  4016. int ret = 0;
  4017. swp_entry_t ent = { .val = 0 };
  4018. if (pte_present(ptent))
  4019. page = mc_handle_present_pte(vma, addr, ptent);
  4020. else if (is_swap_pte(ptent))
  4021. page = mc_handle_swap_pte(vma, addr, ptent, &ent);
  4022. else if (pte_none(ptent) || pte_file(ptent))
  4023. page = mc_handle_file_pte(vma, addr, ptent, &ent);
  4024. if (!page && !ent.val)
  4025. return 0;
  4026. if (page) {
  4027. pc = lookup_page_cgroup(page);
  4028. /*
  4029. * Do only a loose check without the page_cgroup lock.
  4030. * mem_cgroup_move_account() checks whether the pc is valid under
  4031. * the lock.
  4032. */
  4033. if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
  4034. ret = MC_TARGET_PAGE;
  4035. if (target)
  4036. target->page = page;
  4037. }
  4038. if (!ret || !target)
  4039. put_page(page);
  4040. }
  4041. /* There is a swap entry and a page doesn't exist or isn't charged */
  4042. if (ent.val && !ret &&
  4043. css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
  4044. ret = MC_TARGET_SWAP;
  4045. if (target)
  4046. target->ent = ent;
  4047. }
  4048. return ret;
  4049. }
  4050. static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
  4051. unsigned long addr, unsigned long end,
  4052. struct mm_walk *walk)
  4053. {
  4054. struct vm_area_struct *vma = walk->private;
  4055. pte_t *pte;
  4056. spinlock_t *ptl;
  4057. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  4058. for (; addr != end; pte++, addr += PAGE_SIZE)
  4059. if (is_target_pte_for_mc(vma, addr, *pte, NULL))
  4060. mc.precharge++; /* increment precharge temporarily */
  4061. pte_unmap_unlock(pte - 1, ptl);
  4062. cond_resched();
  4063. return 0;
  4064. }
  4065. static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
  4066. {
  4067. unsigned long precharge;
  4068. struct vm_area_struct *vma;
  4069. down_read(&mm->mmap_sem);
  4070. for (vma = mm->mmap; vma; vma = vma->vm_next) {
  4071. struct mm_walk mem_cgroup_count_precharge_walk = {
  4072. .pmd_entry = mem_cgroup_count_precharge_pte_range,
  4073. .mm = mm,
  4074. .private = vma,
  4075. };
  4076. if (is_vm_hugetlb_page(vma))
  4077. continue;
  4078. walk_page_range(vma->vm_start, vma->vm_end,
  4079. &mem_cgroup_count_precharge_walk);
  4080. }
  4081. up_read(&mm->mmap_sem);
  4082. precharge = mc.precharge;
  4083. mc.precharge = 0;
  4084. return precharge;
  4085. }
  4086. static int mem_cgroup_precharge_mc(struct mm_struct *mm)
  4087. {
  4088. return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
  4089. }
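/*
 * Tear down the move-charge state: cancel unused precharges against
 * mc.to, uncharge what was moved away from mc.from, fix up swap
 * accounting, and wake up everyone waiting for the move to finish.
 */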
  4090. static void mem_cgroup_clear_mc(void)
  4091. {
  4092. struct mem_cgroup *from = mc.from;
  4093. struct mem_cgroup *to = mc.to;
  4094. /* we must uncharge all the leftover precharges from mc.to */
  4095. if (mc.precharge) {
  4096. __mem_cgroup_cancel_charge(mc.to, mc.precharge);
  4097. mc.precharge = 0;
  4098. }
  4099. /*
  4100. * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
  4101. * we must uncharge here.
  4102. */
  4103. if (mc.moved_charge) {
  4104. __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
  4105. mc.moved_charge = 0;
  4106. }
  4107. /* we must fixup refcnts and charges */
  4108. if (mc.moved_swap) {
  4109. /* uncharge swap account from the old cgroup */
  4110. if (!mem_cgroup_is_root(mc.from))
  4111. res_counter_uncharge(&mc.from->memsw,
  4112. PAGE_SIZE * mc.moved_swap);
  4113. __mem_cgroup_put(mc.from, mc.moved_swap);
  4114. if (!mem_cgroup_is_root(mc.to)) {
  4115. /*
  4116. * we charged both to->res and to->memsw, so we should
  4117. * uncharge to->res.
  4118. */
  4119. res_counter_uncharge(&mc.to->res,
  4120. PAGE_SIZE * mc.moved_swap);
  4121. }
  4122. /* we've already done mem_cgroup_get(mc.to) */
  4123. mc.moved_swap = 0;
  4124. }
  4125. spin_lock(&mc.lock);
  4126. mc.from = NULL;
  4127. mc.to = NULL;
  4128. mc.moving_task = NULL;
  4129. spin_unlock(&mc.lock);
  4130. mem_cgroup_end_move(from);
  4131. memcg_oom_recover(from);
  4132. memcg_oom_recover(to);
  4133. wake_up_all(&mc.waitq);
  4134. }
  4135. static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
  4136. struct cgroup *cgroup,
  4137. struct task_struct *p,
  4138. bool threadgroup)
  4139. {
  4140. int ret = 0;
  4141. struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
  4142. if (mem->move_charge_at_immigrate) {
  4143. struct mm_struct *mm;
  4144. struct mem_cgroup *from = mem_cgroup_from_task(p);
  4145. VM_BUG_ON(from == mem);
  4146. mm = get_task_mm(p);
  4147. if (!mm)
  4148. return 0;
  4149. /* We move charges only when we move an owner of the mm */
  4150. if (mm->owner == p) {
  4151. VM_BUG_ON(mc.from);
  4152. VM_BUG_ON(mc.to);
  4153. VM_BUG_ON(mc.precharge);
  4154. VM_BUG_ON(mc.moved_charge);
  4155. VM_BUG_ON(mc.moved_swap);
  4156. VM_BUG_ON(mc.moving_task);
  4157. mem_cgroup_start_move(from);
  4158. spin_lock(&mc.lock);
  4159. mc.from = from;
  4160. mc.to = mem;
  4161. mc.precharge = 0;
  4162. mc.moved_charge = 0;
  4163. mc.moved_swap = 0;
  4164. mc.moving_task = current;
  4165. spin_unlock(&mc.lock);
  4166. ret = mem_cgroup_precharge_mc(mm);
  4167. if (ret)
  4168. mem_cgroup_clear_mc();
  4169. }
  4170. mmput(mm);
  4171. }
  4172. return ret;
  4173. }
  4174. static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
  4175. struct cgroup *cgroup,
  4176. struct task_struct *p,
  4177. bool threadgroup)
  4178. {
  4179. mem_cgroup_clear_mc();
  4180. }
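/*
 * Walk one pte range and move eligible charges from mc.from to mc.to,
 * consuming precharges. When they run out, drop the pte lock, try to
 * precharge one more page and restart from the current address.
 */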
  4181. static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
  4182. unsigned long addr, unsigned long end,
  4183. struct mm_walk *walk)
  4184. {
  4185. int ret = 0;
  4186. struct vm_area_struct *vma = walk->private;
  4187. pte_t *pte;
  4188. spinlock_t *ptl;
  4189. retry:
  4190. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  4191. for (; addr != end; addr += PAGE_SIZE) {
  4192. pte_t ptent = *(pte++);
  4193. union mc_target target;
  4194. int type;
  4195. struct page *page;
  4196. struct page_cgroup *pc;
  4197. swp_entry_t ent;
  4198. if (!mc.precharge)
  4199. break;
  4200. type = is_target_pte_for_mc(vma, addr, ptent, &target);
  4201. switch (type) {
  4202. case MC_TARGET_PAGE:
  4203. page = target.page;
  4204. if (isolate_lru_page(page))
  4205. goto put;
  4206. pc = lookup_page_cgroup(page);
  4207. if (!mem_cgroup_move_account(pc,
  4208. mc.from, mc.to, false)) {
  4209. mc.precharge--;
  4210. /* we uncharge from mc.from later. */
  4211. mc.moved_charge++;
  4212. }
  4213. putback_lru_page(page);
  4214. put: /* is_target_pte_for_mc() gets the page */
  4215. put_page(page);
  4216. break;
  4217. case MC_TARGET_SWAP:
  4218. ent = target.ent;
  4219. if (!mem_cgroup_move_swap_account(ent,
  4220. mc.from, mc.to, false)) {
  4221. mc.precharge--;
  4222. /* we fixup refcnts and charges later. */
  4223. mc.moved_swap++;
  4224. }
  4225. break;
  4226. default:
  4227. break;
  4228. }
  4229. }
  4230. pte_unmap_unlock(pte - 1, ptl);
  4231. cond_resched();
  4232. if (addr != end) {
  4233. /*
  4234. * We have consumed all precharges we got in can_attach().
  4235. * We try to charge one by one, but don't do any additional
  4236. * charges to mc.to if we have failed to charge once in the attach()
  4237. * phase.
  4238. */
  4239. ret = mem_cgroup_do_precharge(1);
  4240. if (!ret)
  4241. goto retry;
  4242. }
  4243. return ret;
  4244. }
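/* Walk every VMA of the mm (except hugetlb ones) and move its charges. */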
  4245. static void mem_cgroup_move_charge(struct mm_struct *mm)
  4246. {
  4247. struct vm_area_struct *vma;
  4248. lru_add_drain_all();
  4249. down_read(&mm->mmap_sem);
  4250. for (vma = mm->mmap; vma; vma = vma->vm_next) {
  4251. int ret;
  4252. struct mm_walk mem_cgroup_move_charge_walk = {
  4253. .pmd_entry = mem_cgroup_move_charge_pte_range,
  4254. .mm = mm,
  4255. .private = vma,
  4256. };
  4257. if (is_vm_hugetlb_page(vma))
  4258. continue;
  4259. ret = walk_page_range(vma->vm_start, vma->vm_end,
  4260. &mem_cgroup_move_charge_walk);
  4261. if (ret)
  4262. /*
  4263. * This means we have consumed all precharges and failed to do
  4264. * additional charging. Just abandon here.
  4265. */
  4266. break;
  4267. }
  4268. up_read(&mm->mmap_sem);
  4269. }
  4270. static void mem_cgroup_move_task(struct cgroup_subsys *ss,
  4271. struct cgroup *cont,
  4272. struct cgroup *old_cont,
  4273. struct task_struct *p,
  4274. bool threadgroup)
  4275. {
  4276. struct mm_struct *mm;
  4277. if (!mc.to)
  4278. /* no need to move charge */
  4279. return;
  4280. mm = get_task_mm(p);
  4281. if (mm) {
  4282. mem_cgroup_move_charge(mm);
  4283. mmput(mm);
  4284. }
  4285. mem_cgroup_clear_mc();
  4286. }
  4287. #else /* !CONFIG_MMU */
  4288. static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
  4289. struct cgroup *cgroup,
  4290. struct task_struct *p,
  4291. bool threadgroup)
  4292. {
  4293. return 0;
  4294. }
  4295. static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
  4296. struct cgroup *cgroup,
  4297. struct task_struct *p,
  4298. bool threadgroup)
  4299. {
  4300. }
  4301. static void mem_cgroup_move_task(struct cgroup_subsys *ss,
  4302. struct cgroup *cont,
  4303. struct cgroup *old_cont,
  4304. struct task_struct *p,
  4305. bool threadgroup)
  4306. {
  4307. }
  4308. #endif
  4309. struct cgroup_subsys mem_cgroup_subsys = {
  4310. .name = "memory",
  4311. .subsys_id = mem_cgroup_subsys_id,
  4312. .create = mem_cgroup_create,
  4313. .pre_destroy = mem_cgroup_pre_destroy,
  4314. .destroy = mem_cgroup_destroy,
  4315. .populate = mem_cgroup_populate,
  4316. .can_attach = mem_cgroup_can_attach,
  4317. .cancel_attach = mem_cgroup_cancel_attach,
  4318. .attach = mem_cgroup_move_task,
  4319. .early_init = 0,
  4320. .use_id = 1,
  4321. };
  4322. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  4323. static int __init disable_swap_account(char *s)
  4324. {
  4325. really_do_swap_account = 0;
  4326. return 1;
  4327. }
  4328. __setup("noswapaccount", disable_swap_account);
  4329. #endif