memcontrol.c

  1. /* memcontrol.c - Memory Controller
  2. *
  3. * Copyright IBM Corporation, 2007
  4. * Author Balbir Singh <balbir@linux.vnet.ibm.com>
  5. *
  6. * Copyright 2007 OpenVZ SWsoft Inc
  7. * Author: Pavel Emelianov <xemul@openvz.org>
  8. *
  9. * Memory thresholds
  10. * Copyright (C) 2009 Nokia Corporation
  11. * Author: Kirill A. Shutemov
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2 of the License, or
  16. * (at your option) any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. */
  23. #include <linux/res_counter.h>
  24. #include <linux/memcontrol.h>
  25. #include <linux/cgroup.h>
  26. #include <linux/mm.h>
  27. #include <linux/hugetlb.h>
  28. #include <linux/pagemap.h>
  29. #include <linux/smp.h>
  30. #include <linux/page-flags.h>
  31. #include <linux/backing-dev.h>
  32. #include <linux/bit_spinlock.h>
  33. #include <linux/rcupdate.h>
  34. #include <linux/limits.h>
  35. #include <linux/export.h>
  36. #include <linux/mutex.h>
  37. #include <linux/rbtree.h>
  38. #include <linux/slab.h>
  39. #include <linux/swap.h>
  40. #include <linux/swapops.h>
  41. #include <linux/spinlock.h>
  42. #include <linux/eventfd.h>
  43. #include <linux/sort.h>
  44. #include <linux/fs.h>
  45. #include <linux/seq_file.h>
  46. #include <linux/vmalloc.h>
  47. #include <linux/mm_inline.h>
  48. #include <linux/page_cgroup.h>
  49. #include <linux/cpu.h>
  50. #include <linux/oom.h>
  51. #include "internal.h"
  52. #include <net/sock.h>
  53. #include <net/tcp_memcontrol.h>
  54. #include <asm/uaccess.h>
  55. #include <trace/events/vmscan.h>
  56. struct cgroup_subsys mem_cgroup_subsys __read_mostly;
  57. #define MEM_CGROUP_RECLAIM_RETRIES 5
  58. struct mem_cgroup *root_mem_cgroup __read_mostly;
  59. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  60. /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
  61. int do_swap_account __read_mostly;
  63. /* for remembering the boot option */
  63. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
  64. static int really_do_swap_account __initdata = 1;
  65. #else
  66. static int really_do_swap_account __initdata = 0;
  67. #endif
  68. #else
  69. #define do_swap_account (0)
  70. #endif
  71. /*
  72. * Statistics for memory cgroup.
  73. */
  74. enum mem_cgroup_stat_index {
  75. /*
  76. * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
  77. */
  78. MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
  79. MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
  80. MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
  81. MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
  82. MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
  83. MEM_CGROUP_STAT_NSTATS,
  84. };
  85. enum mem_cgroup_events_index {
  86. MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
  87. MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
  88. MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
  89. MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
  90. MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
  91. MEM_CGROUP_EVENTS_NSTATS,
  92. };
  93. /*
  94. * Per-memcg event counter is incremented at every pagein/pageout. With THP,
  95. * it will be incremented by the number of pages. This counter is used
  96. * to trigger some periodic events. This is straightforward and better
  97. * than using jiffies etc. to handle periodic memcg events.
  98. */
  99. enum mem_cgroup_events_target {
  100. MEM_CGROUP_TARGET_THRESH,
  101. MEM_CGROUP_TARGET_SOFTLIMIT,
  102. MEM_CGROUP_TARGET_NUMAINFO,
  103. MEM_CGROUP_NTARGETS,
  104. };
  105. #define THRESHOLDS_EVENTS_TARGET (128)
  106. #define SOFTLIMIT_EVENTS_TARGET (1024)
  107. #define NUMAINFO_EVENTS_TARGET (1024)
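/*
 * Example: with the targets above, memcg_check_events() re-evaluates the
 * usage thresholds roughly once per 128 charged/uncharged pages on each
 * CPU, and the soft-limit tree and NUMA scan info roughly once per 1024.
 */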
  108. struct mem_cgroup_stat_cpu {
  109. long count[MEM_CGROUP_STAT_NSTATS];
  110. unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
  111. unsigned long targets[MEM_CGROUP_NTARGETS];
  112. };
  113. struct mem_cgroup_reclaim_iter {
  114. /* css_id of the last scanned hierarchy member */
  115. int position;
  116. /* scan generation, increased every round-trip */
  117. unsigned int generation;
  118. };
  119. /*
  120. * per-zone information in memory controller.
  121. */
  122. struct mem_cgroup_per_zone {
  123. struct lruvec lruvec;
  124. unsigned long lru_size[NR_LRU_LISTS];
  125. struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
  126. struct zone_reclaim_stat reclaim_stat;
  127. struct rb_node tree_node; /* RB tree node */
  128. unsigned long long usage_in_excess; /* Set to the value by which */
  129. /* the soft limit is exceeded */
  130. bool on_tree;
  131. struct mem_cgroup *memcg; /* Back pointer, we cannot */
  132. /* use container_of */
  133. };
  134. struct mem_cgroup_per_node {
  135. struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
  136. };
  137. struct mem_cgroup_lru_info {
  138. struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
  139. };
  140. /*
  141. * Cgroups above their limits are maintained in a RB-Tree, independent of
  142. * their hierarchy representation
  143. */
  144. struct mem_cgroup_tree_per_zone {
  145. struct rb_root rb_root;
  146. spinlock_t lock;
  147. };
  148. struct mem_cgroup_tree_per_node {
  149. struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
  150. };
  151. struct mem_cgroup_tree {
  152. struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
  153. };
  154. static struct mem_cgroup_tree soft_limit_tree __read_mostly;
  155. struct mem_cgroup_threshold {
  156. struct eventfd_ctx *eventfd;
  157. u64 threshold;
  158. };
  159. /* For threshold */
  160. struct mem_cgroup_threshold_ary {
  161. /* An array index points to threshold just below usage. */
  162. int current_threshold;
  163. /* Size of entries[] */
  164. unsigned int size;
  165. /* Array of thresholds */
  166. struct mem_cgroup_threshold entries[0];
  167. };
  168. struct mem_cgroup_thresholds {
  169. /* Primary thresholds array */
  170. struct mem_cgroup_threshold_ary *primary;
  171. /*
  172. * Spare threshold array.
  173. * This is needed to make mem_cgroup_unregister_event() "never fail".
  174. * It must be able to store at least primary->size - 1 entries.
  175. */
  176. struct mem_cgroup_threshold_ary *spare;
  177. };
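/*
 * Example: if primary->size is 4 and one threshold is unregistered, the
 * replacement array needs at most 3 entries; because the spare is kept
 * preallocated with that much room, mem_cgroup_unregister_event() never
 * depends on an allocation that could fail.
 */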
  178. /* for OOM */
  179. struct mem_cgroup_eventfd_list {
  180. struct list_head list;
  181. struct eventfd_ctx *eventfd;
  182. };
  183. static void mem_cgroup_threshold(struct mem_cgroup *memcg);
  184. static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
  185. /*
  186. * The memory controller data structure. The memory controller controls both
  187. * page cache and RSS per cgroup. We would eventually like to provide
  188. * statistics based on the statistics developed by Rik Van Riel for clock-pro,
  189. * to help the administrator determine what knobs to tune.
  190. *
  191. * TODO: Add a water mark for the memory controller. Reclaim will begin when
  192. * we hit the water mark. Maybe even add a low water mark, such that
  193. * no reclaim occurs from a cgroup at its low water mark; this is
  194. * a feature that will be implemented much later.
  195. */
  196. struct mem_cgroup {
  197. struct cgroup_subsys_state css;
  198. /*
  199. * the counter to account for memory usage
  200. */
  201. struct res_counter res;
  202. union {
  203. /*
  204. * the counter to account for mem+swap usage.
  205. */
  206. struct res_counter memsw;
  207. /*
  208. * rcu_freeing is used only when freeing struct mem_cgroup,
  209. * so put it into a union to avoid wasting more memory.
  210. * It must be disjoint from the css field. It could be
  211. * in a union with the res field, but res plays a much
  212. * larger part in mem_cgroup life than memsw, and might
  213. * be of interest, even at time of free, when debugging.
  214. * So share rcu_head with the less interesting memsw.
  215. */
  216. struct rcu_head rcu_freeing;
  217. /*
  218. * But when using vfree(), that cannot be done at
  219. * interrupt time, so we must then queue the work.
  220. */
  221. struct work_struct work_freeing;
  222. };
  223. /*
  224. * Per cgroup active and inactive list, similar to the
  225. * per zone LRU lists.
  226. */
  227. struct mem_cgroup_lru_info info;
  228. int last_scanned_node;
  229. #if MAX_NUMNODES > 1
  230. nodemask_t scan_nodes;
  231. atomic_t numainfo_events;
  232. atomic_t numainfo_updating;
  233. #endif
  234. /*
  235. * Should the accounting and control be hierarchical, per subtree?
  236. */
  237. bool use_hierarchy;
  238. bool oom_lock;
  239. atomic_t under_oom;
  240. atomic_t refcnt;
  241. int swappiness;
  242. /* OOM-Killer disable */
  243. int oom_kill_disable;
  244. /* set when res.limit == memsw.limit */
  245. bool memsw_is_minimum;
  246. /* protect arrays of thresholds */
  247. struct mutex thresholds_lock;
  248. /* thresholds for memory usage. RCU-protected */
  249. struct mem_cgroup_thresholds thresholds;
  250. /* thresholds for mem+swap usage. RCU-protected */
  251. struct mem_cgroup_thresholds memsw_thresholds;
  252. /* For oom notifier event fd */
  253. struct list_head oom_notify;
  254. /*
  255. * Should we move charges of a task when a task is moved into this
  256. * mem_cgroup? And what type of charges should we move?
  257. */
  258. unsigned long move_charge_at_immigrate;
  259. /*
  260. * set > 0 if pages under this cgroup are moving to other cgroup.
  261. */
  262. atomic_t moving_account;
  263. /* taken only while moving_account > 0 */
  264. spinlock_t move_lock;
  265. /*
  266. * percpu counter.
  267. */
  268. struct mem_cgroup_stat_cpu *stat;
  269. /*
  270. * used when a cpu is offlined or other synchronizations
  271. * See mem_cgroup_read_stat().
  272. */
  273. struct mem_cgroup_stat_cpu nocpu_base;
  274. spinlock_t pcp_counter_lock;
  275. #ifdef CONFIG_INET
  276. struct tcp_memcontrol tcp_mem;
  277. #endif
  278. };
  279. /* Stuffs for move charges at task migration. */
  280. /*
  281. * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
  282. * left-shifted bitmap of these types.
  283. */
  284. enum move_type {
  285. MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
  286. MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
  287. NR_MOVE_TYPE,
  288. };
  289. /* "mc" and its members are protected by cgroup_mutex */
  290. static struct move_charge_struct {
  291. spinlock_t lock; /* for from, to */
  292. struct mem_cgroup *from;
  293. struct mem_cgroup *to;
  294. unsigned long precharge;
  295. unsigned long moved_charge;
  296. unsigned long moved_swap;
  297. struct task_struct *moving_task; /* a task moving charges */
  298. wait_queue_head_t waitq; /* a waitq for other context */
  299. } mc = {
  300. .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
  301. .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
  302. };
  303. static bool move_anon(void)
  304. {
  305. return test_bit(MOVE_CHARGE_TYPE_ANON,
  306. &mc.to->move_charge_at_immigrate);
  307. }
  308. static bool move_file(void)
  309. {
  310. return test_bit(MOVE_CHARGE_TYPE_FILE,
  311. &mc.to->move_charge_at_immigrate);
  312. }
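/*
 * Example: writing 3 to memory.move_charge_at_immigrate sets both bits
 * (MOVE_CHARGE_TYPE_ANON is bit 0, MOVE_CHARGE_TYPE_FILE is bit 1), so
 * move_anon() and move_file() above both return true and anonymous as
 * well as file pages follow a task that is moved into the cgroup.
 */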
  313. /*
  314. * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  315. * limit reclaim to prevent infinite loops, if they ever occur.
  316. */
  317. #define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
  318. #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
  319. enum charge_type {
  320. MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
  321. MEM_CGROUP_CHARGE_TYPE_MAPPED,
  322. MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
  323. MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
  324. MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
  325. MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
  326. NR_CHARGE_TYPE,
  327. };
  328. /* for encoding cft->private value on file */
  329. #define _MEM (0)
  330. #define _MEMSWAP (1)
  331. #define _OOM_TYPE (2)
  332. #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
  333. #define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
  334. #define MEMFILE_ATTR(val) ((val) & 0xffff)
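/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, 3) packs to (1 << 16) | 3, from which
 * MEMFILE_TYPE() recovers _MEMSWAP and MEMFILE_ATTR() recovers 3.
 */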
  335. /* Used for OOM notifier */
  336. #define OOM_CONTROL (0)
  337. /*
  338. * Reclaim flags for mem_cgroup_hierarchical_reclaim
  339. */
  340. #define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
  341. #define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
  342. #define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
  343. #define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
  344. static void mem_cgroup_get(struct mem_cgroup *memcg);
  345. static void mem_cgroup_put(struct mem_cgroup *memcg);
  346. /* Writing them here to avoid exposing memcg's inner layout */
  347. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
  348. #include <net/sock.h>
  349. #include <net/ip.h>
  350. static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
  351. void sock_update_memcg(struct sock *sk)
  352. {
  353. if (mem_cgroup_sockets_enabled) {
  354. struct mem_cgroup *memcg;
  355. BUG_ON(!sk->sk_prot->proto_cgroup);
  356. /* Socket cloning can throw us here with sk_cgrp already
  357. * filled. It won't, however, necessarily happen from
  358. * process context. So the test for root memcg given
  359. * the current task's memcg won't help us in this case.
  360. *
  361. * Respecting the original socket's memcg is a better
  362. * decision in this case.
  363. */
  364. if (sk->sk_cgrp) {
  365. BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
  366. mem_cgroup_get(sk->sk_cgrp->memcg);
  367. return;
  368. }
  369. rcu_read_lock();
  370. memcg = mem_cgroup_from_task(current);
  371. if (!mem_cgroup_is_root(memcg)) {
  372. mem_cgroup_get(memcg);
  373. sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
  374. }
  375. rcu_read_unlock();
  376. }
  377. }
  378. EXPORT_SYMBOL(sock_update_memcg);
  379. void sock_release_memcg(struct sock *sk)
  380. {
  381. if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
  382. struct mem_cgroup *memcg;
  383. WARN_ON(!sk->sk_cgrp->memcg);
  384. memcg = sk->sk_cgrp->memcg;
  385. mem_cgroup_put(memcg);
  386. }
  387. }
  388. #ifdef CONFIG_INET
  389. struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
  390. {
  391. if (!memcg || mem_cgroup_is_root(memcg))
  392. return NULL;
  393. return &memcg->tcp_mem.cg_proto;
  394. }
  395. EXPORT_SYMBOL(tcp_proto_cgroup);
  396. #endif /* CONFIG_INET */
  397. #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
  398. static void drain_all_stock_async(struct mem_cgroup *memcg);
  399. static struct mem_cgroup_per_zone *
  400. mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
  401. {
  402. return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
  403. }
  404. struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
  405. {
  406. return &memcg->css;
  407. }
  408. static struct mem_cgroup_per_zone *
  409. page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
  410. {
  411. int nid = page_to_nid(page);
  412. int zid = page_zonenum(page);
  413. return mem_cgroup_zoneinfo(memcg, nid, zid);
  414. }
  415. static struct mem_cgroup_tree_per_zone *
  416. soft_limit_tree_node_zone(int nid, int zid)
  417. {
  418. return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
  419. }
  420. static struct mem_cgroup_tree_per_zone *
  421. soft_limit_tree_from_page(struct page *page)
  422. {
  423. int nid = page_to_nid(page);
  424. int zid = page_zonenum(page);
  425. return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
  426. }
  427. static void
  428. __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
  429. struct mem_cgroup_per_zone *mz,
  430. struct mem_cgroup_tree_per_zone *mctz,
  431. unsigned long long new_usage_in_excess)
  432. {
  433. struct rb_node **p = &mctz->rb_root.rb_node;
  434. struct rb_node *parent = NULL;
  435. struct mem_cgroup_per_zone *mz_node;
  436. if (mz->on_tree)
  437. return;
  438. mz->usage_in_excess = new_usage_in_excess;
  439. if (!mz->usage_in_excess)
  440. return;
  441. while (*p) {
  442. parent = *p;
  443. mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
  444. tree_node);
  445. if (mz->usage_in_excess < mz_node->usage_in_excess)
  446. p = &(*p)->rb_left;
  447. /*
  448. * We can't avoid mem cgroups that are over their soft
  449. * limit by the same amount
  450. */
  451. else if (mz->usage_in_excess >= mz_node->usage_in_excess)
  452. p = &(*p)->rb_right;
  453. }
  454. rb_link_node(&mz->tree_node, parent, p);
  455. rb_insert_color(&mz->tree_node, &mctz->rb_root);
  456. mz->on_tree = true;
  457. }
  458. static void
  459. __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
  460. struct mem_cgroup_per_zone *mz,
  461. struct mem_cgroup_tree_per_zone *mctz)
  462. {
  463. if (!mz->on_tree)
  464. return;
  465. rb_erase(&mz->tree_node, &mctz->rb_root);
  466. mz->on_tree = false;
  467. }
  468. static void
  469. mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
  470. struct mem_cgroup_per_zone *mz,
  471. struct mem_cgroup_tree_per_zone *mctz)
  472. {
  473. spin_lock(&mctz->lock);
  474. __mem_cgroup_remove_exceeded(memcg, mz, mctz);
  475. spin_unlock(&mctz->lock);
  476. }
  477. static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
  478. {
  479. unsigned long long excess;
  480. struct mem_cgroup_per_zone *mz;
  481. struct mem_cgroup_tree_per_zone *mctz;
  482. int nid = page_to_nid(page);
  483. int zid = page_zonenum(page);
  484. mctz = soft_limit_tree_from_page(page);
  485. /*
  486. * Necessary to update all ancestors when hierarchy is used,
  487. * because their event counter is not touched.
  488. */
  489. for (; memcg; memcg = parent_mem_cgroup(memcg)) {
  490. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  491. excess = res_counter_soft_limit_excess(&memcg->res);
  492. /*
  493. * We have to update the tree if mz is on RB-tree or
  494. * mem is over its softlimit.
  495. */
  496. if (excess || mz->on_tree) {
  497. spin_lock(&mctz->lock);
  498. /* if on-tree, remove it */
  499. if (mz->on_tree)
  500. __mem_cgroup_remove_exceeded(memcg, mz, mctz);
  501. /*
  502. * Insert again. mz->usage_in_excess will be updated.
  503. * If excess is 0, no tree ops.
  504. */
  505. __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
  506. spin_unlock(&mctz->lock);
  507. }
  508. }
  509. }
  510. static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
  511. {
  512. int node, zone;
  513. struct mem_cgroup_per_zone *mz;
  514. struct mem_cgroup_tree_per_zone *mctz;
  515. for_each_node(node) {
  516. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  517. mz = mem_cgroup_zoneinfo(memcg, node, zone);
  518. mctz = soft_limit_tree_node_zone(node, zone);
  519. mem_cgroup_remove_exceeded(memcg, mz, mctz);
  520. }
  521. }
  522. }
  523. static struct mem_cgroup_per_zone *
  524. __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  525. {
  526. struct rb_node *rightmost = NULL;
  527. struct mem_cgroup_per_zone *mz;
  528. retry:
  529. mz = NULL;
  530. rightmost = rb_last(&mctz->rb_root);
  531. if (!rightmost)
  532. goto done; /* Nothing to reclaim from */
  533. mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
  534. /*
  535. * Remove the node now but someone else can add it back;
  536. * we will add it back at the end of reclaim to its correct
  537. * position in the tree.
  538. */
  539. __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
  540. if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
  541. !css_tryget(&mz->memcg->css))
  542. goto retry;
  543. done:
  544. return mz;
  545. }
  546. static struct mem_cgroup_per_zone *
  547. mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  548. {
  549. struct mem_cgroup_per_zone *mz;
  550. spin_lock(&mctz->lock);
  551. mz = __mem_cgroup_largest_soft_limit_node(mctz);
  552. spin_unlock(&mctz->lock);
  553. return mz;
  554. }
  555. /*
  556. * Implementation Note: reading percpu statistics for memcg.
  557. *
  558. * Both vmstat[] and percpu_counter use thresholds and do periodic
  559. * synchronization to implement "quick" reads. There is a trade-off between
  560. * reading cost and precision of the value, so we may eventually implement
  561. * a similar periodic synchronization of the memcg counters.
  562. *
  563. * But this _read() function is currently used for the user interface. The
  564. * user accounts memory usage per memory cgroup and _always_ requires an
  565. * exact value. Even if we provided a quick-and-fuzzy read, we would still
  566. * have to visit all online cpus and sum them up. So, for now, unnecessary
  567. * synchronization is not implemented (it is only done for cpu hotplug).
  568. *
  569. * If there are kernel-internal users which can make use of a not-exact
  570. * value, and reading all cpu values becomes a performance bottleneck in
  571. * some common workload, thresholds and synchronization as in vmstat[]
  572. * should be implemented.
  573. */
  574. static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
  575. enum mem_cgroup_stat_index idx)
  576. {
  577. long val = 0;
  578. int cpu;
  579. get_online_cpus();
  580. for_each_online_cpu(cpu)
  581. val += per_cpu(memcg->stat->count[idx], cpu);
  582. #ifdef CONFIG_HOTPLUG_CPU
  583. spin_lock(&memcg->pcp_counter_lock);
  584. val += memcg->nocpu_base.count[idx];
  585. spin_unlock(&memcg->pcp_counter_lock);
  586. #endif
  587. put_online_cpus();
  588. return val;
  589. }
  590. static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
  591. bool charge)
  592. {
  593. int val = (charge) ? 1 : -1;
  594. this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
  595. }
  596. static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
  597. enum mem_cgroup_events_index idx)
  598. {
  599. unsigned long val = 0;
  600. int cpu;
  601. for_each_online_cpu(cpu)
  602. val += per_cpu(memcg->stat->events[idx], cpu);
  603. #ifdef CONFIG_HOTPLUG_CPU
  604. spin_lock(&memcg->pcp_counter_lock);
  605. val += memcg->nocpu_base.events[idx];
  606. spin_unlock(&memcg->pcp_counter_lock);
  607. #endif
  608. return val;
  609. }
  610. static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
  611. bool anon, int nr_pages)
  612. {
  613. preempt_disable();
  614. /*
  615. * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
  616. * counted as CACHE even if it's on ANON LRU.
  617. */
  618. if (anon)
  619. __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
  620. nr_pages);
  621. else
  622. __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
  623. nr_pages);
  624. /* pagein of a big page is an event. So, ignore page size */
  625. if (nr_pages > 0)
  626. __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
  627. else {
  628. __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
  629. nr_pages = -nr_pages; /* for event */
  630. }
  631. __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
  632. preempt_enable();
  633. }
  634. unsigned long
  635. mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
  636. unsigned int lru_mask)
  637. {
  638. struct mem_cgroup_per_zone *mz;
  639. enum lru_list lru;
  640. unsigned long ret = 0;
  641. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  642. for_each_lru(lru) {
  643. if (BIT(lru) & lru_mask)
  644. ret += mz->lru_size[lru];
  645. }
  646. return ret;
  647. }
  648. static unsigned long
  649. mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
  650. int nid, unsigned int lru_mask)
  651. {
  652. u64 total = 0;
  653. int zid;
  654. for (zid = 0; zid < MAX_NR_ZONES; zid++)
  655. total += mem_cgroup_zone_nr_lru_pages(memcg,
  656. nid, zid, lru_mask);
  657. return total;
  658. }
  659. static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
  660. unsigned int lru_mask)
  661. {
  662. int nid;
  663. u64 total = 0;
  664. for_each_node_state(nid, N_HIGH_MEMORY)
  665. total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
  666. return total;
  667. }
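/*
 * Example: mem_cgroup_nr_lru_pages(memcg,
 * BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) sums all anonymous pages
 * charged to memcg across every node and zone.
 */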
  668. static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
  669. enum mem_cgroup_events_target target)
  670. {
  671. unsigned long val, next;
  672. val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
  673. next = __this_cpu_read(memcg->stat->targets[target]);
  674. /* from time_after() in jiffies.h */
  675. if ((long)next - (long)val < 0) {
  676. switch (target) {
  677. case MEM_CGROUP_TARGET_THRESH:
  678. next = val + THRESHOLDS_EVENTS_TARGET;
  679. break;
  680. case MEM_CGROUP_TARGET_SOFTLIMIT:
  681. next = val + SOFTLIMIT_EVENTS_TARGET;
  682. break;
  683. case MEM_CGROUP_TARGET_NUMAINFO:
  684. next = val + NUMAINFO_EVENTS_TARGET;
  685. break;
  686. default:
  687. break;
  688. }
  689. __this_cpu_write(memcg->stat->targets[target], next);
  690. return true;
  691. }
  692. return false;
  693. }
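/*
 * Example: the signed subtraction above is the same wrap-safe comparison
 * time_after() uses for jiffies; it stays correct when the unsigned event
 * counter eventually overflows, as long as val and next never drift more
 * than LONG_MAX apart.
 */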
  694. /*
  695. * Check events in order.
  696. *
  697. */
  698. static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
  699. {
  700. preempt_disable();
  701. /* threshold event is triggered in finer grain than soft limit */
  702. if (unlikely(mem_cgroup_event_ratelimit(memcg,
  703. MEM_CGROUP_TARGET_THRESH))) {
  704. bool do_softlimit;
  705. bool do_numainfo __maybe_unused;
  706. do_softlimit = mem_cgroup_event_ratelimit(memcg,
  707. MEM_CGROUP_TARGET_SOFTLIMIT);
  708. #if MAX_NUMNODES > 1
  709. do_numainfo = mem_cgroup_event_ratelimit(memcg,
  710. MEM_CGROUP_TARGET_NUMAINFO);
  711. #endif
  712. preempt_enable();
  713. mem_cgroup_threshold(memcg);
  714. if (unlikely(do_softlimit))
  715. mem_cgroup_update_tree(memcg, page);
  716. #if MAX_NUMNODES > 1
  717. if (unlikely(do_numainfo))
  718. atomic_inc(&memcg->numainfo_events);
  719. #endif
  720. } else
  721. preempt_enable();
  722. }
  723. struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
  724. {
  725. return container_of(cgroup_subsys_state(cont,
  726. mem_cgroup_subsys_id), struct mem_cgroup,
  727. css);
  728. }
  729. struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
  730. {
  731. /*
  732. * mm_update_next_owner() may clear mm->owner to NULL
  733. * if it races with swapoff, page migration, etc.
  734. * So this can be called with p == NULL.
  735. */
  736. if (unlikely(!p))
  737. return NULL;
  738. return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
  739. struct mem_cgroup, css);
  740. }
  741. struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
  742. {
  743. struct mem_cgroup *memcg = NULL;
  744. if (!mm)
  745. return NULL;
  746. /*
  747. * Because we have no locks, mm->owner may be being moved to another
  748. * cgroup. We use css_tryget() here even if this looks
  749. * pessimistic (rather than adding locks here).
  750. */
  751. rcu_read_lock();
  752. do {
  753. memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  754. if (unlikely(!memcg))
  755. break;
  756. } while (!css_tryget(&memcg->css));
  757. rcu_read_unlock();
  758. return memcg;
  759. }
  760. /**
  761. * mem_cgroup_iter - iterate over memory cgroup hierarchy
  762. * @root: hierarchy root
  763. * @prev: previously returned memcg, NULL on first invocation
  764. * @reclaim: cookie for shared reclaim walks, NULL for full walks
  765. *
  766. * Returns references to children of the hierarchy below @root, or
  767. * @root itself, or %NULL after a full round-trip.
  768. *
  769. * Caller must pass the return value in @prev on subsequent
  770. * invocations for reference counting, or use mem_cgroup_iter_break()
  771. * to cancel a hierarchy walk before the round-trip is complete.
  772. *
  773. * Reclaimers can specify a zone and a priority level in @reclaim to
  774. * divide up the memcgs in the hierarchy among all concurrent
  775. * reclaimers operating on the same zone and priority.
  776. */
  777. struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
  778. struct mem_cgroup *prev,
  779. struct mem_cgroup_reclaim_cookie *reclaim)
  780. {
  781. struct mem_cgroup *memcg = NULL;
  782. int id = 0;
  783. if (mem_cgroup_disabled())
  784. return NULL;
  785. if (!root)
  786. root = root_mem_cgroup;
  787. if (prev && !reclaim)
  788. id = css_id(&prev->css);
  789. if (prev && prev != root)
  790. css_put(&prev->css);
  791. if (!root->use_hierarchy && root != root_mem_cgroup) {
  792. if (prev)
  793. return NULL;
  794. return root;
  795. }
  796. while (!memcg) {
  797. struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
  798. struct cgroup_subsys_state *css;
  799. if (reclaim) {
  800. int nid = zone_to_nid(reclaim->zone);
  801. int zid = zone_idx(reclaim->zone);
  802. struct mem_cgroup_per_zone *mz;
  803. mz = mem_cgroup_zoneinfo(root, nid, zid);
  804. iter = &mz->reclaim_iter[reclaim->priority];
  805. if (prev && reclaim->generation != iter->generation)
  806. return NULL;
  807. id = iter->position;
  808. }
  809. rcu_read_lock();
  810. css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
  811. if (css) {
  812. if (css == &root->css || css_tryget(css))
  813. memcg = container_of(css,
  814. struct mem_cgroup, css);
  815. } else
  816. id = 0;
  817. rcu_read_unlock();
  818. if (reclaim) {
  819. iter->position = id;
  820. if (!css)
  821. iter->generation++;
  822. else if (!prev && memcg)
  823. reclaim->generation = iter->generation;
  824. }
  825. if (prev && !css)
  826. return NULL;
  827. }
  828. return memcg;
  829. }
  830. /**
  831. * mem_cgroup_iter_break - abort a hierarchy walk prematurely
  832. * @root: hierarchy root
  833. * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
  834. */
  835. void mem_cgroup_iter_break(struct mem_cgroup *root,
  836. struct mem_cgroup *prev)
  837. {
  838. if (!root)
  839. root = root_mem_cgroup;
  840. if (prev && prev != root)
  841. css_put(&prev->css);
  842. }
  843. /*
  844. * Iteration constructs for visiting all cgroups (under a tree). If
  845. * loops are exited prematurely (break), mem_cgroup_iter_break() must
  846. * be used for reference counting.
  847. */
  848. #define for_each_mem_cgroup_tree(iter, root) \
  849. for (iter = mem_cgroup_iter(root, NULL, NULL); \
  850. iter != NULL; \
  851. iter = mem_cgroup_iter(root, iter, NULL))
  852. #define for_each_mem_cgroup(iter) \
  853. for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
  854. iter != NULL; \
  855. iter = mem_cgroup_iter(NULL, iter, NULL))
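/*
 * Illustrative sketch (not part of the original file): a hierarchy walk
 * that bails out early must hand the last returned memcg back to
 * mem_cgroup_iter_break() so its css reference is dropped. The oom_lock
 * predicate is only an example of a per-memcg condition.
 */
static bool __maybe_unused mem_cgroup_tree_has_oom_lock(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (iter->oom_lock) {
			mem_cgroup_iter_break(root, iter);
			return true;
		}
	}
	return false;
}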
  856. static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
  857. {
  858. return (memcg == root_mem_cgroup);
  859. }
  860. void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  861. {
  862. struct mem_cgroup *memcg;
  863. if (!mm)
  864. return;
  865. rcu_read_lock();
  866. memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  867. if (unlikely(!memcg))
  868. goto out;
  869. switch (idx) {
  870. case PGFAULT:
  871. this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
  872. break;
  873. case PGMAJFAULT:
  874. this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
  875. break;
  876. default:
  877. BUG();
  878. }
  879. out:
  880. rcu_read_unlock();
  881. }
  882. EXPORT_SYMBOL(mem_cgroup_count_vm_event);
  883. /**
  884. * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  885. * @zone: zone of the wanted lruvec
  886. * @memcg: memcg of the wanted lruvec
  887. *
  888. * Returns the lru list vector holding pages for the given @zone and
  889. * @memcg. This can be the global zone lruvec, if the memory controller
  890. * is disabled.
  891. */
  892. struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  893. struct mem_cgroup *memcg)
  894. {
  895. struct mem_cgroup_per_zone *mz;
  896. if (mem_cgroup_disabled())
  897. return &zone->lruvec;
  898. mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
  899. return &mz->lruvec;
  900. }
  901. /*
  902. * Following LRU functions are allowed to be used without PCG_LOCK.
  903. * Operations are called by routines of the global LRU independently from memcg.
  904. * What we have to take care of here is the validity of pc->mem_cgroup.
  905. *
  906. * Changes to pc->mem_cgroup happens when
  907. * 1. charge
  908. * 2. moving account
  909. * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
  910. * It is added to LRU before charge.
  911. * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
  912. * When moving account, the page is not on LRU. It's isolated.
  913. */
  914. /**
  915. * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
  916. * @zone: zone of the page
  917. * @page: the page
  918. * @lru: current lru
  919. *
  920. * This function accounts for @page being added to @lru, and returns
  921. * the lruvec for the given @zone and the memcg @page is charged to.
  922. *
  923. * The callsite is then responsible for physically linking the page to
  924. * the returned lruvec->lists[@lru].
  925. */
  926. struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
  927. enum lru_list lru)
  928. {
  929. struct mem_cgroup_per_zone *mz;
  930. struct mem_cgroup *memcg;
  931. struct page_cgroup *pc;
  932. if (mem_cgroup_disabled())
  933. return &zone->lruvec;
  934. pc = lookup_page_cgroup(page);
  935. memcg = pc->mem_cgroup;
  936. /*
  937. * Surreptitiously switch any uncharged page to root:
  938. * an uncharged page off lru does nothing to secure
  939. * its former mem_cgroup from sudden removal.
  940. *
  941. * Our caller holds lru_lock, and PageCgroupUsed is updated
  942. * under page_cgroup lock: between them, they make all uses
  943. * of pc->mem_cgroup safe.
  944. */
  945. if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
  946. pc->mem_cgroup = memcg = root_mem_cgroup;
  947. mz = page_cgroup_zoneinfo(memcg, page);
  948. /* compound_order() is stabilized through lru_lock */
  949. mz->lru_size[lru] += 1 << compound_order(page);
  950. return &mz->lruvec;
  951. }
  952. /**
  953. * mem_cgroup_lru_del_list - account for removing an lru page
  954. * @page: the page
  955. * @lru: target lru
  956. *
  957. * This function accounts for @page being removed from @lru.
  958. *
  959. * The callsite is then responsible for physically unlinking
  960. * @page->lru.
  961. */
  962. void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
  963. {
  964. struct mem_cgroup_per_zone *mz;
  965. struct mem_cgroup *memcg;
  966. struct page_cgroup *pc;
  967. if (mem_cgroup_disabled())
  968. return;
  969. pc = lookup_page_cgroup(page);
  970. memcg = pc->mem_cgroup;
  971. VM_BUG_ON(!memcg);
  972. mz = page_cgroup_zoneinfo(memcg, page);
  973. /* huge page split is done under lru_lock. so, we have no races. */
  974. VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
  975. mz->lru_size[lru] -= 1 << compound_order(page);
  976. }
  977. void mem_cgroup_lru_del(struct page *page)
  978. {
  979. mem_cgroup_lru_del_list(page, page_lru(page));
  980. }
  981. /**
  982. * mem_cgroup_lru_move_lists - account for moving a page between lrus
  983. * @zone: zone of the page
  984. * @page: the page
  985. * @from: current lru
  986. * @to: target lru
  987. *
  988. * This function accounts for @page being moved between the lrus @from
  989. * and @to, and returns the lruvec for the given @zone and the memcg
  990. * @page is charged to.
  991. *
  992. * The callsite is then responsible for physically relinking
  993. * @page->lru to the returned lruvec->lists[@to].
  994. */
  995. struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
  996. struct page *page,
  997. enum lru_list from,
  998. enum lru_list to)
  999. {
  1000. /* XXX: Optimize this, especially for @from == @to */
  1001. mem_cgroup_lru_del_list(page, from);
  1002. return mem_cgroup_lru_add_list(zone, page, to);
  1003. }
  1004. /*
1005. * Checks whether the given memcg is the same as root_memcg or lies within
1006. * root_memcg's hierarchy subtree.
  1007. */
  1008. static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
  1009. struct mem_cgroup *memcg)
  1010. {
  1011. if (root_memcg != memcg) {
  1012. return (root_memcg->use_hierarchy &&
  1013. css_is_ancestor(&memcg->css, &root_memcg->css));
  1014. }
  1015. return true;
  1016. }
  1017. int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
  1018. {
  1019. int ret;
  1020. struct mem_cgroup *curr = NULL;
  1021. struct task_struct *p;
  1022. p = find_lock_task_mm(task);
  1023. if (p) {
  1024. curr = try_get_mem_cgroup_from_mm(p->mm);
  1025. task_unlock(p);
  1026. } else {
  1027. /*
  1028. * All threads may have already detached their mm's, but the oom
  1029. * killer still needs to detect if they have already been oom
  1030. * killed to prevent needlessly killing additional tasks.
  1031. */
  1032. task_lock(task);
  1033. curr = mem_cgroup_from_task(task);
  1034. if (curr)
  1035. css_get(&curr->css);
  1036. task_unlock(task);
  1037. }
  1038. if (!curr)
  1039. return 0;
  1040. /*
1041. * We should check use_hierarchy of "memcg", not "curr". Checking
1042. * use_hierarchy of "curr" here would make this function return true if
1043. * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1044. * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
  1045. */
  1046. ret = mem_cgroup_same_or_subtree(memcg, curr);
  1047. css_put(&curr->css);
  1048. return ret;
  1049. }
  1050. int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
  1051. {
  1052. unsigned long inactive_ratio;
  1053. int nid = zone_to_nid(zone);
  1054. int zid = zone_idx(zone);
  1055. unsigned long inactive;
  1056. unsigned long active;
  1057. unsigned long gb;
  1058. inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
  1059. BIT(LRU_INACTIVE_ANON));
  1060. active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
  1061. BIT(LRU_ACTIVE_ANON));
  1062. gb = (inactive + active) >> (30 - PAGE_SHIFT);
  1063. if (gb)
  1064. inactive_ratio = int_sqrt(10 * gb);
  1065. else
  1066. inactive_ratio = 1;
  1067. return inactive * inactive_ratio < active;
  1068. }
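/*
 * Worked example for the ratio above (numbers are illustrative): with 4 GiB
 * of anon pages in this zone, gb = 4 and inactive_ratio = int_sqrt(40) = 6,
 * so the inactive anon list is reported as "low" whenever
 * inactive * 6 < active, i.e. when less than roughly 1/7 of the anon pages
 * sit on the inactive list.
 */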
  1069. int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
  1070. {
  1071. unsigned long active;
  1072. unsigned long inactive;
  1073. int zid = zone_idx(zone);
  1074. int nid = zone_to_nid(zone);
  1075. inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
  1076. BIT(LRU_INACTIVE_FILE));
  1077. active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
  1078. BIT(LRU_ACTIVE_FILE));
  1079. return (active > inactive);
  1080. }
  1081. struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
  1082. struct zone *zone)
  1083. {
  1084. int nid = zone_to_nid(zone);
  1085. int zid = zone_idx(zone);
  1086. struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  1087. return &mz->reclaim_stat;
  1088. }
  1089. struct zone_reclaim_stat *
  1090. mem_cgroup_get_reclaim_stat_from_page(struct page *page)
  1091. {
  1092. struct page_cgroup *pc;
  1093. struct mem_cgroup_per_zone *mz;
  1094. if (mem_cgroup_disabled())
  1095. return NULL;
  1096. pc = lookup_page_cgroup(page);
  1097. if (!PageCgroupUsed(pc))
  1098. return NULL;
  1099. /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
  1100. smp_rmb();
  1101. mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
  1102. return &mz->reclaim_stat;
  1103. }
  1104. #define mem_cgroup_from_res_counter(counter, member) \
  1105. container_of(counter, struct mem_cgroup, member)
  1106. /**
  1107. * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1108. * @memcg: the memory cgroup
  1109. *
1110. * Returns the maximum amount of memory @memcg can be charged with, in
  1111. * pages.
  1112. */
  1113. static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
  1114. {
  1115. unsigned long long margin;
  1116. margin = res_counter_margin(&memcg->res);
  1117. if (do_swap_account)
  1118. margin = min(margin, res_counter_margin(&memcg->memsw));
  1119. return margin >> PAGE_SHIFT;
  1120. }
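/*
 * Worked example (illustrative numbers): with a 512 MiB limit and 500 MiB of
 * usage, res_counter_margin() returns 12 MiB, so this helper reports a margin
 * of 3072 pages (with 4 KiB pages). With swap accounting enabled, the smaller
 * of the res and memsw margins is used.
 */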
  1121. int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  1122. {
  1123. struct cgroup *cgrp = memcg->css.cgroup;
  1124. /* root ? */
  1125. if (cgrp->parent == NULL)
  1126. return vm_swappiness;
  1127. return memcg->swappiness;
  1128. }
  1129. /*
1130. * memcg->moving_account is used to check the possibility that some thread
1131. * is calling move_account(). When a thread on CPU-A starts moving pages
1132. * under a memcg, other threads should check memcg->moving_account under
1133. * rcu_read_lock(), like this:
1134. *
1135. *       CPU-A                                 CPU-B
1136. *                                        rcu_read_lock()
1137. *       memcg->moving_account+1          if (memcg->moving_account)
1138. *                                                take heavy locks.
1139. *       synchronize_rcu()                update something.
1140. *                                        rcu_read_unlock()
1141. *       start move here.
  1142. */
  1143. /* for quick checking without looking up memcg */
  1144. atomic_t memcg_moving __read_mostly;
  1145. static void mem_cgroup_start_move(struct mem_cgroup *memcg)
  1146. {
  1147. atomic_inc(&memcg_moving);
  1148. atomic_inc(&memcg->moving_account);
  1149. synchronize_rcu();
  1150. }
  1151. static void mem_cgroup_end_move(struct mem_cgroup *memcg)
  1152. {
  1153. /*
  1154. * Now, mem_cgroup_clear_mc() may call this function with NULL.
  1155. * We check NULL in callee rather than caller.
  1156. */
  1157. if (memcg) {
  1158. atomic_dec(&memcg_moving);
  1159. atomic_dec(&memcg->moving_account);
  1160. }
  1161. }
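/*
 * Hedged sketch (the function name is illustrative; real users are e.g. the
 * task-move and force_empty paths): the mover brackets the whole operation
 * with start/end so that readers sampling memcg->moving_account under
 * rcu_read_lock() (see mem_cgroup_stolen() below) fall back to the locked
 * slow path while a move is in flight.
 */
static __maybe_unused void example_move_pages(struct mem_cgroup *memcg)
{
	mem_cgroup_start_move(memcg);	/* bump counters, wait for readers */
	/* ... reassign pc->mem_cgroup for the pages being moved ... */
	mem_cgroup_end_move(memcg);	/* re-enable the lockless fast path */
}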
  1162. /*
1163. * Two routines for checking whether "memcg" is under move_account() or not.
1164. *
1165. * mem_cgroup_stolen() - checks whether a cgroup is mc.from or not. This
1166. * is used for avoiding races in accounting. If true,
1167. * pc->mem_cgroup may be overwritten.
1168. *
1169. * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
1170. * under the hierarchy of moving cgroups. This is used for
1171. * waiting under the high memory pressure caused by "move".
  1172. */
  1173. static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
  1174. {
  1175. VM_BUG_ON(!rcu_read_lock_held());
  1176. return atomic_read(&memcg->moving_account) > 0;
  1177. }
  1178. static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
  1179. {
  1180. struct mem_cgroup *from;
  1181. struct mem_cgroup *to;
  1182. bool ret = false;
  1183. /*
1184. * Unlike the task_move routines, we access mc.to and mc.from without the
1185. * mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
  1186. */
  1187. spin_lock(&mc.lock);
  1188. from = mc.from;
  1189. to = mc.to;
  1190. if (!from)
  1191. goto unlock;
  1192. ret = mem_cgroup_same_or_subtree(memcg, from)
  1193. || mem_cgroup_same_or_subtree(memcg, to);
  1194. unlock:
  1195. spin_unlock(&mc.lock);
  1196. return ret;
  1197. }
  1198. static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
  1199. {
  1200. if (mc.moving_task && current != mc.moving_task) {
  1201. if (mem_cgroup_under_move(memcg)) {
  1202. DEFINE_WAIT(wait);
  1203. prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
  1204. /* moving charge context might have finished. */
  1205. if (mc.moving_task)
  1206. schedule();
  1207. finish_wait(&mc.waitq, &wait);
  1208. return true;
  1209. }
  1210. }
  1211. return false;
  1212. }
  1213. /*
  1214. * Take this lock when
1215. * - code tries to modify a page's memcg while it's USED.
1216. * - code tries to modify page state accounting in a memcg.
  1217. * see mem_cgroup_stolen(), too.
  1218. */
  1219. static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
  1220. unsigned long *flags)
  1221. {
  1222. spin_lock_irqsave(&memcg->move_lock, *flags);
  1223. }
  1224. static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
  1225. unsigned long *flags)
  1226. {
  1227. spin_unlock_irqrestore(&memcg->move_lock, *flags);
  1228. }
  1229. /**
  1230. * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
  1231. * @memcg: The memory cgroup that went over limit
  1232. * @p: Task that is going to be killed
  1233. *
  1234. * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  1235. * enabled
  1236. */
  1237. void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  1238. {
  1239. struct cgroup *task_cgrp;
  1240. struct cgroup *mem_cgrp;
  1241. /*
  1242. * Need a buffer in BSS, can't rely on allocations. The code relies
  1243. * on the assumption that OOM is serialized for memory controller.
  1244. * If this assumption is broken, revisit this code.
  1245. */
  1246. static char memcg_name[PATH_MAX];
  1247. int ret;
  1248. if (!memcg || !p)
  1249. return;
  1250. rcu_read_lock();
  1251. mem_cgrp = memcg->css.cgroup;
  1252. task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
  1253. ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
  1254. if (ret < 0) {
  1255. /*
1256. * Unfortunately, we are unable to convert to a useful name,
1257. * but we'll still print out the usage information.
  1258. */
  1259. rcu_read_unlock();
  1260. goto done;
  1261. }
  1262. rcu_read_unlock();
  1263. printk(KERN_INFO "Task in %s killed", memcg_name);
  1264. rcu_read_lock();
  1265. ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
  1266. if (ret < 0) {
  1267. rcu_read_unlock();
  1268. goto done;
  1269. }
  1270. rcu_read_unlock();
  1271. /*
1272. * Continues from above, so we don't need a KERN_ level.
  1273. */
  1274. printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
  1275. done:
  1276. printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
  1277. res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
  1278. res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
  1279. res_counter_read_u64(&memcg->res, RES_FAILCNT));
  1280. printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
  1281. "failcnt %llu\n",
  1282. res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
  1283. res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
  1284. res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
  1285. }
  1286. /*
1287. * This function returns the number of memcgs under the hierarchy tree.
1288. * Returns 1 (self count) if there are no children.
  1289. */
  1290. static int mem_cgroup_count_children(struct mem_cgroup *memcg)
  1291. {
  1292. int num = 0;
  1293. struct mem_cgroup *iter;
  1294. for_each_mem_cgroup_tree(iter, memcg)
  1295. num++;
  1296. return num;
  1297. }
  1298. /*
  1299. * Return the memory (and swap, if configured) limit for a memcg.
  1300. */
  1301. u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
  1302. {
  1303. u64 limit;
  1304. u64 memsw;
  1305. limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  1306. limit += total_swap_pages << PAGE_SHIFT;
  1307. memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  1308. /*
  1309. * If memsw is finite and limits the amount of swap space available
  1310. * to this memcg, return that limit.
  1311. */
  1312. return min(limit, memsw);
  1313. }
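/*
 * Worked example (illustrative numbers): with a 1 GiB memory limit, 2 GiB of
 * total swap and an effectively unlimited memsw limit, limit becomes
 * 1 GiB + 2 GiB = 3 GiB and min(limit, memsw) returns 3 GiB. If memsw were
 * capped at 1.5 GiB instead, that tighter value would be returned.
 */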
  1314. static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
  1315. gfp_t gfp_mask,
  1316. unsigned long flags)
  1317. {
  1318. unsigned long total = 0;
  1319. bool noswap = false;
  1320. int loop;
  1321. if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
  1322. noswap = true;
  1323. if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
  1324. noswap = true;
  1325. for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
  1326. if (loop)
  1327. drain_all_stock_async(memcg);
  1328. total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
  1329. /*
  1330. * Allow limit shrinkers, which are triggered directly
  1331. * by userspace, to catch signals and stop reclaim
  1332. * after minimal progress, regardless of the margin.
  1333. */
  1334. if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
  1335. break;
  1336. if (mem_cgroup_margin(memcg))
  1337. break;
  1338. /*
  1339. * If nothing was reclaimed after two attempts, there
  1340. * may be no reclaimable pages in this hierarchy.
  1341. */
  1342. if (loop && !total)
  1343. break;
  1344. }
  1345. return total;
  1346. }
  1347. /**
  1348. * test_mem_cgroup_node_reclaimable
1349. * @memcg: the target memcg
1350. * @nid: the node ID to be checked.
1351. * @noswap: specify true here if the user wants file-only information.
  1352. *
  1353. * This function returns whether the specified memcg contains any
  1354. * reclaimable pages on a node. Returns true if there are any reclaimable
  1355. * pages in the node.
  1356. */
  1357. static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
  1358. int nid, bool noswap)
  1359. {
  1360. if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
  1361. return true;
  1362. if (noswap || !total_swap_pages)
  1363. return false;
  1364. if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
  1365. return true;
  1366. return false;
  1367. }
  1368. #if MAX_NUMNODES > 1
  1369. /*
  1370. * Always updating the nodemask is not very good - even if we have an empty
  1371. * list or the wrong list here, we can start from some node and traverse all
  1372. * nodes based on the zonelist. So update the list loosely once per 10 secs.
  1373. *
  1374. */
  1375. static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
  1376. {
  1377. int nid;
  1378. /*
  1379. * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
  1380. * pagein/pageout changes since the last update.
  1381. */
  1382. if (!atomic_read(&memcg->numainfo_events))
  1383. return;
  1384. if (atomic_inc_return(&memcg->numainfo_updating) > 1)
  1385. return;
  1386. /* make a nodemask where this memcg uses memory from */
  1387. memcg->scan_nodes = node_states[N_HIGH_MEMORY];
  1388. for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
  1389. if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
  1390. node_clear(nid, memcg->scan_nodes);
  1391. }
  1392. atomic_set(&memcg->numainfo_events, 0);
  1393. atomic_set(&memcg->numainfo_updating, 0);
  1394. }
  1395. /*
1396. * Select a node to start reclaim from. Because all we need is to reduce
1397. * the usage counter, starting from anywhere is OK. Reclaiming memory from
1398. * the current node has both pros and cons.
1399. *
1400. * Freeing memory from the current node means freeing memory from a node
1401. * which we'll use or have used, so it may hurt the LRU. And if several
1402. * threads hit their limits, they will contend on one node. But freeing
1403. * from a remote node costs more for memory reclaim because of memory latency.
1404. *
1405. * For now, we use round-robin. A better algorithm is welcome.
  1406. */
  1407. int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  1408. {
  1409. int node;
  1410. mem_cgroup_may_update_nodemask(memcg);
  1411. node = memcg->last_scanned_node;
  1412. node = next_node(node, memcg->scan_nodes);
  1413. if (node == MAX_NUMNODES)
  1414. node = first_node(memcg->scan_nodes);
  1415. /*
1416. * We call this when we hit the limit, not when pages are added to the LRU.
1417. * No LRU may hold pages because all pages are UNEVICTABLE, or because the
1418. * memcg is too small and its pages are not on any LRU yet. In that case,
1419. * we use the current node.
  1420. */
  1421. if (unlikely(node == MAX_NUMNODES))
  1422. node = numa_node_id();
  1423. memcg->last_scanned_node = node;
  1424. return node;
  1425. }
  1426. /*
1427. * Check all nodes for whether they contain reclaimable pages or not.
1428. * For a quick scan, we make use of scan_nodes. This allows us to skip
1429. * unused nodes. But scan_nodes is lazily updated and may not contain
1430. * enough new information, so we need to double check.
  1431. */
  1432. bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  1433. {
  1434. int nid;
  1435. /*
1436. * Quick check, making use of scan_nodes.
  1437. * We can skip unused nodes.
  1438. */
  1439. if (!nodes_empty(memcg->scan_nodes)) {
  1440. for (nid = first_node(memcg->scan_nodes);
  1441. nid < MAX_NUMNODES;
  1442. nid = next_node(nid, memcg->scan_nodes)) {
  1443. if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
  1444. return true;
  1445. }
  1446. }
  1447. /*
  1448. * Check rest of nodes.
  1449. */
  1450. for_each_node_state(nid, N_HIGH_MEMORY) {
  1451. if (node_isset(nid, memcg->scan_nodes))
  1452. continue;
  1453. if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
  1454. return true;
  1455. }
  1456. return false;
  1457. }
  1458. #else
  1459. int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  1460. {
  1461. return 0;
  1462. }
  1463. bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  1464. {
  1465. return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
  1466. }
  1467. #endif
  1468. static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
  1469. struct zone *zone,
  1470. gfp_t gfp_mask,
  1471. unsigned long *total_scanned)
  1472. {
  1473. struct mem_cgroup *victim = NULL;
  1474. int total = 0;
  1475. int loop = 0;
  1476. unsigned long excess;
  1477. unsigned long nr_scanned;
  1478. struct mem_cgroup_reclaim_cookie reclaim = {
  1479. .zone = zone,
  1480. .priority = 0,
  1481. };
  1482. excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
  1483. while (1) {
  1484. victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
  1485. if (!victim) {
  1486. loop++;
  1487. if (loop >= 2) {
  1488. /*
1489. * If we have not been able to reclaim
1490. * anything, it might be because there are
1491. * no reclaimable pages under this hierarchy.
  1492. */
  1493. if (!total)
  1494. break;
  1495. /*
  1496. * We want to do more targeted reclaim.
1497. * excess >> 2 is not too large, so we don't
1498. * reclaim too much, nor too small, so we don't keep
1499. * coming back to reclaim from this cgroup.
  1500. */
  1501. if (total >= (excess >> 2) ||
  1502. (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
  1503. break;
  1504. }
  1505. continue;
  1506. }
  1507. if (!mem_cgroup_reclaimable(victim, false))
  1508. continue;
  1509. total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
  1510. zone, &nr_scanned);
  1511. *total_scanned += nr_scanned;
  1512. if (!res_counter_soft_limit_excess(&root_memcg->res))
  1513. break;
  1514. }
  1515. mem_cgroup_iter_break(root_memcg, victim);
  1516. return total;
  1517. }
  1518. /*
1519. * Check whether the OOM killer is already running under our hierarchy.
1520. * If someone else is running it, return false.
1521. * Has to be called with memcg_oom_lock held.
  1522. */
  1523. static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
  1524. {
  1525. struct mem_cgroup *iter, *failed = NULL;
  1526. for_each_mem_cgroup_tree(iter, memcg) {
  1527. if (iter->oom_lock) {
  1528. /*
  1529. * this subtree of our hierarchy is already locked
1530. * so we cannot grant the lock.
  1531. */
  1532. failed = iter;
  1533. mem_cgroup_iter_break(memcg, iter);
  1534. break;
  1535. } else
  1536. iter->oom_lock = true;
  1537. }
  1538. if (!failed)
  1539. return true;
  1540. /*
  1541. * OK, we failed to lock the whole subtree so we have to clean up
1542. * what we already set up, up to the failing subtree.
  1543. */
  1544. for_each_mem_cgroup_tree(iter, memcg) {
  1545. if (iter == failed) {
  1546. mem_cgroup_iter_break(memcg, iter);
  1547. break;
  1548. }
  1549. iter->oom_lock = false;
  1550. }
  1551. return false;
  1552. }
  1553. /*
1554. * Has to be called with memcg_oom_lock held.
  1555. */
  1556. static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
  1557. {
  1558. struct mem_cgroup *iter;
  1559. for_each_mem_cgroup_tree(iter, memcg)
  1560. iter->oom_lock = false;
  1561. return 0;
  1562. }
  1563. static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
  1564. {
  1565. struct mem_cgroup *iter;
  1566. for_each_mem_cgroup_tree(iter, memcg)
  1567. atomic_inc(&iter->under_oom);
  1568. }
  1569. static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
  1570. {
  1571. struct mem_cgroup *iter;
  1572. /*
  1573. * When a new child is created while the hierarchy is under oom,
  1574. * mem_cgroup_oom_lock() may not be called. We have to use
  1575. * atomic_add_unless() here.
  1576. */
  1577. for_each_mem_cgroup_tree(iter, memcg)
  1578. atomic_add_unless(&iter->under_oom, -1, 0);
  1579. }
  1580. static DEFINE_SPINLOCK(memcg_oom_lock);
  1581. static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
  1582. struct oom_wait_info {
  1583. struct mem_cgroup *memcg;
  1584. wait_queue_t wait;
  1585. };
  1586. static int memcg_oom_wake_function(wait_queue_t *wait,
  1587. unsigned mode, int sync, void *arg)
  1588. {
  1589. struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
  1590. struct mem_cgroup *oom_wait_memcg;
  1591. struct oom_wait_info *oom_wait_info;
  1592. oom_wait_info = container_of(wait, struct oom_wait_info, wait);
  1593. oom_wait_memcg = oom_wait_info->memcg;
  1594. /*
  1595. * Both of oom_wait_info->memcg and wake_memcg are stable under us.
  1596. * Then we can use css_is_ancestor without taking care of RCU.
  1597. */
  1598. if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
  1599. && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
  1600. return 0;
  1601. return autoremove_wake_function(wait, mode, sync, arg);
  1602. }
  1603. static void memcg_wakeup_oom(struct mem_cgroup *memcg)
  1604. {
  1605. /* for filtering, pass "memcg" as argument. */
  1606. __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
  1607. }
  1608. static void memcg_oom_recover(struct mem_cgroup *memcg)
  1609. {
  1610. if (memcg && atomic_read(&memcg->under_oom))
  1611. memcg_wakeup_oom(memcg);
  1612. }
  1613. /*
1614. * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
  1615. */
  1616. bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  1617. {
  1618. struct oom_wait_info owait;
  1619. bool locked, need_to_kill;
  1620. owait.memcg = memcg;
  1621. owait.wait.flags = 0;
  1622. owait.wait.func = memcg_oom_wake_function;
  1623. owait.wait.private = current;
  1624. INIT_LIST_HEAD(&owait.wait.task_list);
  1625. need_to_kill = true;
  1626. mem_cgroup_mark_under_oom(memcg);
  1627. /* At first, try to OOM lock hierarchy under memcg.*/
  1628. spin_lock(&memcg_oom_lock);
  1629. locked = mem_cgroup_oom_lock(memcg);
  1630. /*
1631. * Even if signal_pending(), we can't quit the charge() loop without
1632. * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
1633. * under OOM is always welcome, so use TASK_KILLABLE here.
  1634. */
  1635. prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
  1636. if (!locked || memcg->oom_kill_disable)
  1637. need_to_kill = false;
  1638. if (locked)
  1639. mem_cgroup_oom_notify(memcg);
  1640. spin_unlock(&memcg_oom_lock);
  1641. if (need_to_kill) {
  1642. finish_wait(&memcg_oom_waitq, &owait.wait);
  1643. mem_cgroup_out_of_memory(memcg, mask, order);
  1644. } else {
  1645. schedule();
  1646. finish_wait(&memcg_oom_waitq, &owait.wait);
  1647. }
  1648. spin_lock(&memcg_oom_lock);
  1649. if (locked)
  1650. mem_cgroup_oom_unlock(memcg);
  1651. memcg_wakeup_oom(memcg);
  1652. spin_unlock(&memcg_oom_lock);
  1653. mem_cgroup_unmark_under_oom(memcg);
  1654. if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
  1655. return false;
  1656. /* Give chance to dying process */
  1657. schedule_timeout_uninterruptible(1);
  1658. return true;
  1659. }
  1660. /*
  1661. * Currently used to update mapped file statistics, but the routine can be
  1662. * generalized to update other statistics as well.
  1663. *
  1664. * Notes: Race condition
  1665. *
1666. * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1667. * it tends to be costly. Under some conditions, however, we don't need
1668. * to do so _always_.
1669. *
1670. * Considering "charge", lock_page_cgroup() is not required because all
1671. * file-stat operations happen after a page is attached to the radix-tree.
1672. * There is no race with "charge".
1673. *
1674. * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1675. * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
1676. * if there is a race with "uncharge". The statistics themselves are properly
1677. * handled by flags.
1678. *
1679. * Considering "move", this is the only case where we see a race. To keep the
1680. * race window small, we check memcg->moving_account and detect the
1681. * possibility of a race. If there is one, we take a lock.
  1682. */
  1683. void __mem_cgroup_begin_update_page_stat(struct page *page,
  1684. bool *locked, unsigned long *flags)
  1685. {
  1686. struct mem_cgroup *memcg;
  1687. struct page_cgroup *pc;
  1688. pc = lookup_page_cgroup(page);
  1689. again:
  1690. memcg = pc->mem_cgroup;
  1691. if (unlikely(!memcg || !PageCgroupUsed(pc)))
  1692. return;
  1693. /*
  1694. * If this memory cgroup is not under account moving, we don't
1695. * need to take move_lock_mem_cgroup(). Because we already hold
  1696. * rcu_read_lock(), any calls to move_account will be delayed until
  1697. * rcu_read_unlock() if mem_cgroup_stolen() == true.
  1698. */
  1699. if (!mem_cgroup_stolen(memcg))
  1700. return;
  1701. move_lock_mem_cgroup(memcg, flags);
  1702. if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
  1703. move_unlock_mem_cgroup(memcg, flags);
  1704. goto again;
  1705. }
  1706. *locked = true;
  1707. }
  1708. void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
  1709. {
  1710. struct page_cgroup *pc = lookup_page_cgroup(page);
  1711. /*
1712. * It's guaranteed that pc->mem_cgroup never changes while the
1713. * lock is held, because any routine that modifies pc->mem_cgroup
1714. * should take move_lock_mem_cgroup().
  1715. */
  1716. move_unlock_mem_cgroup(pc->mem_cgroup, flags);
  1717. }
  1718. void mem_cgroup_update_page_stat(struct page *page,
  1719. enum mem_cgroup_page_stat_item idx, int val)
  1720. {
  1721. struct mem_cgroup *memcg;
  1722. struct page_cgroup *pc = lookup_page_cgroup(page);
  1723. unsigned long uninitialized_var(flags);
  1724. if (mem_cgroup_disabled())
  1725. return;
  1726. memcg = pc->mem_cgroup;
  1727. if (unlikely(!memcg || !PageCgroupUsed(pc)))
  1728. return;
  1729. switch (idx) {
  1730. case MEMCG_NR_FILE_MAPPED:
  1731. idx = MEM_CGROUP_STAT_FILE_MAPPED;
  1732. break;
  1733. default:
  1734. BUG();
  1735. }
  1736. this_cpu_add(memcg->stat->count[idx], val);
  1737. }
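/*
 * Hedged usage sketch (the function name is illustrative): a statistics
 * updater samples the memcg under rcu_read_lock(), takes the move_lock only
 * while a move is in flight, and then updates the counter. Real callers
 * normally go through small wrapper helpers rather than open-coding this.
 */
static __maybe_unused void example_account_file_mapped(struct page *page)
{
	bool locked = false;
	unsigned long flags;

	rcu_read_lock();
	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	/* ... the caller updates its own state (e.g. the mapcount) here ... */
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
	if (locked)
		__mem_cgroup_end_update_page_stat(page, &flags);
	rcu_read_unlock();
}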
  1738. /*
1739. * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1740. * TODO: it may be necessary to use bigger numbers on big iron.
  1741. */
  1742. #define CHARGE_BATCH 32U
  1743. struct memcg_stock_pcp {
1744. struct mem_cgroup *cached; /* this is never the root cgroup */
  1745. unsigned int nr_pages;
  1746. struct work_struct work;
  1747. unsigned long flags;
  1748. #define FLUSHING_CACHED_CHARGE (0)
  1749. };
  1750. static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
  1751. static DEFINE_MUTEX(percpu_charge_mutex);
  1752. /*
1753. * Try to consume stocked charge on this cpu. If successful, one page is
1754. * consumed from the local stock and true is returned. If the stock is 0 or
1755. * holds charges from a cgroup which is not the current target, return false.
1756. * The stock will be refilled later.
  1757. */
  1758. static bool consume_stock(struct mem_cgroup *memcg)
  1759. {
  1760. struct memcg_stock_pcp *stock;
  1761. bool ret = true;
  1762. stock = &get_cpu_var(memcg_stock);
  1763. if (memcg == stock->cached && stock->nr_pages)
  1764. stock->nr_pages--;
  1765. else /* need to call res_counter_charge */
  1766. ret = false;
  1767. put_cpu_var(memcg_stock);
  1768. return ret;
  1769. }
  1770. /*
1771. * Return stock cached in the percpu area to the res_counter and reset the cached information.
  1772. */
  1773. static void drain_stock(struct memcg_stock_pcp *stock)
  1774. {
  1775. struct mem_cgroup *old = stock->cached;
  1776. if (stock->nr_pages) {
  1777. unsigned long bytes = stock->nr_pages * PAGE_SIZE;
  1778. res_counter_uncharge(&old->res, bytes);
  1779. if (do_swap_account)
  1780. res_counter_uncharge(&old->memsw, bytes);
  1781. stock->nr_pages = 0;
  1782. }
  1783. stock->cached = NULL;
  1784. }
  1785. /*
1786. * This must be called with preemption disabled, or by
1787. * a thread which is pinned to the local cpu.
  1788. */
  1789. static void drain_local_stock(struct work_struct *dummy)
  1790. {
  1791. struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
  1792. drain_stock(stock);
  1793. clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
  1794. }
  1795. /*
1796. * Cache charges (nr_pages) taken from the res_counter in the local per-cpu area.
1797. * They will be consumed later by consume_stock().
  1798. */
  1799. static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  1800. {
  1801. struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
  1802. if (stock->cached != memcg) { /* reset if necessary */
  1803. drain_stock(stock);
  1804. stock->cached = memcg;
  1805. }
  1806. stock->nr_pages += nr_pages;
  1807. put_cpu_var(memcg_stock);
  1808. }
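/*
 * Hedged sketch of how the stock is meant to be used (the helper name is
 * illustrative and swap accounting is omitted; compare mem_cgroup_do_charge()
 * and __mem_cgroup_try_charge() below): the fast path consumes one cached
 * page, and a miss charges a whole batch from the res_counter and parks the
 * surplus in the stock for later single-page charges on this cpu.
 */
static __maybe_unused int example_charge_one_page(struct mem_cgroup *memcg)
{
	struct res_counter *fail_res;

	if (consume_stock(memcg))		/* hit: one stocked page used */
		return 0;
	if (res_counter_charge(&memcg->res, CHARGE_BATCH * PAGE_SIZE,
			       &fail_res))
		return -ENOMEM;			/* over limit; no batching here */
	refill_stock(memcg, CHARGE_BATCH - 1);	/* keep the surplus cached */
	return 0;
}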
  1809. /*
1810. * Drains all per-CPU charge caches for the given root_memcg and the
1811. * subtree of the hierarchy under it. The sync flag says whether we should
1812. * block until the work is done.
  1813. */
  1814. static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
  1815. {
  1816. int cpu, curcpu;
  1817. /* Notify other cpus that system-wide "drain" is running */
  1818. get_online_cpus();
  1819. curcpu = get_cpu();
  1820. for_each_online_cpu(cpu) {
  1821. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  1822. struct mem_cgroup *memcg;
  1823. memcg = stock->cached;
  1824. if (!memcg || !stock->nr_pages)
  1825. continue;
  1826. if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
  1827. continue;
  1828. if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
  1829. if (cpu == curcpu)
  1830. drain_local_stock(&stock->work);
  1831. else
  1832. schedule_work_on(cpu, &stock->work);
  1833. }
  1834. }
  1835. put_cpu();
  1836. if (!sync)
  1837. goto out;
  1838. for_each_online_cpu(cpu) {
  1839. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  1840. if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
  1841. flush_work(&stock->work);
  1842. }
  1843. out:
  1844. put_online_cpus();
  1845. }
  1846. /*
1847. * Tries to drain stocked charges on other cpus. This function is asynchronous
1848. * and just queues a work item per cpu to drain locally on each cpu. The caller
1849. * can expect some charges to come back to the res_counter later but cannot
1850. * wait for it.
  1851. */
  1852. static void drain_all_stock_async(struct mem_cgroup *root_memcg)
  1853. {
  1854. /*
1855. * If someone is already draining, avoid adding more kworker runs.
  1856. */
  1857. if (!mutex_trylock(&percpu_charge_mutex))
  1858. return;
  1859. drain_all_stock(root_memcg, false);
  1860. mutex_unlock(&percpu_charge_mutex);
  1861. }
  1862. /* This is a synchronous drain interface. */
  1863. static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
  1864. {
  1865. /* called when force_empty is called */
  1866. mutex_lock(&percpu_charge_mutex);
  1867. drain_all_stock(root_memcg, true);
  1868. mutex_unlock(&percpu_charge_mutex);
  1869. }
  1870. /*
1871. * This function drains the percpu counter values from a DEAD cpu and
1872. * moves them into memcg->nocpu_base. Note that this function can be preempted.
  1873. */
  1874. static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
  1875. {
  1876. int i;
  1877. spin_lock(&memcg->pcp_counter_lock);
  1878. for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
  1879. long x = per_cpu(memcg->stat->count[i], cpu);
  1880. per_cpu(memcg->stat->count[i], cpu) = 0;
  1881. memcg->nocpu_base.count[i] += x;
  1882. }
  1883. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
  1884. unsigned long x = per_cpu(memcg->stat->events[i], cpu);
  1885. per_cpu(memcg->stat->events[i], cpu) = 0;
  1886. memcg->nocpu_base.events[i] += x;
  1887. }
  1888. spin_unlock(&memcg->pcp_counter_lock);
  1889. }
  1890. static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
  1891. unsigned long action,
  1892. void *hcpu)
  1893. {
  1894. int cpu = (unsigned long)hcpu;
  1895. struct memcg_stock_pcp *stock;
  1896. struct mem_cgroup *iter;
  1897. if (action == CPU_ONLINE)
  1898. return NOTIFY_OK;
  1899. if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
  1900. return NOTIFY_OK;
  1901. for_each_mem_cgroup(iter)
  1902. mem_cgroup_drain_pcp_counter(iter, cpu);
  1903. stock = &per_cpu(memcg_stock, cpu);
  1904. drain_stock(stock);
  1905. return NOTIFY_OK;
  1906. }
  1907. /* See __mem_cgroup_try_charge() for details */
  1908. enum {
  1909. CHARGE_OK, /* success */
  1910. CHARGE_RETRY, /* need to retry but retry is not bad */
  1911. CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
1912. CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough resources */
1913. CHARGE_OOM_DIE, /* the current task was killed because of OOM */
  1914. };
  1915. static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
  1916. unsigned int nr_pages, bool oom_check)
  1917. {
  1918. unsigned long csize = nr_pages * PAGE_SIZE;
  1919. struct mem_cgroup *mem_over_limit;
  1920. struct res_counter *fail_res;
  1921. unsigned long flags = 0;
  1922. int ret;
  1923. ret = res_counter_charge(&memcg->res, csize, &fail_res);
  1924. if (likely(!ret)) {
  1925. if (!do_swap_account)
  1926. return CHARGE_OK;
  1927. ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
  1928. if (likely(!ret))
  1929. return CHARGE_OK;
  1930. res_counter_uncharge(&memcg->res, csize);
  1931. mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
  1932. flags |= MEM_CGROUP_RECLAIM_NOSWAP;
  1933. } else
  1934. mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
  1935. /*
  1936. * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
  1937. * of regular pages (CHARGE_BATCH), or a single regular page (1).
  1938. *
  1939. * Never reclaim on behalf of optional batching, retry with a
  1940. * single page instead.
  1941. */
  1942. if (nr_pages == CHARGE_BATCH)
  1943. return CHARGE_RETRY;
  1944. if (!(gfp_mask & __GFP_WAIT))
  1945. return CHARGE_WOULDBLOCK;
  1946. ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
  1947. if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
  1948. return CHARGE_RETRY;
  1949. /*
  1950. * Even though the limit is exceeded at this point, reclaim
  1951. * may have been able to free some pages. Retry the charge
  1952. * before killing the task.
  1953. *
  1954. * Only for regular pages, though: huge pages are rather
  1955. * unlikely to succeed so close to the limit, and we fall back
  1956. * to regular pages anyway in case of failure.
  1957. */
  1958. if (nr_pages == 1 && ret)
  1959. return CHARGE_RETRY;
  1960. /*
  1961. * At task move, charge accounts can be doubly counted. So, it's
  1962. * better to wait until the end of task_move if something is going on.
  1963. */
  1964. if (mem_cgroup_wait_acct_move(mem_over_limit))
  1965. return CHARGE_RETRY;
1966. /* If we don't need to call the oom-killer at all, return immediately */
  1967. if (!oom_check)
  1968. return CHARGE_NOMEM;
  1969. /* check OOM */
  1970. if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
  1971. return CHARGE_OOM_DIE;
  1972. return CHARGE_RETRY;
  1973. }
  1974. /*
  1975. * __mem_cgroup_try_charge() does
  1976. * 1. detect memcg to be charged against from passed *mm and *ptr,
  1977. * 2. update res_counter
  1978. * 3. call memory reclaim if necessary.
  1979. *
1980. * In some special cases, if the task is dying (fatal_signal_pending()) or
1981. * has TIF_MEMDIE set, this function returns -EINTR while writing root_mem_cgroup
1982. * to *ptr. There are two reasons for this. 1: dying threads should quit as soon
  1983. * as possible without any hazards. 2: all pages should have a valid
  1984. * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
  1985. * pointer, that is treated as a charge to root_mem_cgroup.
  1986. *
  1987. * So __mem_cgroup_try_charge() will return
  1988. * 0 ... on success, filling *ptr with a valid memcg pointer.
  1989. * -ENOMEM ... charge failure because of resource limits.
  1990. * -EINTR ... if thread is fatal. *ptr is filled with root_mem_cgroup.
  1991. *
1992. * Unlike the exported interface, an "oom" parameter is added. If oom == true,
  1993. * the oom-killer can be invoked.
  1994. */
  1995. static int __mem_cgroup_try_charge(struct mm_struct *mm,
  1996. gfp_t gfp_mask,
  1997. unsigned int nr_pages,
  1998. struct mem_cgroup **ptr,
  1999. bool oom)
  2000. {
  2001. unsigned int batch = max(CHARGE_BATCH, nr_pages);
  2002. int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2003. struct mem_cgroup *memcg = NULL;
  2004. int ret;
  2005. /*
2006. * Unlike the global VM's OOM kill, we're not in a system-level memory
2007. * shortage. So, allow dying processes to go ahead, in addition to
2008. * MEMDIE processes.
  2009. */
  2010. if (unlikely(test_thread_flag(TIF_MEMDIE)
  2011. || fatal_signal_pending(current)))
  2012. goto bypass;
  2013. /*
  2014. * We always charge the cgroup the mm_struct belongs to.
  2015. * The mm_struct's mem_cgroup changes on task migration if the
  2016. * thread group leader migrates. It's possible that mm is not
2017. * set; if so, charge root_mem_cgroup (happens for pagecache usage).
  2018. */
  2019. if (!*ptr && !mm)
  2020. *ptr = root_mem_cgroup;
  2021. again:
  2022. if (*ptr) { /* css should be a valid one */
  2023. memcg = *ptr;
  2024. VM_BUG_ON(css_is_removed(&memcg->css));
  2025. if (mem_cgroup_is_root(memcg))
  2026. goto done;
  2027. if (nr_pages == 1 && consume_stock(memcg))
  2028. goto done;
  2029. css_get(&memcg->css);
  2030. } else {
  2031. struct task_struct *p;
  2032. rcu_read_lock();
  2033. p = rcu_dereference(mm->owner);
  2034. /*
2035. * Because we don't have task_lock(), "p" can exit.
2036. * In that case, "memcg" can point to root, or p can be NULL due to
2037. * a race with swapoff. Then, we have a small risk of mis-accounting.
2038. * But that kind of mis-accounting by race always happens because
2039. * we don't have cgroup_mutex(). Closing it would be overkill, so we
2040. * allow that small race here.
2041. * (*) swapoff et al. will charge against the mm_struct, not against the
2042. * task_struct, so mm->owner can be NULL.
  2043. */
  2044. memcg = mem_cgroup_from_task(p);
  2045. if (!memcg)
  2046. memcg = root_mem_cgroup;
  2047. if (mem_cgroup_is_root(memcg)) {
  2048. rcu_read_unlock();
  2049. goto done;
  2050. }
  2051. if (nr_pages == 1 && consume_stock(memcg)) {
  2052. /*
2053. * It seems dangerous to access memcg without css_get().
2054. * But considering how consume_stock works, it's not
2055. * necessary. If consume_stock succeeds, some charges
2056. * from this memcg are cached on this cpu. So, we
2057. * don't need to call css_get()/css_tryget() before
2058. * calling consume_stock().
  2059. */
  2060. rcu_read_unlock();
  2061. goto done;
  2062. }
  2063. /* after here, we may be blocked. we need to get refcnt */
  2064. if (!css_tryget(&memcg->css)) {
  2065. rcu_read_unlock();
  2066. goto again;
  2067. }
  2068. rcu_read_unlock();
  2069. }
  2070. do {
  2071. bool oom_check;
  2072. /* If killed, bypass charge */
  2073. if (fatal_signal_pending(current)) {
  2074. css_put(&memcg->css);
  2075. goto bypass;
  2076. }
  2077. oom_check = false;
  2078. if (oom && !nr_oom_retries) {
  2079. oom_check = true;
  2080. nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2081. }
  2082. ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
  2083. switch (ret) {
  2084. case CHARGE_OK:
  2085. break;
  2086. case CHARGE_RETRY: /* not in OOM situation but retry */
  2087. batch = nr_pages;
  2088. css_put(&memcg->css);
  2089. memcg = NULL;
  2090. goto again;
  2091. case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
  2092. css_put(&memcg->css);
  2093. goto nomem;
  2094. case CHARGE_NOMEM: /* OOM routine works */
  2095. if (!oom) {
  2096. css_put(&memcg->css);
  2097. goto nomem;
  2098. }
  2099. /* If oom, we never return -ENOMEM */
  2100. nr_oom_retries--;
  2101. break;
  2102. case CHARGE_OOM_DIE: /* Killed by OOM Killer */
  2103. css_put(&memcg->css);
  2104. goto bypass;
  2105. }
  2106. } while (ret != CHARGE_OK);
  2107. if (batch > nr_pages)
  2108. refill_stock(memcg, batch - nr_pages);
  2109. css_put(&memcg->css);
  2110. done:
  2111. *ptr = memcg;
  2112. return 0;
  2113. nomem:
  2114. *ptr = NULL;
  2115. return -ENOMEM;
  2116. bypass:
  2117. *ptr = root_mem_cgroup;
  2118. return -EINTR;
  2119. }
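/*
 * Hedged sketch of the calling convention documented above (the function name
 * is illustrative; compare mem_cgroup_charge_common() below):
 */
static __maybe_unused int example_try_charge(struct mm_struct *mm,
					     gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	int ret;

	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
	if (ret == -ENOMEM)
		return ret;	/* limits hit and reclaim/OOM did not help */
	/*
	 * ret is 0 or -EINTR here; either way *memcg is valid (it is
	 * root_mem_cgroup in the bypass case) and the caller is expected to
	 * commit the page to it, so that pc->mem_cgroup ends up valid.
	 */
	return 0;
}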
  2120. /*
2121. * Sometimes we have to undo a charge we got by try_charge().
2122. * This function is for that: it does the uncharge and puts the css
2123. * refcount gotten by try_charge().
  2124. */
  2125. static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
  2126. unsigned int nr_pages)
  2127. {
  2128. if (!mem_cgroup_is_root(memcg)) {
  2129. unsigned long bytes = nr_pages * PAGE_SIZE;
  2130. res_counter_uncharge(&memcg->res, bytes);
  2131. if (do_swap_account)
  2132. res_counter_uncharge(&memcg->memsw, bytes);
  2133. }
  2134. }
  2135. /*
2136. * A helper function to get a mem_cgroup from an ID. Must be called under
2137. * rcu_read_lock(). The caller must check css_is_removed() or similar if
2138. * that is a concern. (Dropping a refcnt from swap can be called against a
2139. * removed memcg.)
  2140. */
  2141. static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
  2142. {
  2143. struct cgroup_subsys_state *css;
  2144. /* ID 0 is unused ID */
  2145. if (!id)
  2146. return NULL;
  2147. css = css_lookup(&mem_cgroup_subsys, id);
  2148. if (!css)
  2149. return NULL;
  2150. return container_of(css, struct mem_cgroup, css);
  2151. }
  2152. struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  2153. {
  2154. struct mem_cgroup *memcg = NULL;
  2155. struct page_cgroup *pc;
  2156. unsigned short id;
  2157. swp_entry_t ent;
  2158. VM_BUG_ON(!PageLocked(page));
  2159. pc = lookup_page_cgroup(page);
  2160. lock_page_cgroup(pc);
  2161. if (PageCgroupUsed(pc)) {
  2162. memcg = pc->mem_cgroup;
  2163. if (memcg && !css_tryget(&memcg->css))
  2164. memcg = NULL;
  2165. } else if (PageSwapCache(page)) {
  2166. ent.val = page_private(page);
  2167. id = lookup_swap_cgroup_id(ent);
  2168. rcu_read_lock();
  2169. memcg = mem_cgroup_lookup(id);
  2170. if (memcg && !css_tryget(&memcg->css))
  2171. memcg = NULL;
  2172. rcu_read_unlock();
  2173. }
  2174. unlock_page_cgroup(pc);
  2175. return memcg;
  2176. }
  2177. static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
  2178. struct page *page,
  2179. unsigned int nr_pages,
  2180. enum charge_type ctype,
  2181. bool lrucare)
  2182. {
  2183. struct page_cgroup *pc = lookup_page_cgroup(page);
  2184. struct zone *uninitialized_var(zone);
  2185. bool was_on_lru = false;
  2186. bool anon;
  2187. lock_page_cgroup(pc);
  2188. if (unlikely(PageCgroupUsed(pc))) {
  2189. unlock_page_cgroup(pc);
  2190. __mem_cgroup_cancel_charge(memcg, nr_pages);
  2191. return;
  2192. }
  2193. /*
2194. * We don't need page_cgroup_lock for tail pages, because they are not
  2195. * accessed by any other context at this point.
  2196. */
  2197. /*
2198. * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
  2199. * may already be on some other mem_cgroup's LRU. Take care of it.
  2200. */
  2201. if (lrucare) {
  2202. zone = page_zone(page);
  2203. spin_lock_irq(&zone->lru_lock);
  2204. if (PageLRU(page)) {
  2205. ClearPageLRU(page);
  2206. del_page_from_lru_list(zone, page, page_lru(page));
  2207. was_on_lru = true;
  2208. }
  2209. }
  2210. pc->mem_cgroup = memcg;
  2211. /*
  2212. * We access a page_cgroup asynchronously without lock_page_cgroup().
  2213. * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
  2214. * is accessed after testing USED bit. To make pc->mem_cgroup visible
2215. * before the USED bit, we need a memory barrier here.
2216. * See mem_cgroup_lru_add_list(), etc.
  2217. */
  2218. smp_wmb();
  2219. SetPageCgroupUsed(pc);
  2220. if (lrucare) {
  2221. if (was_on_lru) {
  2222. VM_BUG_ON(PageLRU(page));
  2223. SetPageLRU(page);
  2224. add_page_to_lru_list(zone, page, page_lru(page));
  2225. }
  2226. spin_unlock_irq(&zone->lru_lock);
  2227. }
  2228. if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
  2229. anon = true;
  2230. else
  2231. anon = false;
  2232. mem_cgroup_charge_statistics(memcg, anon, nr_pages);
  2233. unlock_page_cgroup(pc);
  2234. /*
  2235. * "charge_statistics" updated event counter. Then, check it.
  2236. * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
  2237. * if they exceeds softlimit.
  2238. */
  2239. memcg_check_events(memcg, page);
  2240. }
  2241. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  2242. #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
  2243. /*
2244. * Because tail pages are not marked as "used", set the bit for them. We're
2245. * under zone->lru_lock, 'splitting on pmd' and compound_lock.
2246. * charge/uncharge will never happen and move_account() is done under
  2247. * compound_lock(), so we don't have to take care of races.
  2248. */
  2249. void mem_cgroup_split_huge_fixup(struct page *head)
  2250. {
  2251. struct page_cgroup *head_pc = lookup_page_cgroup(head);
  2252. struct page_cgroup *pc;
  2253. int i;
  2254. if (mem_cgroup_disabled())
  2255. return;
  2256. for (i = 1; i < HPAGE_PMD_NR; i++) {
  2257. pc = head_pc + i;
  2258. pc->mem_cgroup = head_pc->mem_cgroup;
  2259. smp_wmb();/* see __commit_charge() */
  2260. pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
  2261. }
  2262. }
  2263. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  2264. /**
  2265. * mem_cgroup_move_account - move account of the page
  2266. * @page: the page
  2267. * @nr_pages: number of regular pages (>1 for huge pages)
  2268. * @pc: page_cgroup of the page.
  2269. * @from: mem_cgroup which the page is moved from.
  2270. * @to: mem_cgroup which the page is moved to. @from != @to.
  2271. * @uncharge: whether we should call uncharge and css_put against @from.
  2272. *
  2273. * The caller must confirm following.
  2274. * - page is not on LRU (isolate_page() is useful.)
  2275. * - compound_lock is held when nr_pages > 1
  2276. *
2277. * This function doesn't do "charge" or css_get on the new cgroup. That should
2278. * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
2279. * is true, this function does "uncharge" from the old cgroup; if @uncharge is
2280. * false, it doesn't, and the caller should do the "uncharge" itself.
  2281. */
  2282. static int mem_cgroup_move_account(struct page *page,
  2283. unsigned int nr_pages,
  2284. struct page_cgroup *pc,
  2285. struct mem_cgroup *from,
  2286. struct mem_cgroup *to,
  2287. bool uncharge)
  2288. {
  2289. unsigned long flags;
  2290. int ret;
  2291. bool anon = PageAnon(page);
  2292. VM_BUG_ON(from == to);
  2293. VM_BUG_ON(PageLRU(page));
  2294. /*
  2295. * The page is isolated from LRU. So, collapse function
  2296. * will not handle this page. But page splitting can happen.
  2297. * Do this check under compound_page_lock(). The caller should
  2298. * hold it.
  2299. */
  2300. ret = -EBUSY;
  2301. if (nr_pages > 1 && !PageTransHuge(page))
  2302. goto out;
  2303. lock_page_cgroup(pc);
  2304. ret = -EINVAL;
  2305. if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
  2306. goto unlock;
  2307. move_lock_mem_cgroup(from, &flags);
  2308. if (!anon && page_mapped(page)) {
  2309. /* Update mapped_file data for mem_cgroup */
  2310. preempt_disable();
  2311. __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  2312. __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  2313. preempt_enable();
  2314. }
  2315. mem_cgroup_charge_statistics(from, anon, -nr_pages);
  2316. if (uncharge)
  2317. /* This is not "cancel", but cancel_charge does all we need. */
  2318. __mem_cgroup_cancel_charge(from, nr_pages);
  2319. /* caller should have done css_get */
  2320. pc->mem_cgroup = to;
  2321. mem_cgroup_charge_statistics(to, anon, nr_pages);
  2322. /*
2323. * We charge against "to", which may not have any tasks. Then "to"
2324. * can be under rmdir(). But in the current implementation, the callers of
2325. * this function are just force_empty() and move charge, so it's
  2326. * guaranteed that "to" is never removed. So, we don't check rmdir
  2327. * status here.
  2328. */
  2329. move_unlock_mem_cgroup(from, &flags);
  2330. ret = 0;
  2331. unlock:
  2332. unlock_page_cgroup(pc);
  2333. /*
  2334. * check events
  2335. */
  2336. memcg_check_events(to, page);
  2337. memcg_check_events(from, page);
  2338. out:
  2339. return ret;
  2340. }
  2341. /*
2342. * Move charges to the parent cgroup.
  2343. */
  2344. static int mem_cgroup_move_parent(struct page *page,
  2345. struct page_cgroup *pc,
  2346. struct mem_cgroup *child,
  2347. gfp_t gfp_mask)
  2348. {
  2349. struct cgroup *cg = child->css.cgroup;
  2350. struct cgroup *pcg = cg->parent;
  2351. struct mem_cgroup *parent;
  2352. unsigned int nr_pages;
  2353. unsigned long uninitialized_var(flags);
  2354. int ret;
  2355. /* Is ROOT ? */
  2356. if (!pcg)
  2357. return -EINVAL;
  2358. ret = -EBUSY;
  2359. if (!get_page_unless_zero(page))
  2360. goto out;
  2361. if (isolate_lru_page(page))
  2362. goto put;
  2363. nr_pages = hpage_nr_pages(page);
  2364. parent = mem_cgroup_from_cont(pcg);
  2365. ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
  2366. if (ret)
  2367. goto put_back;
  2368. if (nr_pages > 1)
  2369. flags = compound_lock_irqsave(page);
  2370. ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
  2371. if (ret)
  2372. __mem_cgroup_cancel_charge(parent, nr_pages);
  2373. if (nr_pages > 1)
  2374. compound_unlock_irqrestore(page, flags);
  2375. put_back:
  2376. putback_lru_page(page);
  2377. put:
  2378. put_page(page);
  2379. out:
  2380. return ret;
  2381. }
  2382. /*
  2383. * Charge the memory controller for page usage.
  2384. * Return
  2385. * 0 if the charge was successful
  2386. * < 0 if the cgroup is over its limit
  2387. */
  2388. static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
  2389. gfp_t gfp_mask, enum charge_type ctype)
  2390. {
  2391. struct mem_cgroup *memcg = NULL;
  2392. unsigned int nr_pages = 1;
  2393. bool oom = true;
  2394. int ret;
  2395. if (PageTransHuge(page)) {
  2396. nr_pages <<= compound_order(page);
  2397. VM_BUG_ON(!PageTransHuge(page));
  2398. /*
  2399. * Never OOM-kill a process for a huge page. The
  2400. * fault handler will fall back to regular pages.
  2401. */
  2402. oom = false;
  2403. }
  2404. ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
  2405. if (ret == -ENOMEM)
  2406. return ret;
  2407. __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
  2408. return 0;
  2409. }
  2410. int mem_cgroup_newpage_charge(struct page *page,
  2411. struct mm_struct *mm, gfp_t gfp_mask)
  2412. {
  2413. if (mem_cgroup_disabled())
  2414. return 0;
  2415. VM_BUG_ON(page_mapped(page));
  2416. VM_BUG_ON(page->mapping && !PageAnon(page));
  2417. VM_BUG_ON(!mm);
  2418. return mem_cgroup_charge_common(page, mm, gfp_mask,
  2419. MEM_CGROUP_CHARGE_TYPE_MAPPED);
  2420. }
  2421. static void
  2422. __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
  2423. enum charge_type ctype);
  2424. int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  2425. gfp_t gfp_mask)
  2426. {
  2427. struct mem_cgroup *memcg = NULL;
  2428. enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
  2429. int ret;
  2430. if (mem_cgroup_disabled())
  2431. return 0;
  2432. if (PageCompound(page))
  2433. return 0;
  2434. if (unlikely(!mm))
  2435. mm = &init_mm;
  2436. if (!page_is_file_cache(page))
  2437. type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
  2438. if (!PageSwapCache(page))
  2439. ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
  2440. else { /* page is swapcache/shmem */
  2441. ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
  2442. if (!ret)
  2443. __mem_cgroup_commit_charge_swapin(page, memcg, type);
  2444. }
  2445. return ret;
  2446. }
  2447. /*
2448. * During swap-in (try_charge -> commit or cancel), the page is locked.
2449. * And when try_charge() returns successfully, one refcnt on the memcg, not
2450. * tied to a struct page_cgroup, has been acquired. This refcnt will be
2451. * consumed by "commit()" or released by "cancel()".
  2452. */
  2453. int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
  2454. struct page *page,
  2455. gfp_t mask, struct mem_cgroup **memcgp)
  2456. {
  2457. struct mem_cgroup *memcg;
  2458. int ret;
  2459. *memcgp = NULL;
  2460. if (mem_cgroup_disabled())
  2461. return 0;
  2462. if (!do_swap_account)
  2463. goto charge_cur_mm;
  2464. /*
  2465. * A racing thread's fault, or swapoff, may have already updated
  2466. * the pte, and even removed page from swap cache: in those cases
  2467. * do_swap_page()'s pte_same() test will fail; but there's also a
  2468. * KSM case which does need to charge the page.
  2469. */
  2470. if (!PageSwapCache(page))
  2471. goto charge_cur_mm;
  2472. memcg = try_get_mem_cgroup_from_page(page);
  2473. if (!memcg)
  2474. goto charge_cur_mm;
  2475. *memcgp = memcg;
  2476. ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
  2477. css_put(&memcg->css);
  2478. if (ret == -EINTR)
  2479. ret = 0;
  2480. return ret;
  2481. charge_cur_mm:
  2482. if (unlikely(!mm))
  2483. mm = &init_mm;
  2484. ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
  2485. if (ret == -EINTR)
  2486. ret = 0;
  2487. return ret;
  2488. }
  2489. static void
  2490. __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
  2491. enum charge_type ctype)
  2492. {
  2493. if (mem_cgroup_disabled())
  2494. return;
  2495. if (!memcg)
  2496. return;
  2497. cgroup_exclude_rmdir(&memcg->css);
  2498. __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
  2499. /*
2500. * Now the swapped-in page is in memory. This means the page may be
2501. * counted both as mem and swap: a double count.
2502. * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2503. * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2504. * may call delete_from_swap_cache() before we reach here.
  2505. */
  2506. if (do_swap_account && PageSwapCache(page)) {
  2507. swp_entry_t ent = {.val = page_private(page)};
  2508. struct mem_cgroup *swap_memcg;
  2509. unsigned short id;
  2510. id = swap_cgroup_record(ent, 0);
  2511. rcu_read_lock();
  2512. swap_memcg = mem_cgroup_lookup(id);
  2513. if (swap_memcg) {
  2514. /*
2515. * This recorded memcg may be an obsolete one, so avoid
2516. * calling css_tryget().
  2517. */
  2518. if (!mem_cgroup_is_root(swap_memcg))
  2519. res_counter_uncharge(&swap_memcg->memsw,
  2520. PAGE_SIZE);
  2521. mem_cgroup_swap_statistics(swap_memcg, false);
  2522. mem_cgroup_put(swap_memcg);
  2523. }
  2524. rcu_read_unlock();
  2525. }
  2526. /*
2527. * At swapin, we may charge against a cgroup which has no tasks, so
2528. * rmdir()->pre_destroy() can be called while we do this charge.
2529. * In that case, we need to call pre_destroy() again. Check it here.
  2530. */
  2531. cgroup_release_and_wakeup_rmdir(&memcg->css);
  2532. }
  2533. void mem_cgroup_commit_charge_swapin(struct page *page,
  2534. struct mem_cgroup *memcg)
  2535. {
  2536. __mem_cgroup_commit_charge_swapin(page, memcg,
  2537. MEM_CGROUP_CHARGE_TYPE_MAPPED);
  2538. }
  2539. void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
  2540. {
  2541. if (mem_cgroup_disabled())
  2542. return;
  2543. if (!memcg)
  2544. return;
  2545. __mem_cgroup_cancel_charge(memcg, 1);
  2546. }
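/*
 * Illustrative sketch, not part of memcontrol.c: the try/commit/cancel
 * pattern for swap-in described in the comment above.  example_swapin_fault()
 * and example_map_page() are hypothetical names used only for illustration;
 * the real user of this API is do_swap_page() in mm/memory.c.
 */
static int example_map_page(struct mm_struct *mm, struct page *page); /* hypothetical */

static int example_swapin_fault(struct mm_struct *mm, struct page *page,
				gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	int ret;

	/* Take the charge (and a memcg refcnt) before touching page tables. */
	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
	if (ret)
		return ret;

	if (example_map_page(mm, page)) {
		/* The pte changed under us: give the charge back. */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -EAGAIN;
	}

	/* Success: commit the charge and resolve any mem+swap double count. */
	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}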
  2547. static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
  2548. unsigned int nr_pages,
  2549. const enum charge_type ctype)
  2550. {
  2551. struct memcg_batch_info *batch = NULL;
  2552. bool uncharge_memsw = true;
  2553. /* If swapout, usage of swap doesn't decrease */
  2554. if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
  2555. uncharge_memsw = false;
  2556. batch = &current->memcg_batch;
  2557. /*
2558. * Usually, we do css_get() when we remember a memcg pointer.
2559. * But in this case, we keep res->usage until the end of a series of
2560. * uncharges, so it's OK to ignore the memcg's refcnt.
  2561. */
  2562. if (!batch->memcg)
  2563. batch->memcg = memcg;
  2564. /*
2565. * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2566. * In those cases, all pages freed continuously can be expected to be in
2567. * the same cgroup, and we have a chance to coalesce uncharges.
2568. * But we uncharge one by one if the task is being killed by the OOM
2569. * killer (TIF_MEMDIE), because we want the uncharge done as soon as possible.
  2570. */
  2571. if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
  2572. goto direct_uncharge;
  2573. if (nr_pages > 1)
  2574. goto direct_uncharge;
  2575. /*
2576. * In the typical case, batch->memcg == memcg. This means we can
2577. * merge a series of uncharges into a single res_counter uncharge.
2578. * If not, we uncharge the res_counter one by one.
  2579. */
  2580. if (batch->memcg != memcg)
  2581. goto direct_uncharge;
  2582. /* remember freed charge and uncharge it later */
  2583. batch->nr_pages++;
  2584. if (uncharge_memsw)
  2585. batch->memsw_nr_pages++;
  2586. return;
  2587. direct_uncharge:
  2588. res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
  2589. if (uncharge_memsw)
  2590. res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
  2591. if (unlikely(batch->memcg != memcg))
  2592. memcg_oom_recover(memcg);
  2593. }
  2594. /*
  2595. * uncharge if !page_mapped(page)
  2596. */
  2597. static struct mem_cgroup *
  2598. __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
  2599. {
  2600. struct mem_cgroup *memcg = NULL;
  2601. unsigned int nr_pages = 1;
  2602. struct page_cgroup *pc;
  2603. bool anon;
  2604. if (mem_cgroup_disabled())
  2605. return NULL;
  2606. if (PageSwapCache(page))
  2607. return NULL;
  2608. if (PageTransHuge(page)) {
  2609. nr_pages <<= compound_order(page);
  2610. VM_BUG_ON(!PageTransHuge(page));
  2611. }
  2612. /*
  2613. * Check if our page_cgroup is valid
  2614. */
  2615. pc = lookup_page_cgroup(page);
  2616. if (unlikely(!PageCgroupUsed(pc)))
  2617. return NULL;
  2618. lock_page_cgroup(pc);
  2619. memcg = pc->mem_cgroup;
  2620. if (!PageCgroupUsed(pc))
  2621. goto unlock_out;
  2622. anon = PageAnon(page);
  2623. switch (ctype) {
  2624. case MEM_CGROUP_CHARGE_TYPE_MAPPED:
  2625. /*
2626. * Generally, PageAnon tells whether it's the anon statistics to be
2627. * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
2628. * used before the page has reached the stage of being marked PageAnon.
  2629. */
  2630. anon = true;
  2631. /* fallthrough */
  2632. case MEM_CGROUP_CHARGE_TYPE_DROP:
  2633. /* See mem_cgroup_prepare_migration() */
  2634. if (page_mapped(page) || PageCgroupMigration(pc))
  2635. goto unlock_out;
  2636. break;
  2637. case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
  2638. if (!PageAnon(page)) { /* Shared memory */
  2639. if (page->mapping && !page_is_file_cache(page))
  2640. goto unlock_out;
  2641. } else if (page_mapped(page)) /* Anon */
  2642. goto unlock_out;
  2643. break;
  2644. default:
  2645. break;
  2646. }
  2647. mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
  2648. ClearPageCgroupUsed(pc);
  2649. /*
2650. * pc->mem_cgroup is not cleared here. It will be accessed when the page
2651. * is freed from the LRU. This is safe because an uncharged page is not
2652. * expected to be reused (it is freed soon). The exception is SwapCache,
2653. * which is handled by special functions.
  2654. */
  2655. unlock_page_cgroup(pc);
  2656. /*
2657. * Even after unlocking, we still hold memcg->res.usage here, so this
2658. * memcg will never be freed.
  2659. */
  2660. memcg_check_events(memcg, page);
  2661. if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
  2662. mem_cgroup_swap_statistics(memcg, true);
  2663. mem_cgroup_get(memcg);
  2664. }
  2665. if (!mem_cgroup_is_root(memcg))
  2666. mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
  2667. return memcg;
  2668. unlock_out:
  2669. unlock_page_cgroup(pc);
  2670. return NULL;
  2671. }
  2672. void mem_cgroup_uncharge_page(struct page *page)
  2673. {
  2674. /* early check. */
  2675. if (page_mapped(page))
  2676. return;
  2677. VM_BUG_ON(page->mapping && !PageAnon(page));
  2678. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
  2679. }
  2680. void mem_cgroup_uncharge_cache_page(struct page *page)
  2681. {
  2682. VM_BUG_ON(page_mapped(page));
  2683. VM_BUG_ON(page->mapping);
  2684. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
  2685. }
  2686. /*
2687. * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
2688. * In those cases, pages are freed continuously and can be expected to be
2689. * in the same memcg. Each of these callers itself limits the number of
2690. * pages freed at once, so uncharge_start/end() is called properly.
2691. * This may be called more than once within a single context.
  2692. */
  2693. void mem_cgroup_uncharge_start(void)
  2694. {
  2695. current->memcg_batch.do_batch++;
  2696. /* We can do nest. */
  2697. if (current->memcg_batch.do_batch == 1) {
  2698. current->memcg_batch.memcg = NULL;
  2699. current->memcg_batch.nr_pages = 0;
  2700. current->memcg_batch.memsw_nr_pages = 0;
  2701. }
  2702. }
  2703. void mem_cgroup_uncharge_end(void)
  2704. {
  2705. struct memcg_batch_info *batch = &current->memcg_batch;
  2706. if (!batch->do_batch)
  2707. return;
  2708. batch->do_batch--;
  2709. if (batch->do_batch) /* If stacked, do nothing. */
  2710. return;
  2711. if (!batch->memcg)
  2712. return;
  2713. /*
  2714. * This "batch->memcg" is valid without any css_get/put etc...
2715. * because we hide charges behind us.
  2716. */
  2717. if (batch->nr_pages)
  2718. res_counter_uncharge(&batch->memcg->res,
  2719. batch->nr_pages * PAGE_SIZE);
  2720. if (batch->memsw_nr_pages)
  2721. res_counter_uncharge(&batch->memcg->memsw,
  2722. batch->memsw_nr_pages * PAGE_SIZE);
  2723. memcg_oom_recover(batch->memcg);
  2724. /* forget this pointer (for sanity check) */
  2725. batch->memcg = NULL;
  2726. }
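/*
 * Illustrative sketch, not part of memcontrol.c: how a truncate-style path
 * brackets a run of uncharges with the start/end helpers above so that the
 * res_counter is only touched once per batch.  example_release_pages() is a
 * hypothetical name; the real users are the unmap/truncate/invalidate paths.
 */
static void example_release_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();		/* begin coalescing uncharges */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();		/* flush the batched charge once */
}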
  2727. #ifdef CONFIG_SWAP
  2728. /*
2729. * Called after __delete_from_swap_cache(); drops the "page" account.
2730. * The memcg information is recorded in the swap_cgroup of "ent".
  2731. */
  2732. void
  2733. mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
  2734. {
  2735. struct mem_cgroup *memcg;
  2736. int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
  2737. if (!swapout) /* this was a swap cache but the swap is unused ! */
  2738. ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
  2739. memcg = __mem_cgroup_uncharge_common(page, ctype);
  2740. /*
  2741. * record memcg information, if swapout && memcg != NULL,
  2742. * mem_cgroup_get() was called in uncharge().
  2743. */
  2744. if (do_swap_account && swapout && memcg)
  2745. swap_cgroup_record(ent, css_id(&memcg->css));
  2746. }
  2747. #endif
  2748. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  2749. /*
  2750. * called from swap_entry_free(). remove record in swap_cgroup and
  2751. * uncharge "memsw" account.
  2752. */
  2753. void mem_cgroup_uncharge_swap(swp_entry_t ent)
  2754. {
  2755. struct mem_cgroup *memcg;
  2756. unsigned short id;
  2757. if (!do_swap_account)
  2758. return;
  2759. id = swap_cgroup_record(ent, 0);
  2760. rcu_read_lock();
  2761. memcg = mem_cgroup_lookup(id);
  2762. if (memcg) {
  2763. /*
  2764. * We uncharge this because swap is freed.
2765. * This memcg may be an obsolete one, so we avoid calling css_tryget().
  2766. */
  2767. if (!mem_cgroup_is_root(memcg))
  2768. res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
  2769. mem_cgroup_swap_statistics(memcg, false);
  2770. mem_cgroup_put(memcg);
  2771. }
  2772. rcu_read_unlock();
  2773. }
  2774. /**
  2775. * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
  2776. * @entry: swap entry to be moved
  2777. * @from: mem_cgroup which the entry is moved from
  2778. * @to: mem_cgroup which the entry is moved to
  2779. * @need_fixup: whether we should fixup res_counters and refcounts.
  2780. *
  2781. * It succeeds only when the swap_cgroup's record for this entry is the same
  2782. * as the mem_cgroup's id of @from.
  2783. *
  2784. * Returns 0 on success, -EINVAL on failure.
  2785. *
  2786. * The caller must have charged to @to, IOW, called res_counter_charge() about
  2787. * both res and memsw, and called css_get().
  2788. */
  2789. static int mem_cgroup_move_swap_account(swp_entry_t entry,
  2790. struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
  2791. {
  2792. unsigned short old_id, new_id;
  2793. old_id = css_id(&from->css);
  2794. new_id = css_id(&to->css);
  2795. if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
  2796. mem_cgroup_swap_statistics(from, false);
  2797. mem_cgroup_swap_statistics(to, true);
  2798. /*
  2799. * This function is only called from task migration context now.
  2800. * It postpones res_counter and refcount handling till the end
2801. * of task migration (mem_cgroup_clear_mc()) for performance
  2802. * improvement. But we cannot postpone mem_cgroup_get(to)
  2803. * because if the process that has been moved to @to does
  2804. * swap-in, the refcount of @to might be decreased to 0.
  2805. */
  2806. mem_cgroup_get(to);
  2807. if (need_fixup) {
  2808. if (!mem_cgroup_is_root(from))
  2809. res_counter_uncharge(&from->memsw, PAGE_SIZE);
  2810. mem_cgroup_put(from);
  2811. /*
  2812. * we charged both to->res and to->memsw, so we should
  2813. * uncharge to->res.
  2814. */
  2815. if (!mem_cgroup_is_root(to))
  2816. res_counter_uncharge(&to->res, PAGE_SIZE);
  2817. }
  2818. return 0;
  2819. }
  2820. return -EINVAL;
  2821. }
  2822. #else
  2823. static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
  2824. struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
  2825. {
  2826. return -EINVAL;
  2827. }
  2828. #endif
  2829. /*
  2830. * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  2831. * page belongs to.
  2832. */
  2833. int mem_cgroup_prepare_migration(struct page *page,
  2834. struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
  2835. {
  2836. struct mem_cgroup *memcg = NULL;
  2837. struct page_cgroup *pc;
  2838. enum charge_type ctype;
  2839. int ret = 0;
  2840. *memcgp = NULL;
  2841. VM_BUG_ON(PageTransHuge(page));
  2842. if (mem_cgroup_disabled())
  2843. return 0;
  2844. pc = lookup_page_cgroup(page);
  2845. lock_page_cgroup(pc);
  2846. if (PageCgroupUsed(pc)) {
  2847. memcg = pc->mem_cgroup;
  2848. css_get(&memcg->css);
  2849. /*
  2850. * At migrating an anonymous page, its mapcount goes down
  2851. * to 0 and uncharge() will be called. But, even if it's fully
  2852. * unmapped, migration may fail and this page has to be
  2853. * charged again. We set MIGRATION flag here and delay uncharge
  2854. * until end_migration() is called
  2855. *
  2856. * Corner Case Thinking
  2857. * A)
  2858. * When the old page was mapped as Anon and it's unmap-and-freed
  2859. * while migration was ongoing.
  2860. * If unmap finds the old page, uncharge() of it will be delayed
  2861. * until end_migration(). If unmap finds a new page, it's
  2862. * uncharged when it make mapcount to be 1->0. If unmap code
  2863. * finds swap_migration_entry, the new page will not be mapped
  2864. * and end_migration() will find it(mapcount==0).
  2865. *
  2866. * B)
  2867. * When the old page was mapped but migraion fails, the kernel
  2868. * remaps it. A charge for it is kept by MIGRATION flag even
  2869. * if mapcount goes down to 0. We can do remap successfully
  2870. * without charging it again.
  2871. *
  2872. * C)
  2873. * The "old" page is under lock_page() until the end of
  2874. * migration, so, the old page itself will not be swapped-out.
  2875. * If the new page is swapped out before end_migraton, our
  2876. * hook to usual swap-out path will catch the event.
  2877. */
  2878. if (PageAnon(page))
  2879. SetPageCgroupMigration(pc);
  2880. }
  2881. unlock_page_cgroup(pc);
  2882. /*
  2883. * If the page is not charged at this point,
  2884. * we return here.
  2885. */
  2886. if (!memcg)
  2887. return 0;
  2888. *memcgp = memcg;
  2889. ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
  2890. css_put(&memcg->css);/* drop extra refcnt */
  2891. if (ret) {
  2892. if (PageAnon(page)) {
  2893. lock_page_cgroup(pc);
  2894. ClearPageCgroupMigration(pc);
  2895. unlock_page_cgroup(pc);
  2896. /*
  2897. * The old page may be fully unmapped while we kept it.
  2898. */
  2899. mem_cgroup_uncharge_page(page);
  2900. }
  2901. /* we'll need to revisit this error code (we have -EINTR) */
  2902. return -ENOMEM;
  2903. }
  2904. /*
2905. * We charge the new page before it's used/mapped. So, even if unlock_page()
2906. * is called before end_migration(), we can catch all events on this new
2907. * page. If the new page is migrated but not remapped, the new page's
2908. * mapcount will finally be 0 and we call uncharge in end_migration().
  2909. */
  2910. if (PageAnon(page))
  2911. ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
  2912. else if (page_is_file_cache(page))
  2913. ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
  2914. else
  2915. ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
  2916. __mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
  2917. return ret;
  2918. }
2919. /* remove the redundant charge if migration failed */
  2920. void mem_cgroup_end_migration(struct mem_cgroup *memcg,
  2921. struct page *oldpage, struct page *newpage, bool migration_ok)
  2922. {
  2923. struct page *used, *unused;
  2924. struct page_cgroup *pc;
  2925. bool anon;
  2926. if (!memcg)
  2927. return;
  2928. /* blocks rmdir() */
  2929. cgroup_exclude_rmdir(&memcg->css);
  2930. if (!migration_ok) {
  2931. used = oldpage;
  2932. unused = newpage;
  2933. } else {
  2934. used = newpage;
  2935. unused = oldpage;
  2936. }
  2937. /*
2938. * We disallowed uncharging pages under migration because the page's
2939. * mapcount temporarily goes down to zero.
2940. * Clear the flag and check whether the page should still be charged.
  2941. */
  2942. pc = lookup_page_cgroup(oldpage);
  2943. lock_page_cgroup(pc);
  2944. ClearPageCgroupMigration(pc);
  2945. unlock_page_cgroup(pc);
  2946. anon = PageAnon(used);
  2947. __mem_cgroup_uncharge_common(unused,
  2948. anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
  2949. : MEM_CGROUP_CHARGE_TYPE_CACHE);
  2950. /*
2951. * If a page is file cache, the radix-tree replacement is atomic
2952. * and we can skip this check. When it was an Anon page, its mapcount
2953. * went down to 0, but because we added the MIGRATION flag it has not
2954. * been uncharged yet. There are several cases, but the page->mapcount
2955. * check and the USED bit check in mem_cgroup_uncharge_page() do enough
2956. * checking. (See mem_cgroup_prepare_migration() as well.)
  2957. */
  2958. if (anon)
  2959. mem_cgroup_uncharge_page(used);
  2960. /*
2961. * At migration, we may charge against a cgroup which has no
2962. * tasks.
2963. * So, rmdir()->pre_destroy() can be called while we do this charge.
2964. * In that case, we need to call pre_destroy() again. Check it here.
  2965. */
  2966. cgroup_release_and_wakeup_rmdir(&memcg->css);
  2967. }
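/*
 * Illustrative sketch, not part of memcontrol.c: the expected pairing of
 * mem_cgroup_prepare_migration() and mem_cgroup_end_migration() around a
 * migration attempt.  example_migrate_page() and example_move_mapping() are
 * hypothetical names used only for illustration; the real caller lives in
 * mm/migrate.c.
 */
static int example_move_mapping(struct page *page, struct page *newpage); /* hypothetical */

static int example_migrate_page(struct page *page, struct page *newpage,
				gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	int rc;

	/* Charge newpage and mark the old page's charge as "under migration". */
	rc = mem_cgroup_prepare_migration(page, newpage, &memcg, gfp_mask);
	if (rc)
		return rc;

	rc = example_move_mapping(page, newpage);

	/* Drop the charge of whichever page ended up unused. */
	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
	return rc;
}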
  2968. /*
2969. * When replacing a page-cache page, newpage is not under any memcg but it
2970. * is on the LRU. So, this function doesn't touch res_counter but handles
2971. * the LRU in the correct way. Both pages are locked so we cannot race with uncharge.
  2972. */
  2973. void mem_cgroup_replace_page_cache(struct page *oldpage,
  2974. struct page *newpage)
  2975. {
  2976. struct mem_cgroup *memcg;
  2977. struct page_cgroup *pc;
  2978. enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
  2979. if (mem_cgroup_disabled())
  2980. return;
  2981. pc = lookup_page_cgroup(oldpage);
  2982. /* fix accounting on old pages */
  2983. lock_page_cgroup(pc);
  2984. memcg = pc->mem_cgroup;
  2985. mem_cgroup_charge_statistics(memcg, false, -1);
  2986. ClearPageCgroupUsed(pc);
  2987. unlock_page_cgroup(pc);
  2988. if (PageSwapBacked(oldpage))
  2989. type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
  2990. /*
  2991. * Even if newpage->mapping was NULL before starting replacement,
2992. * the newpage may already be on the LRU (or on a pagevec for the LRU). We
2993. * lock the LRU while we overwrite pc->mem_cgroup.
  2994. */
  2995. __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
  2996. }
  2997. #ifdef CONFIG_DEBUG_VM
  2998. static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
  2999. {
  3000. struct page_cgroup *pc;
  3001. pc = lookup_page_cgroup(page);
  3002. /*
  3003. * Can be NULL while feeding pages into the page allocator for
  3004. * the first time, i.e. during boot or memory hotplug;
  3005. * or when mem_cgroup_disabled().
  3006. */
  3007. if (likely(pc) && PageCgroupUsed(pc))
  3008. return pc;
  3009. return NULL;
  3010. }
  3011. bool mem_cgroup_bad_page_check(struct page *page)
  3012. {
  3013. if (mem_cgroup_disabled())
  3014. return false;
  3015. return lookup_page_cgroup_used(page) != NULL;
  3016. }
  3017. void mem_cgroup_print_bad_page(struct page *page)
  3018. {
  3019. struct page_cgroup *pc;
  3020. pc = lookup_page_cgroup_used(page);
  3021. if (pc) {
  3022. printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
  3023. pc, pc->flags, pc->mem_cgroup);
  3024. }
  3025. }
  3026. #endif
  3027. static DEFINE_MUTEX(set_limit_mutex);
  3028. static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
  3029. unsigned long long val)
  3030. {
  3031. int retry_count;
  3032. u64 memswlimit, memlimit;
  3033. int ret = 0;
  3034. int children = mem_cgroup_count_children(memcg);
  3035. u64 curusage, oldusage;
  3036. int enlarge;
  3037. /*
3038. * To keep hierarchical_reclaim simple, how long we should retry
3039. * depends on the caller. We set our retry count to be a function
3040. * of the number of children we should visit in this loop.
  3041. */
  3042. retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
  3043. oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  3044. enlarge = 0;
  3045. while (retry_count) {
  3046. if (signal_pending(current)) {
  3047. ret = -EINTR;
  3048. break;
  3049. }
  3050. /*
3051. * Rather than hiding all of this in some function, do it in an
3052. * open-coded manner so you can see what it really does.
3053. * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
  3054. */
  3055. mutex_lock(&set_limit_mutex);
  3056. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3057. if (memswlimit < val) {
  3058. ret = -EINVAL;
  3059. mutex_unlock(&set_limit_mutex);
  3060. break;
  3061. }
  3062. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3063. if (memlimit < val)
  3064. enlarge = 1;
  3065. ret = res_counter_set_limit(&memcg->res, val);
  3066. if (!ret) {
  3067. if (memswlimit == val)
  3068. memcg->memsw_is_minimum = true;
  3069. else
  3070. memcg->memsw_is_minimum = false;
  3071. }
  3072. mutex_unlock(&set_limit_mutex);
  3073. if (!ret)
  3074. break;
  3075. mem_cgroup_reclaim(memcg, GFP_KERNEL,
  3076. MEM_CGROUP_RECLAIM_SHRINK);
  3077. curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  3078. /* Usage is reduced ? */
  3079. if (curusage >= oldusage)
  3080. retry_count--;
  3081. else
  3082. oldusage = curusage;
  3083. }
  3084. if (!ret && enlarge)
  3085. memcg_oom_recover(memcg);
  3086. return ret;
  3087. }
  3088. static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
  3089. unsigned long long val)
  3090. {
  3091. int retry_count;
  3092. u64 memlimit, memswlimit, oldusage, curusage;
  3093. int children = mem_cgroup_count_children(memcg);
  3094. int ret = -EBUSY;
  3095. int enlarge = 0;
3096. /* see mem_cgroup_resize_limit() */
  3097. retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
  3098. oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  3099. while (retry_count) {
  3100. if (signal_pending(current)) {
  3101. ret = -EINTR;
  3102. break;
  3103. }
  3104. /*
3105. * Rather than hiding all of this in some function, do it in an
3106. * open-coded manner so you can see what it really does.
3107. * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
  3108. */
  3109. mutex_lock(&set_limit_mutex);
  3110. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3111. if (memlimit > val) {
  3112. ret = -EINVAL;
  3113. mutex_unlock(&set_limit_mutex);
  3114. break;
  3115. }
  3116. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3117. if (memswlimit < val)
  3118. enlarge = 1;
  3119. ret = res_counter_set_limit(&memcg->memsw, val);
  3120. if (!ret) {
  3121. if (memlimit == val)
  3122. memcg->memsw_is_minimum = true;
  3123. else
  3124. memcg->memsw_is_minimum = false;
  3125. }
  3126. mutex_unlock(&set_limit_mutex);
  3127. if (!ret)
  3128. break;
  3129. mem_cgroup_reclaim(memcg, GFP_KERNEL,
  3130. MEM_CGROUP_RECLAIM_NOSWAP |
  3131. MEM_CGROUP_RECLAIM_SHRINK);
  3132. curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  3133. /* Usage is reduced ? */
  3134. if (curusage >= oldusage)
  3135. retry_count--;
  3136. else
  3137. oldusage = curusage;
  3138. }
  3139. if (!ret && enlarge)
  3140. memcg_oom_recover(memcg);
  3141. return ret;
  3142. }
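/*
 * Illustrative sketch, not part of memcontrol.c: because the two resize
 * helpers above enforce memcg->res.limit <= memcg->memsw.limit, a caller
 * that changes both limits has to order the updates.  When growing, enlarge
 * memsw first; when shrinking, reduce res first.  example_set_both_limits()
 * is a hypothetical helper used only for illustration; real changes arrive
 * one control file at a time through mem_cgroup_write().
 */
static int example_set_both_limits(struct mem_cgroup *memcg,
				   unsigned long long mem_limit,
				   unsigned long long memsw_limit)
{
	int ret;

	if (mem_limit > memsw_limit)
		return -EINVAL;		/* would violate the invariant */

	if (memsw_limit >= res_counter_read_u64(&memcg->memsw, RES_LIMIT)) {
		/* Growing: raise the mem+swap limit before the memory limit. */
		ret = mem_cgroup_resize_memsw_limit(memcg, memsw_limit);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, mem_limit);
	} else {
		/* Shrinking: lower the memory limit before the mem+swap limit. */
		ret = mem_cgroup_resize_limit(memcg, mem_limit);
		if (!ret)
			ret = mem_cgroup_resize_memsw_limit(memcg, memsw_limit);
	}
	return ret;
}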
  3143. unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
  3144. gfp_t gfp_mask,
  3145. unsigned long *total_scanned)
  3146. {
  3147. unsigned long nr_reclaimed = 0;
  3148. struct mem_cgroup_per_zone *mz, *next_mz = NULL;
  3149. unsigned long reclaimed;
  3150. int loop = 0;
  3151. struct mem_cgroup_tree_per_zone *mctz;
  3152. unsigned long long excess;
  3153. unsigned long nr_scanned;
  3154. if (order > 0)
  3155. return 0;
  3156. mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
  3157. /*
3158. * This loop can run for a while, especially if mem_cgroups continuously
3159. * keep exceeding their soft limit and putting the system under
3160. * pressure.
  3161. */
  3162. do {
  3163. if (next_mz)
  3164. mz = next_mz;
  3165. else
  3166. mz = mem_cgroup_largest_soft_limit_node(mctz);
  3167. if (!mz)
  3168. break;
  3169. nr_scanned = 0;
  3170. reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
  3171. gfp_mask, &nr_scanned);
  3172. nr_reclaimed += reclaimed;
  3173. *total_scanned += nr_scanned;
  3174. spin_lock(&mctz->lock);
  3175. /*
  3176. * If we failed to reclaim anything from this memory cgroup
  3177. * it is time to move on to the next cgroup
  3178. */
  3179. next_mz = NULL;
  3180. if (!reclaimed) {
  3181. do {
  3182. /*
  3183. * Loop until we find yet another one.
  3184. *
  3185. * By the time we get the soft_limit lock
3186. * again, someone might have added the
3187. * group back on the RB tree. Iterate to
3188. * make sure we get a different memcg.
3189. * mem_cgroup_largest_soft_limit_node returns
3190. * NULL if no other cgroup is present on
3191. * the tree.
  3192. */
  3193. next_mz =
  3194. __mem_cgroup_largest_soft_limit_node(mctz);
  3195. if (next_mz == mz)
  3196. css_put(&next_mz->memcg->css);
  3197. else /* next_mz == NULL or other memcg */
  3198. break;
  3199. } while (1);
  3200. }
  3201. __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
  3202. excess = res_counter_soft_limit_excess(&mz->memcg->res);
  3203. /*
  3204. * One school of thought says that we should not add
  3205. * back the node to the tree if reclaim returns 0.
3206. * But our reclaim could return 0 simply because, due
3207. * to priority, we are exposing a smaller subset of
  3208. * memory to reclaim from. Consider this as a longer
  3209. * term TODO.
  3210. */
  3211. /* If excess == 0, no tree ops */
  3212. __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
  3213. spin_unlock(&mctz->lock);
  3214. css_put(&mz->memcg->css);
  3215. loop++;
  3216. /*
  3217. * Could not reclaim anything and there are no more
  3218. * mem cgroups to try or we seem to be looping without
  3219. * reclaiming anything.
  3220. */
  3221. if (!nr_reclaimed &&
  3222. (next_mz == NULL ||
  3223. loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
  3224. break;
  3225. } while (!nr_reclaimed);
  3226. if (next_mz)
  3227. css_put(&next_mz->memcg->css);
  3228. return nr_reclaimed;
  3229. }
  3230. /*
3231. * This routine traverses the page_cgroups on the given list and drops them all.
3232. * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
  3233. */
  3234. static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
  3235. int node, int zid, enum lru_list lru)
  3236. {
  3237. struct mem_cgroup_per_zone *mz;
  3238. unsigned long flags, loop;
  3239. struct list_head *list;
  3240. struct page *busy;
  3241. struct zone *zone;
  3242. int ret = 0;
  3243. zone = &NODE_DATA(node)->node_zones[zid];
  3244. mz = mem_cgroup_zoneinfo(memcg, node, zid);
  3245. list = &mz->lruvec.lists[lru];
  3246. loop = mz->lru_size[lru];
  3247. /* give some margin against EBUSY etc...*/
  3248. loop += 256;
  3249. busy = NULL;
  3250. while (loop--) {
  3251. struct page_cgroup *pc;
  3252. struct page *page;
  3253. ret = 0;
  3254. spin_lock_irqsave(&zone->lru_lock, flags);
  3255. if (list_empty(list)) {
  3256. spin_unlock_irqrestore(&zone->lru_lock, flags);
  3257. break;
  3258. }
  3259. page = list_entry(list->prev, struct page, lru);
  3260. if (busy == page) {
  3261. list_move(&page->lru, list);
  3262. busy = NULL;
  3263. spin_unlock_irqrestore(&zone->lru_lock, flags);
  3264. continue;
  3265. }
  3266. spin_unlock_irqrestore(&zone->lru_lock, flags);
  3267. pc = lookup_page_cgroup(page);
  3268. ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
  3269. if (ret == -ENOMEM || ret == -EINTR)
  3270. break;
  3271. if (ret == -EBUSY || ret == -EINVAL) {
  3272. /* found lock contention or "pc" is obsolete. */
  3273. busy = page;
  3274. cond_resched();
  3275. } else
  3276. busy = NULL;
  3277. }
  3278. if (!ret && !list_empty(list))
  3279. return -EBUSY;
  3280. return ret;
  3281. }
  3282. /*
3283. * Make the mem_cgroup's charge 0 if there are no tasks.
  3284. * This enables deleting this mem_cgroup.
  3285. */
  3286. static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
  3287. {
  3288. int ret;
  3289. int node, zid, shrink;
  3290. int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
  3291. struct cgroup *cgrp = memcg->css.cgroup;
  3292. css_get(&memcg->css);
  3293. shrink = 0;
  3294. /* should free all ? */
  3295. if (free_all)
  3296. goto try_to_free;
  3297. move_account:
  3298. do {
  3299. ret = -EBUSY;
  3300. if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
  3301. goto out;
  3302. ret = -EINTR;
  3303. if (signal_pending(current))
  3304. goto out;
  3305. /* This is for making all *used* pages to be on LRU. */
  3306. lru_add_drain_all();
  3307. drain_all_stock_sync(memcg);
  3308. ret = 0;
  3309. mem_cgroup_start_move(memcg);
  3310. for_each_node_state(node, N_HIGH_MEMORY) {
  3311. for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
  3312. enum lru_list lru;
  3313. for_each_lru(lru) {
  3314. ret = mem_cgroup_force_empty_list(memcg,
  3315. node, zid, lru);
  3316. if (ret)
  3317. break;
  3318. }
  3319. }
  3320. if (ret)
  3321. break;
  3322. }
  3323. mem_cgroup_end_move(memcg);
  3324. memcg_oom_recover(memcg);
  3325. /* it seems parent cgroup doesn't have enough mem */
  3326. if (ret == -ENOMEM)
  3327. goto try_to_free;
  3328. cond_resched();
  3329. /* "ret" should also be checked to ensure all lists are empty. */
  3330. } while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
  3331. out:
  3332. css_put(&memcg->css);
  3333. return ret;
  3334. try_to_free:
  3335. /* returns EBUSY if there is a task or if we come here twice. */
  3336. if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
  3337. ret = -EBUSY;
  3338. goto out;
  3339. }
3340. /* we call try-to-free pages to make this cgroup empty */
  3341. lru_add_drain_all();
  3342. /* try to free all pages in this cgroup */
  3343. shrink = 1;
  3344. while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
  3345. int progress;
  3346. if (signal_pending(current)) {
  3347. ret = -EINTR;
  3348. goto out;
  3349. }
  3350. progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
  3351. false);
  3352. if (!progress) {
  3353. nr_retries--;
  3354. /* maybe some writeback is necessary */
  3355. congestion_wait(BLK_RW_ASYNC, HZ/10);
  3356. }
  3357. }
  3358. lru_add_drain();
  3359. /* try move_account...there may be some *locked* pages. */
  3360. goto move_account;
  3361. }
  3362. int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
  3363. {
  3364. return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
  3365. }
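/*
 * Illustrative sketch, not part of memcontrol.c: a small userspace snippet
 * showing how mem_cgroup_force_empty_write() above is reached, by writing to
 * the memory.force_empty control file.  The cgroup_dir mount layout
 * (e.g. "/sys/fs/cgroup/memory/mygroup") is an assumption for illustration.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int example_force_empty(const char *cgroup_dir)
{
	char path[4096];
	int fd, ret = -1;

	snprintf(path, sizeof(path), "%s/memory.force_empty", cgroup_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) == 1)
		ret = 0;	/* the kernel then drains the group's charges */
	close(fd);
	return ret;
}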
  3366. static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
  3367. {
  3368. return mem_cgroup_from_cont(cont)->use_hierarchy;
  3369. }
  3370. static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
  3371. u64 val)
  3372. {
  3373. int retval = 0;
  3374. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  3375. struct cgroup *parent = cont->parent;
  3376. struct mem_cgroup *parent_memcg = NULL;
  3377. if (parent)
  3378. parent_memcg = mem_cgroup_from_cont(parent);
  3379. cgroup_lock();
  3380. /*
  3381. * If parent's use_hierarchy is set, we can't make any modifications
  3382. * in the child subtrees. If it is unset, then the change can
  3383. * occur, provided the current cgroup has no children.
  3384. *
3385. * For the root cgroup, parent_memcg is NULL, so we allow the value to be
  3386. * set if there are no children.
  3387. */
  3388. if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
  3389. (val == 1 || val == 0)) {
  3390. if (list_empty(&cont->children))
  3391. memcg->use_hierarchy = val;
  3392. else
  3393. retval = -EBUSY;
  3394. } else
  3395. retval = -EINVAL;
  3396. cgroup_unlock();
  3397. return retval;
  3398. }
  3399. static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
  3400. enum mem_cgroup_stat_index idx)
  3401. {
  3402. struct mem_cgroup *iter;
  3403. long val = 0;
  3404. /* Per-cpu values can be negative, use a signed accumulator */
  3405. for_each_mem_cgroup_tree(iter, memcg)
  3406. val += mem_cgroup_read_stat(iter, idx);
  3407. if (val < 0) /* race ? */
  3408. val = 0;
  3409. return val;
  3410. }
  3411. static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
  3412. {
  3413. u64 val;
  3414. if (!mem_cgroup_is_root(memcg)) {
  3415. if (!swap)
  3416. return res_counter_read_u64(&memcg->res, RES_USAGE);
  3417. else
  3418. return res_counter_read_u64(&memcg->memsw, RES_USAGE);
  3419. }
  3420. val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
  3421. val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
  3422. if (swap)
  3423. val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
  3424. return val << PAGE_SHIFT;
  3425. }
  3426. static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
  3427. {
  3428. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  3429. u64 val;
  3430. int type, name;
  3431. type = MEMFILE_TYPE(cft->private);
  3432. name = MEMFILE_ATTR(cft->private);
  3433. switch (type) {
  3434. case _MEM:
  3435. if (name == RES_USAGE)
  3436. val = mem_cgroup_usage(memcg, false);
  3437. else
  3438. val = res_counter_read_u64(&memcg->res, name);
  3439. break;
  3440. case _MEMSWAP:
  3441. if (name == RES_USAGE)
  3442. val = mem_cgroup_usage(memcg, true);
  3443. else
  3444. val = res_counter_read_u64(&memcg->memsw, name);
  3445. break;
  3446. default:
  3447. BUG();
  3448. }
  3449. return val;
  3450. }
  3451. /*
  3452. * The user of this function is...
  3453. * RES_LIMIT.
  3454. */
  3455. static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
  3456. const char *buffer)
  3457. {
  3458. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  3459. int type, name;
  3460. unsigned long long val;
  3461. int ret;
  3462. type = MEMFILE_TYPE(cft->private);
  3463. name = MEMFILE_ATTR(cft->private);
  3464. switch (name) {
  3465. case RES_LIMIT:
  3466. if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
  3467. ret = -EINVAL;
  3468. break;
  3469. }
  3470. /* This function does all necessary parse...reuse it */
  3471. ret = res_counter_memparse_write_strategy(buffer, &val);
  3472. if (ret)
  3473. break;
  3474. if (type == _MEM)
  3475. ret = mem_cgroup_resize_limit(memcg, val);
  3476. else
  3477. ret = mem_cgroup_resize_memsw_limit(memcg, val);
  3478. break;
  3479. case RES_SOFT_LIMIT:
  3480. ret = res_counter_memparse_write_strategy(buffer, &val);
  3481. if (ret)
  3482. break;
  3483. /*
3484. * For memsw, soft limits are hard to implement in terms
3485. * of semantics; for now, we support soft limits only for
3486. * memory control without swap.
  3487. */
  3488. if (type == _MEM)
  3489. ret = res_counter_set_soft_limit(&memcg->res, val);
  3490. else
  3491. ret = -EINVAL;
  3492. break;
  3493. default:
  3494. ret = -EINVAL; /* should be BUG() ? */
  3495. break;
  3496. }
  3497. return ret;
  3498. }
  3499. static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
  3500. unsigned long long *mem_limit, unsigned long long *memsw_limit)
  3501. {
  3502. struct cgroup *cgroup;
  3503. unsigned long long min_limit, min_memsw_limit, tmp;
  3504. min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3505. min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3506. cgroup = memcg->css.cgroup;
  3507. if (!memcg->use_hierarchy)
  3508. goto out;
  3509. while (cgroup->parent) {
  3510. cgroup = cgroup->parent;
  3511. memcg = mem_cgroup_from_cont(cgroup);
  3512. if (!memcg->use_hierarchy)
  3513. break;
  3514. tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3515. min_limit = min(min_limit, tmp);
  3516. tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3517. min_memsw_limit = min(min_memsw_limit, tmp);
  3518. }
  3519. out:
  3520. *mem_limit = min_limit;
  3521. *memsw_limit = min_memsw_limit;
  3522. }
  3523. static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
  3524. {
  3525. struct mem_cgroup *memcg;
  3526. int type, name;
  3527. memcg = mem_cgroup_from_cont(cont);
  3528. type = MEMFILE_TYPE(event);
  3529. name = MEMFILE_ATTR(event);
  3530. switch (name) {
  3531. case RES_MAX_USAGE:
  3532. if (type == _MEM)
  3533. res_counter_reset_max(&memcg->res);
  3534. else
  3535. res_counter_reset_max(&memcg->memsw);
  3536. break;
  3537. case RES_FAILCNT:
  3538. if (type == _MEM)
  3539. res_counter_reset_failcnt(&memcg->res);
  3540. else
  3541. res_counter_reset_failcnt(&memcg->memsw);
  3542. break;
  3543. }
  3544. return 0;
  3545. }
  3546. static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
  3547. struct cftype *cft)
  3548. {
  3549. return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
  3550. }
  3551. #ifdef CONFIG_MMU
  3552. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  3553. struct cftype *cft, u64 val)
  3554. {
  3555. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3556. if (val >= (1 << NR_MOVE_TYPE))
  3557. return -EINVAL;
  3558. /*
3559. * We check this value several times both in can_attach() and
3560. * attach(), so we need the cgroup lock to prevent this value from being
  3561. * inconsistent.
  3562. */
  3563. cgroup_lock();
  3564. memcg->move_charge_at_immigrate = val;
  3565. cgroup_unlock();
  3566. return 0;
  3567. }
  3568. #else
  3569. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  3570. struct cftype *cft, u64 val)
  3571. {
  3572. return -ENOSYS;
  3573. }
  3574. #endif
  3575. /* For read statistics */
  3576. enum {
  3577. MCS_CACHE,
  3578. MCS_RSS,
  3579. MCS_FILE_MAPPED,
  3580. MCS_PGPGIN,
  3581. MCS_PGPGOUT,
  3582. MCS_SWAP,
  3583. MCS_PGFAULT,
  3584. MCS_PGMAJFAULT,
  3585. MCS_INACTIVE_ANON,
  3586. MCS_ACTIVE_ANON,
  3587. MCS_INACTIVE_FILE,
  3588. MCS_ACTIVE_FILE,
  3589. MCS_UNEVICTABLE,
  3590. NR_MCS_STAT,
  3591. };
  3592. struct mcs_total_stat {
  3593. s64 stat[NR_MCS_STAT];
  3594. };
  3595. struct {
  3596. char *local_name;
  3597. char *total_name;
  3598. } memcg_stat_strings[NR_MCS_STAT] = {
  3599. {"cache", "total_cache"},
  3600. {"rss", "total_rss"},
  3601. {"mapped_file", "total_mapped_file"},
  3602. {"pgpgin", "total_pgpgin"},
  3603. {"pgpgout", "total_pgpgout"},
  3604. {"swap", "total_swap"},
  3605. {"pgfault", "total_pgfault"},
  3606. {"pgmajfault", "total_pgmajfault"},
  3607. {"inactive_anon", "total_inactive_anon"},
  3608. {"active_anon", "total_active_anon"},
  3609. {"inactive_file", "total_inactive_file"},
  3610. {"active_file", "total_active_file"},
  3611. {"unevictable", "total_unevictable"}
  3612. };
  3613. static void
  3614. mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
  3615. {
  3616. s64 val;
  3617. /* per cpu stat */
  3618. val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
  3619. s->stat[MCS_CACHE] += val * PAGE_SIZE;
  3620. val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
  3621. s->stat[MCS_RSS] += val * PAGE_SIZE;
  3622. val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
  3623. s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
  3624. val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
  3625. s->stat[MCS_PGPGIN] += val;
  3626. val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
  3627. s->stat[MCS_PGPGOUT] += val;
  3628. if (do_swap_account) {
  3629. val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
  3630. s->stat[MCS_SWAP] += val * PAGE_SIZE;
  3631. }
  3632. val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
  3633. s->stat[MCS_PGFAULT] += val;
  3634. val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
  3635. s->stat[MCS_PGMAJFAULT] += val;
  3636. /* per zone stat */
  3637. val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
  3638. s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
  3639. val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
  3640. s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
  3641. val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
  3642. s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
  3643. val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
  3644. s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
  3645. val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
  3646. s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
  3647. }
  3648. static void
  3649. mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
  3650. {
  3651. struct mem_cgroup *iter;
  3652. for_each_mem_cgroup_tree(iter, memcg)
  3653. mem_cgroup_get_local_stat(iter, s);
  3654. }
  3655. #ifdef CONFIG_NUMA
  3656. static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
  3657. {
  3658. int nid;
  3659. unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
  3660. unsigned long node_nr;
  3661. struct cgroup *cont = m->private;
  3662. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  3663. total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
  3664. seq_printf(m, "total=%lu", total_nr);
  3665. for_each_node_state(nid, N_HIGH_MEMORY) {
  3666. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
  3667. seq_printf(m, " N%d=%lu", nid, node_nr);
  3668. }
  3669. seq_putc(m, '\n');
  3670. file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
  3671. seq_printf(m, "file=%lu", file_nr);
  3672. for_each_node_state(nid, N_HIGH_MEMORY) {
  3673. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  3674. LRU_ALL_FILE);
  3675. seq_printf(m, " N%d=%lu", nid, node_nr);
  3676. }
  3677. seq_putc(m, '\n');
  3678. anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
  3679. seq_printf(m, "anon=%lu", anon_nr);
  3680. for_each_node_state(nid, N_HIGH_MEMORY) {
  3681. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  3682. LRU_ALL_ANON);
  3683. seq_printf(m, " N%d=%lu", nid, node_nr);
  3684. }
  3685. seq_putc(m, '\n');
  3686. unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
  3687. seq_printf(m, "unevictable=%lu", unevictable_nr);
  3688. for_each_node_state(nid, N_HIGH_MEMORY) {
  3689. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  3690. BIT(LRU_UNEVICTABLE));
  3691. seq_printf(m, " N%d=%lu", nid, node_nr);
  3692. }
  3693. seq_putc(m, '\n');
  3694. return 0;
  3695. }
  3696. #endif /* CONFIG_NUMA */
  3697. static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
  3698. struct cgroup_map_cb *cb)
  3699. {
  3700. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  3701. struct mcs_total_stat mystat;
  3702. int i;
  3703. memset(&mystat, 0, sizeof(mystat));
  3704. mem_cgroup_get_local_stat(memcg, &mystat);
  3705. for (i = 0; i < NR_MCS_STAT; i++) {
  3706. if (i == MCS_SWAP && !do_swap_account)
  3707. continue;
  3708. cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
  3709. }
  3710. /* Hierarchical information */
  3711. {
  3712. unsigned long long limit, memsw_limit;
  3713. memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
  3714. cb->fill(cb, "hierarchical_memory_limit", limit);
  3715. if (do_swap_account)
  3716. cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
  3717. }
  3718. memset(&mystat, 0, sizeof(mystat));
  3719. mem_cgroup_get_total_stat(memcg, &mystat);
  3720. for (i = 0; i < NR_MCS_STAT; i++) {
  3721. if (i == MCS_SWAP && !do_swap_account)
  3722. continue;
  3723. cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
  3724. }
  3725. #ifdef CONFIG_DEBUG_VM
  3726. {
  3727. int nid, zid;
  3728. struct mem_cgroup_per_zone *mz;
  3729. unsigned long recent_rotated[2] = {0, 0};
  3730. unsigned long recent_scanned[2] = {0, 0};
  3731. for_each_online_node(nid)
  3732. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  3733. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  3734. recent_rotated[0] +=
  3735. mz->reclaim_stat.recent_rotated[0];
  3736. recent_rotated[1] +=
  3737. mz->reclaim_stat.recent_rotated[1];
  3738. recent_scanned[0] +=
  3739. mz->reclaim_stat.recent_scanned[0];
  3740. recent_scanned[1] +=
  3741. mz->reclaim_stat.recent_scanned[1];
  3742. }
  3743. cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
  3744. cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
  3745. cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
  3746. cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
  3747. }
  3748. #endif
  3749. return 0;
  3750. }
  3751. static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
  3752. {
  3753. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3754. return mem_cgroup_swappiness(memcg);
  3755. }
  3756. static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
  3757. u64 val)
  3758. {
  3759. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3760. struct mem_cgroup *parent;
  3761. if (val > 100)
  3762. return -EINVAL;
  3763. if (cgrp->parent == NULL)
  3764. return -EINVAL;
  3765. parent = mem_cgroup_from_cont(cgrp->parent);
  3766. cgroup_lock();
  3767. /* If under hierarchy, only empty-root can set this value */
  3768. if ((parent->use_hierarchy) ||
  3769. (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
  3770. cgroup_unlock();
  3771. return -EINVAL;
  3772. }
  3773. memcg->swappiness = val;
  3774. cgroup_unlock();
  3775. return 0;
  3776. }
  3777. static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
  3778. {
  3779. struct mem_cgroup_threshold_ary *t;
  3780. u64 usage;
  3781. int i;
  3782. rcu_read_lock();
  3783. if (!swap)
  3784. t = rcu_dereference(memcg->thresholds.primary);
  3785. else
  3786. t = rcu_dereference(memcg->memsw_thresholds.primary);
  3787. if (!t)
  3788. goto unlock;
  3789. usage = mem_cgroup_usage(memcg, swap);
  3790. /*
3791. * current_threshold points to the threshold just below the usage.
3792. * If that is not true, a threshold was crossed after the last
3793. * call of __mem_cgroup_threshold().
  3794. */
  3795. i = t->current_threshold;
  3796. /*
  3797. * Iterate backward over array of thresholds starting from
  3798. * current_threshold and check if a threshold is crossed.
  3799. * If none of thresholds below usage is crossed, we read
  3800. * only one element of the array here.
  3801. */
  3802. for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
  3803. eventfd_signal(t->entries[i].eventfd, 1);
  3804. /* i = current_threshold + 1 */
  3805. i++;
  3806. /*
  3807. * Iterate forward over array of thresholds starting from
  3808. * current_threshold+1 and check if a threshold is crossed.
  3809. * If none of thresholds above usage is crossed, we read
  3810. * only one element of the array here.
  3811. */
  3812. for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
  3813. eventfd_signal(t->entries[i].eventfd, 1);
  3814. /* Update current_threshold */
  3815. t->current_threshold = i - 1;
  3816. unlock:
  3817. rcu_read_unlock();
  3818. }
  3819. static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  3820. {
  3821. while (memcg) {
  3822. __mem_cgroup_threshold(memcg, false);
  3823. if (do_swap_account)
  3824. __mem_cgroup_threshold(memcg, true);
  3825. memcg = parent_mem_cgroup(memcg);
  3826. }
  3827. }
  3828. static int compare_thresholds(const void *a, const void *b)
  3829. {
  3830. const struct mem_cgroup_threshold *_a = a;
  3831. const struct mem_cgroup_threshold *_b = b;
  3832. return _a->threshold - _b->threshold;
  3833. }
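/*
 * Illustrative sketch, not part of memcontrol.c: a standalone model of the
 * scan that __mem_cgroup_threshold() performs over the sorted threshold
 * array.  With thresholds {4M, 8M, 16M}, the current index at 8M and usage
 * rising to 20M, the forward walk signals 16M; if usage later falls to 6M,
 * the backward walk signals 16M and 8M and the index ends at 4M.  The
 * example_signal() callback is a stand-in for eventfd_signal().
 */
static int example_rescan_thresholds(const u64 *thresholds, int size,
				     int current_idx, u64 usage,
				     void (*example_signal)(int idx))
{
	int i;

	/* Walk backward: notify every armed threshold now above the usage. */
	for (i = current_idx; i >= 0 && thresholds[i] > usage; i--)
		example_signal(i);

	/* Walk forward: notify every threshold now at or below the usage. */
	for (i++; i < size && thresholds[i] <= usage; i++)
		example_signal(i);

	/* New current index: the largest threshold <= usage (or -1 if none). */
	return i - 1;
}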
  3834. static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
  3835. {
  3836. struct mem_cgroup_eventfd_list *ev;
  3837. list_for_each_entry(ev, &memcg->oom_notify, list)
  3838. eventfd_signal(ev->eventfd, 1);
  3839. return 0;
  3840. }
  3841. static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
  3842. {
  3843. struct mem_cgroup *iter;
  3844. for_each_mem_cgroup_tree(iter, memcg)
  3845. mem_cgroup_oom_notify_cb(iter);
  3846. }
  3847. static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
  3848. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  3849. {
  3850. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3851. struct mem_cgroup_thresholds *thresholds;
  3852. struct mem_cgroup_threshold_ary *new;
  3853. int type = MEMFILE_TYPE(cft->private);
  3854. u64 threshold, usage;
  3855. int i, size, ret;
  3856. ret = res_counter_memparse_write_strategy(args, &threshold);
  3857. if (ret)
  3858. return ret;
  3859. mutex_lock(&memcg->thresholds_lock);
  3860. if (type == _MEM)
  3861. thresholds = &memcg->thresholds;
  3862. else if (type == _MEMSWAP)
  3863. thresholds = &memcg->memsw_thresholds;
  3864. else
  3865. BUG();
  3866. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  3867. /* Check if a threshold crossed before adding a new one */
  3868. if (thresholds->primary)
  3869. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
  3870. size = thresholds->primary ? thresholds->primary->size + 1 : 1;
  3871. /* Allocate memory for new array of thresholds */
  3872. new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
  3873. GFP_KERNEL);
  3874. if (!new) {
  3875. ret = -ENOMEM;
  3876. goto unlock;
  3877. }
  3878. new->size = size;
  3879. /* Copy thresholds (if any) to new array */
  3880. if (thresholds->primary) {
  3881. memcpy(new->entries, thresholds->primary->entries, (size - 1) *
  3882. sizeof(struct mem_cgroup_threshold));
  3883. }
  3884. /* Add new threshold */
  3885. new->entries[size - 1].eventfd = eventfd;
  3886. new->entries[size - 1].threshold = threshold;
  3887. /* Sort thresholds. Registering of new threshold isn't time-critical */
  3888. sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
  3889. compare_thresholds, NULL);
  3890. /* Find current threshold */
  3891. new->current_threshold = -1;
  3892. for (i = 0; i < size; i++) {
  3893. if (new->entries[i].threshold < usage) {
  3894. /*
  3895. * new->current_threshold will not be used until
  3896. * rcu_assign_pointer(), so it's safe to increment
  3897. * it here.
  3898. */
  3899. ++new->current_threshold;
  3900. }
  3901. }
  3902. /* Free old spare buffer and save old primary buffer as spare */
  3903. kfree(thresholds->spare);
  3904. thresholds->spare = thresholds->primary;
  3905. rcu_assign_pointer(thresholds->primary, new);
  3906. /* To be sure that nobody uses thresholds */
  3907. synchronize_rcu();
  3908. unlock:
  3909. mutex_unlock(&memcg->thresholds_lock);
  3910. return ret;
  3911. }
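/*
 * Illustrative sketch, not part of memcontrol.c: a small userspace snippet
 * showing how the threshold handled by mem_cgroup_usage_register_event()
 * above is armed through cgroup.event_control.  The "<event_fd> <target_fd>
 * <threshold>" write format and the cgroup_dir layout are assumptions based
 * on the memory cgroup documentation of this era.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

static int example_register_usage_threshold(const char *cgroup_dir,
					    unsigned long long threshold)
{
	char path[4096], cmd[64];
	int efd, ufd = -1, cfd = -1, len;

	efd = eventfd(0, 0);
	if (efd < 0)
		return -1;

	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", cgroup_dir);
	ufd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", cgroup_dir);
	cfd = open(path, O_WRONLY);
	if (ufd < 0 || cfd < 0)
		goto fail;

	/* Arm the notification: the kernel parses the two fds and the args. */
	len = snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, ufd, threshold);
	if (write(cfd, cmd, len) != len)
		goto fail;

	close(cfd);
	close(ufd);
	return efd;	/* read() on efd blocks until the threshold is crossed */
fail:
	if (ufd >= 0)
		close(ufd);
	if (cfd >= 0)
		close(cfd);
	close(efd);
	return -1;
}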
  3912. static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
  3913. struct cftype *cft, struct eventfd_ctx *eventfd)
  3914. {
  3915. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3916. struct mem_cgroup_thresholds *thresholds;
  3917. struct mem_cgroup_threshold_ary *new;
  3918. int type = MEMFILE_TYPE(cft->private);
  3919. u64 usage;
  3920. int i, j, size;
  3921. mutex_lock(&memcg->thresholds_lock);
  3922. if (type == _MEM)
  3923. thresholds = &memcg->thresholds;
  3924. else if (type == _MEMSWAP)
  3925. thresholds = &memcg->memsw_thresholds;
  3926. else
  3927. BUG();
  3928. if (!thresholds->primary)
  3929. goto unlock;
  3930. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  3931. /* Check if a threshold crossed before removing */
  3932. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3933. /* Calculate the new number of thresholds */
  3934. size = 0;
  3935. for (i = 0; i < thresholds->primary->size; i++) {
  3936. if (thresholds->primary->entries[i].eventfd != eventfd)
  3937. size++;
  3938. }
  3939. new = thresholds->spare;
  3940. /* Set thresholds array to NULL if we don't have thresholds */
  3941. if (!size) {
  3942. kfree(new);
  3943. new = NULL;
  3944. goto swap_buffers;
  3945. }
  3946. new->size = size;
  3947. /* Copy thresholds and find current threshold */
  3948. new->current_threshold = -1;
  3949. for (i = 0, j = 0; i < thresholds->primary->size; i++) {
  3950. if (thresholds->primary->entries[i].eventfd == eventfd)
  3951. continue;
  3952. new->entries[j] = thresholds->primary->entries[i];
  3953. if (new->entries[j].threshold < usage) {
  3954. /*
  3955. * new->current_threshold will not be used
  3956. * until rcu_assign_pointer(), so it's safe to increment
  3957. * it here.
  3958. */
  3959. ++new->current_threshold;
  3960. }
  3961. j++;
  3962. }
  3963. swap_buffers:
  3964. /* Swap primary and spare array */
  3965. thresholds->spare = thresholds->primary;
  3966. rcu_assign_pointer(thresholds->primary, new);
  3967. /* To be sure that nobody uses thresholds */
  3968. synchronize_rcu();
  3969. unlock:
  3970. mutex_unlock(&memcg->thresholds_lock);
  3971. }
  3972. static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
  3973. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  3974. {
  3975. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3976. struct mem_cgroup_eventfd_list *event;
  3977. int type = MEMFILE_TYPE(cft->private);
  3978. BUG_ON(type != _OOM_TYPE);
  3979. event = kmalloc(sizeof(*event), GFP_KERNEL);
  3980. if (!event)
  3981. return -ENOMEM;
  3982. spin_lock(&memcg_oom_lock);
  3983. event->eventfd = eventfd;
  3984. list_add(&event->list, &memcg->oom_notify);
  3985. /* already in OOM ? */
  3986. if (atomic_read(&memcg->under_oom))
  3987. eventfd_signal(eventfd, 1);
  3988. spin_unlock(&memcg_oom_lock);
  3989. return 0;
  3990. }
  3991. static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
  3992. struct cftype *cft, struct eventfd_ctx *eventfd)
  3993. {
  3994. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  3995. struct mem_cgroup_eventfd_list *ev, *tmp;
  3996. int type = MEMFILE_TYPE(cft->private);
  3997. BUG_ON(type != _OOM_TYPE);
  3998. spin_lock(&memcg_oom_lock);
  3999. list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
  4000. if (ev->eventfd == eventfd) {
  4001. list_del(&ev->list);
  4002. kfree(ev);
  4003. }
  4004. }
  4005. spin_unlock(&memcg_oom_lock);
  4006. }
  4007. static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
  4008. struct cftype *cft, struct cgroup_map_cb *cb)
  4009. {
  4010. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4011. cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
  4012. if (atomic_read(&memcg->under_oom))
  4013. cb->fill(cb, "under_oom", 1);
  4014. else
  4015. cb->fill(cb, "under_oom", 0);
  4016. return 0;
  4017. }
  4018. static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
  4019. struct cftype *cft, u64 val)
  4020. {
  4021. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4022. struct mem_cgroup *parent;
  4023. /* cannot set to root cgroup and only 0 and 1 are allowed */
  4024. if (!cgrp->parent || !((val == 0) || (val == 1)))
  4025. return -EINVAL;
  4026. parent = mem_cgroup_from_cont(cgrp->parent);
  4027. cgroup_lock();
  4028. /* oom-kill-disable is a flag for subhierarchy. */
  4029. if ((parent->use_hierarchy) ||
  4030. (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
  4031. cgroup_unlock();
  4032. return -EINVAL;
  4033. }
  4034. memcg->oom_kill_disable = val;
  4035. if (!val)
  4036. memcg_oom_recover(memcg);
  4037. cgroup_unlock();
  4038. return 0;
  4039. }
  4040. #ifdef CONFIG_NUMA
  4041. static const struct file_operations mem_control_numa_stat_file_operations = {
  4042. .read = seq_read,
  4043. .llseek = seq_lseek,
  4044. .release = single_release,
  4045. };
  4046. static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
  4047. {
  4048. struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
  4049. file->f_op = &mem_control_numa_stat_file_operations;
  4050. return single_open(file, mem_control_numa_stat_show, cont);
  4051. }
  4052. #endif /* CONFIG_NUMA */
  4053. #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
  4054. static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
  4055. {
  4056. /*
  4057. * Part of this would be better living in a separate allocation
  4058. * function, leaving us with just the cgroup tree population work.
  4059. * We, however, depend on state such as network's proto_list that
4060. * is only initialized after cgroup creation. I found the less
4061. * cumbersome way to deal with it is to defer it all to populate time.
  4062. */
  4063. return mem_cgroup_sockets_init(cont, ss);
  4064. };
  4065. static void kmem_cgroup_destroy(struct cgroup *cont)
  4066. {
  4067. mem_cgroup_sockets_destroy(cont);
  4068. }
  4069. #else
  4070. static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
  4071. {
  4072. return 0;
  4073. }
  4074. static void kmem_cgroup_destroy(struct cgroup *cont)
  4075. {
  4076. }
  4077. #endif
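
/*
 * Control files created in every memory cgroup directory.  The memsw.*
 * files are added separately, and only when swap accounting is enabled.
 */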
static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
                .read_u64 = mem_cgroup_read,
                .register_event = mem_cgroup_usage_register_event,
                .unregister_event = mem_cgroup_usage_unregister_event,
        },
        {
                .name = "max_usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "soft_limit_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "stat",
                .read_map = mem_control_stat_show,
        },
        {
                .name = "force_empty",
                .trigger = mem_cgroup_force_empty_write,
        },
        {
                .name = "use_hierarchy",
                .write_u64 = mem_cgroup_hierarchy_write,
                .read_u64 = mem_cgroup_hierarchy_read,
        },
        {
                .name = "swappiness",
                .read_u64 = mem_cgroup_swappiness_read,
                .write_u64 = mem_cgroup_swappiness_write,
        },
        {
                .name = "move_charge_at_immigrate",
                .read_u64 = mem_cgroup_move_charge_read,
                .write_u64 = mem_cgroup_move_charge_write,
        },
        {
                .name = "oom_control",
                .read_map = mem_cgroup_oom_control_read,
                .write_u64 = mem_cgroup_oom_control_write,
                .register_event = mem_cgroup_oom_register_event,
                .unregister_event = mem_cgroup_oom_unregister_event,
                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
        },
#ifdef CONFIG_NUMA
        {
                .name = "numa_stat",
                .open = mem_control_numa_stat_open,
                .mode = S_IRUGO,
        },
#endif
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
        {
                .name = "memsw.usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
                .read_u64 = mem_cgroup_read,
                .register_event = mem_cgroup_usage_register_event,
                .unregister_event = mem_cgroup_usage_unregister_event,
        },
        {
                .name = "memsw.max_usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "memsw.limit_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
        {
                .name = "memsw.failcnt",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
                .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
        if (!do_swap_account)
                return 0;
        return cgroup_add_files(cont, ss, memsw_cgroup_files,
                                ARRAY_SIZE(memsw_cgroup_files));
};
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
        return 0;
}
#endif

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
        enum lru_list lru;
        int zone, tmp = node;
        /*
         * This routine is called against possible nodes.
         * But it's BUG to call kmalloc() against offline node.
         *
         * TODO: this routine can waste much memory for nodes which will
         *       never be onlined. It's better to use memory hotplug callback
         *       function.
         */
        if (!node_state(node, N_NORMAL_MEMORY))
                tmp = -1;
        pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
        if (!pn)
                return 1;

        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                for_each_lru(lru)
                        INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
                mz->usage_in_excess = 0;
                mz->on_tree = false;
                mz->memcg = memcg;
        }
        memcg->info.nodeinfo[node] = pn;
        return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
        kfree(memcg->info.nodeinfo[node]);
}
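
/*
 * struct mem_cgroup can be larger than a page when MAX_NUMNODES is big, so
 * fall back from kzalloc() to vzalloc() in that case and allocate the
 * per-cpu statistics separately.
 */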
static struct mem_cgroup *mem_cgroup_alloc(void)
{
        struct mem_cgroup *memcg;
        int size = sizeof(struct mem_cgroup);

        /* Can be very big if MAX_NUMNODES is very big */
        if (size < PAGE_SIZE)
                memcg = kzalloc(size, GFP_KERNEL);
        else
                memcg = vzalloc(size);

        if (!memcg)
                return NULL;

        memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
        if (!memcg->stat)
                goto out_free;
        spin_lock_init(&memcg->pcp_counter_lock);
        return memcg;

out_free:
        if (size < PAGE_SIZE)
                kfree(memcg);
        else
                vfree(memcg);
        return NULL;
}

/*
 * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
 * but in process context.  The work_freeing structure is overlaid
 * on the rcu_freeing structure, which itself is overlaid on memsw.
 */
static void vfree_work(struct work_struct *work)
{
        struct mem_cgroup *memcg;

        memcg = container_of(work, struct mem_cgroup, work_freeing);
        vfree(memcg);
}

static void vfree_rcu(struct rcu_head *rcu_head)
{
        struct mem_cgroup *memcg;

        memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
        INIT_WORK(&memcg->work_freeing, vfree_work);
        schedule_work(&memcg->work_freeing);
}

/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of reference from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */
static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
        int node;

        mem_cgroup_remove_from_trees(memcg);
        free_css_id(&mem_cgroup_subsys, &memcg->css);

        for_each_node(node)
                free_mem_cgroup_per_zone_info(memcg, node);

        free_percpu(memcg->stat);
        if (sizeof(struct mem_cgroup) < PAGE_SIZE)
                kfree_rcu(memcg, rcu_freeing);
        else
                call_rcu(&memcg->rcu_freeing, vfree_rcu);
}

static void mem_cgroup_get(struct mem_cgroup *memcg)
{
        atomic_inc(&memcg->refcnt);
}

static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
{
        if (atomic_sub_and_test(count, &memcg->refcnt)) {
                struct mem_cgroup *parent = parent_mem_cgroup(memcg);
                __mem_cgroup_free(memcg);
                if (parent)
                        mem_cgroup_put(parent);
        }
}

static void mem_cgroup_put(struct mem_cgroup *memcg)
{
        __mem_cgroup_put(memcg, 1);
}

/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->res.parent)
                return NULL;
        return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
EXPORT_SYMBOL(parent_mem_cgroup);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
        if (!mem_cgroup_disabled() && really_do_swap_account)
                do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif
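
/*
 * Allocate the per-node/per-zone red-black trees used to track memory
 * cgroups that exceed their soft limit.  Returns 0 on success and 1 on
 * allocation failure, after freeing whatever was already set up.
 */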
static int mem_cgroup_soft_limit_tree_init(void)
{
        struct mem_cgroup_tree_per_node *rtpn;
        struct mem_cgroup_tree_per_zone *rtpz;
        int tmp, node, zone;

        for_each_node(node) {
                tmp = node;
                if (!node_state(node, N_NORMAL_MEMORY))
                        tmp = -1;
                rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
                if (!rtpn)
                        goto err_cleanup;

                soft_limit_tree.rb_tree_per_node[node] = rtpn;

                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        rtpz = &rtpn->rb_tree_per_zone[zone];
                        rtpz->rb_root = RB_ROOT;
                        spin_lock_init(&rtpz->lock);
                }
        }
        return 0;

err_cleanup:
        for_each_node(node) {
                if (!soft_limit_tree.rb_tree_per_node[node])
                        break;
                kfree(soft_limit_tree.rb_tree_per_node[node]);
                soft_limit_tree.rb_tree_per_node[node] = NULL;
        }
        return 1;
}
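
/*
 * Create a new memory cgroup.  The first (root) cgroup also performs the
 * one-time global setup: swap accounting, the soft-limit trees, the per-cpu
 * charge stocks and the CPU hotplug notifier.
 */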
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup *cont)
{
        struct mem_cgroup *memcg, *parent;
        long error = -ENOMEM;
        int node;

        memcg = mem_cgroup_alloc();
        if (!memcg)
                return ERR_PTR(error);

        for_each_node(node)
                if (alloc_mem_cgroup_per_zone_info(memcg, node))
                        goto free_out;

        /* root ? */
        if (cont->parent == NULL) {
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
                root_mem_cgroup = memcg;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
                        INIT_WORK(&stock->work, drain_local_stock);
                }
                hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                memcg->use_hierarchy = parent->use_hierarchy;
                memcg->oom_kill_disable = parent->oom_kill_disable;
        }

        if (parent && parent->use_hierarchy) {
                res_counter_init(&memcg->res, &parent->res);
                res_counter_init(&memcg->memsw, &parent->memsw);
                /*
                 * We increment refcnt of the parent to ensure that we can
                 * safely access it on res_counter_charge/uncharge.
                 * This refcnt will be decremented when freeing this
                 * mem_cgroup(see mem_cgroup_put).
                 */
                mem_cgroup_get(parent);
        } else {
                res_counter_init(&memcg->res, NULL);
                res_counter_init(&memcg->memsw, NULL);
        }
        memcg->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&memcg->oom_notify);

        if (parent)
                memcg->swappiness = mem_cgroup_swappiness(parent);
        atomic_set(&memcg->refcnt, 1);
        memcg->move_charge_at_immigrate = 0;
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        return &memcg->css;
free_out:
        __mem_cgroup_free(memcg);
        return ERR_PTR(error);
}

static int mem_cgroup_pre_destroy(struct cgroup *cont)
{
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

        return mem_cgroup_force_empty(memcg, false);
}

static void mem_cgroup_destroy(struct cgroup *cont)
{
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

        kmem_cgroup_destroy(cont);

        mem_cgroup_put(memcg);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        int ret;

        ret = cgroup_add_files(cont, ss, mem_cgroup_files,
                                ARRAY_SIZE(mem_cgroup_files));

        if (!ret)
                ret = register_memsw_files(cont, ss);

        if (!ret)
                ret = register_kmem_files(cont, ss);

        return ret;
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE 256
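
/*
 * Pre-charge @count pages to mc.to.  A single bulk res_counter charge is
 * attempted first; on failure we fall back to charging one page at a time,
 * rescheduling every PRECHARGE_COUNT_AT_ONCE pages.
 */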
static int mem_cgroup_do_precharge(unsigned long count)
{
        int ret = 0;
        int batch_count = PRECHARGE_COUNT_AT_ONCE;
        struct mem_cgroup *memcg = mc.to;

        if (mem_cgroup_is_root(memcg)) {
                mc.precharge += count;
                /* we don't need css_get for root */
                return ret;
        }
        /* try to charge at once */
        if (count > 1) {
                struct res_counter *dummy;
                /*
                 * "memcg" cannot be under rmdir() because we've already checked
                 * by cgroup_lock_live_cgroup() that it is not removed and we
                 * are still under the same cgroup_mutex. So we can postpone
                 * css_get().
                 */
                if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
                        goto one_by_one;
                if (do_swap_account && res_counter_charge(&memcg->memsw,
                                                PAGE_SIZE * count, &dummy)) {
                        res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
                        goto one_by_one;
                }
                mc.precharge += count;
                return ret;
        }
one_by_one:
        /* fall back to one by one charge */
        while (count--) {
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                if (!batch_count--) {
                        batch_count = PRECHARGE_COUNT_AT_ONCE;
                        cond_resched();
                }
                ret = __mem_cgroup_try_charge(NULL,
                                        GFP_KERNEL, 1, &memcg, false);
                if (ret)
                        /* mem_cgroup_clear_mc() will do uncharge later */
                        return ret;
                mc.precharge++;
        }
        return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 *          (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
        struct page     *page;
        swp_entry_t     ent;
};

enum mc_target_type {
        MC_TARGET_NONE = 0,
        MC_TARGET_PAGE,
        MC_TARGET_SWAP,
};
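
/*
 * Helpers for get_mctgt_type(): find the page (or swap entry) behind a
 * present, swap or file pte, honouring the move_anon()/move_file() settings
 * of move_charge_at_immigrate.
 */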
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t ptent)
{
        struct page *page = vm_normal_page(vma, addr, ptent);

        if (!page || !page_mapped(page))
                return NULL;
        if (PageAnon(page)) {
                /* we don't move shared anon */
                if (!move_anon() || page_mapcount(page) > 2)
                        return NULL;
        } else if (!move_file())
                /* we ignore mapcount for file pages */
                return NULL;
        if (!get_page_unless_zero(page))
                return NULL;

        return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
        int usage_count;
        struct page *page = NULL;
        swp_entry_t ent = pte_to_swp_entry(ptent);

        if (!move_anon() || non_swap_entry(ent))
                return NULL;
        usage_count = mem_cgroup_count_swap_user(ent, &page);
        if (usage_count > 1) {  /* we don't move shared anon */
                if (page)
                        put_page(page);
                return NULL;
        }
        if (do_swap_account)
                entry->val = ent.val;

        return page;
}

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
        struct page *page = NULL;
        struct inode *inode;
        struct address_space *mapping;
        pgoff_t pgoff;

        if (!vma->vm_file) /* anonymous vma */
                return NULL;
        if (!move_file())
                return NULL;

        inode = vma->vm_file->f_path.dentry->d_inode;
        mapping = vma->vm_file->f_mapping;
        if (pte_none(ptent))
                pgoff = linear_page_index(vma, addr);
        else /* pte_file(ptent) is true */
                pgoff = pte_to_pgoff(ptent);

        /* page is moved even if it's not RSS of this task(page-faulted). */
        page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
        /* shmem/tmpfs may report page out on swap: account for that too. */
        if (radix_tree_exceptional_entry(page)) {
                swp_entry_t swap = radix_to_swp_entry(page);
                if (do_swap_account)
                        *entry = swap;
                page = find_get_page(&swapper_space, swap.val);
        }
#endif
        return page;
}

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
                unsigned long addr, pte_t ptent, union mc_target *target)
{
        struct page *page = NULL;
        struct page_cgroup *pc;
        enum mc_target_type ret = MC_TARGET_NONE;
        swp_entry_t ent = { .val = 0 };

        if (pte_present(ptent))
                page = mc_handle_present_pte(vma, addr, ptent);
        else if (is_swap_pte(ptent))
                page = mc_handle_swap_pte(vma, addr, ptent, &ent);
        else if (pte_none(ptent) || pte_file(ptent))
                page = mc_handle_file_pte(vma, addr, ptent, &ent);

        if (!page && !ent.val)
                return ret;
        if (page) {
                pc = lookup_page_cgroup(page);
                /*
                 * Do only loose check w/o page_cgroup lock.
                 * mem_cgroup_move_account() checks the pc is valid or not under
                 * the lock.
                 */
                if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
                        ret = MC_TARGET_PAGE;
                        if (target)
                                target->page = page;
                }
                if (!ret || !target)
                        put_page(page);
        }
        /* There is a swap entry and a page doesn't exist or isn't charged */
        if (ent.val && !ret &&
            css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
                ret = MC_TARGET_SWAP;
                if (target)
                        target->ent = ent;
        }
        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
                unsigned long addr, pmd_t pmd, union mc_target *target)
{
        struct page *page = NULL;
        struct page_cgroup *pc;
        enum mc_target_type ret = MC_TARGET_NONE;

        page = pmd_page(pmd);
        VM_BUG_ON(!page || !PageHead(page));
        if (!move_anon())
                return ret;
        pc = lookup_page_cgroup(page);
        if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
                ret = MC_TARGET_PAGE;
                if (target) {
                        get_page(page);
                        target->page = page;
                }
        }
        return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
                unsigned long addr, pmd_t pmd, union mc_target *target)
{
        return MC_TARGET_NONE;
}
#endif
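
/*
 * Page-table walk callback that counts how many charges would have to be
 * moved; a transparent huge page accounts for HPAGE_PMD_NR pages at once.
 */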
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
                                        unsigned long addr, unsigned long end,
                                        struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->private;
        pte_t *pte;
        spinlock_t *ptl;

        if (pmd_trans_huge_lock(pmd, vma) == 1) {
                if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
                        mc.precharge += HPAGE_PMD_NR;
                spin_unlock(&vma->vm_mm->page_table_lock);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                if (get_mctgt_type(vma, addr, *pte, NULL))
                        mc.precharge++; /* increment precharge temporarily */
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();

        return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
        unsigned long precharge;
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                struct mm_walk mem_cgroup_count_precharge_walk = {
                        .pmd_entry = mem_cgroup_count_precharge_pte_range,
                        .mm = mm,
                        .private = vma,
                };
                if (is_vm_hugetlb_page(vma))
                        continue;
                walk_page_range(vma->vm_start, vma->vm_end,
                                        &mem_cgroup_count_precharge_walk);
        }
        up_read(&mm->mmap_sem);

        precharge = mc.precharge;
        mc.precharge = 0;

        return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
        unsigned long precharge = mem_cgroup_count_precharge(mm);

        VM_BUG_ON(mc.moving_task);
        mc.moving_task = current;
        return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
        struct mem_cgroup *from = mc.from;
        struct mem_cgroup *to = mc.to;

        /* we must uncharge all the leftover precharges from mc.to */
        if (mc.precharge) {
                __mem_cgroup_cancel_charge(mc.to, mc.precharge);
                mc.precharge = 0;
        }
        /*
         * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
         * we must uncharge here.
         */
        if (mc.moved_charge) {
                __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
                mc.moved_charge = 0;
        }
        /* we must fixup refcnts and charges */
        if (mc.moved_swap) {
                /* uncharge swap account from the old cgroup */
                if (!mem_cgroup_is_root(mc.from))
                        res_counter_uncharge(&mc.from->memsw,
                                                PAGE_SIZE * mc.moved_swap);
                __mem_cgroup_put(mc.from, mc.moved_swap);

                if (!mem_cgroup_is_root(mc.to)) {
                        /*
                         * we charged both to->res and to->memsw, so we should
                         * uncharge to->res.
                         */
                        res_counter_uncharge(&mc.to->res,
                                                PAGE_SIZE * mc.moved_swap);
                }
                /* we've already done mem_cgroup_get(mc.to) */
                mc.moved_swap = 0;
        }
        memcg_oom_recover(from);
        memcg_oom_recover(to);
        wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
        struct mem_cgroup *from = mc.from;

        /*
         * we must clear moving_task before waking up waiters at the end of
         * task migration.
         */
        mc.moving_task = NULL;
        __mem_cgroup_clear_mc();
        spin_lock(&mc.lock);
        mc.from = NULL;
        mc.to = NULL;
        spin_unlock(&mc.lock);
        mem_cgroup_end_move(from);
}
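
/*
 * cgroup can_attach handler: when move_charge_at_immigrate is set and the
 * migrating task owns its mm, record mc.from/mc.to and pre-charge the
 * destination before the task is actually moved.
 */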
static int mem_cgroup_can_attach(struct cgroup *cgroup,
                                 struct cgroup_taskset *tset)
{
        struct task_struct *p = cgroup_taskset_first(tset);
        int ret = 0;
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);

        if (memcg->move_charge_at_immigrate) {
                struct mm_struct *mm;
                struct mem_cgroup *from = mem_cgroup_from_task(p);

                VM_BUG_ON(from == memcg);

                mm = get_task_mm(p);
                if (!mm)
                        return 0;
                /* We move charges only when we move an owner of the mm */
                if (mm->owner == p) {
                        VM_BUG_ON(mc.from);
                        VM_BUG_ON(mc.to);
                        VM_BUG_ON(mc.precharge);
                        VM_BUG_ON(mc.moved_charge);
                        VM_BUG_ON(mc.moved_swap);
                        mem_cgroup_start_move(from);
                        spin_lock(&mc.lock);
                        mc.from = from;
                        mc.to = memcg;
                        spin_unlock(&mc.lock);
                        /* We set mc.moving_task later */

                        ret = mem_cgroup_precharge_mc(mm);
                        if (ret)
                                mem_cgroup_clear_mc();
                }
                mmput(mm);
        }
        return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
                                     struct cgroup_taskset *tset)
{
        mem_cgroup_clear_mc();
}
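
/*
 * Page-table walk callback that actually moves charges from mc.from to
 * mc.to, consuming the precharges taken in can_attach() and retrying one
 * page at a time once they run out.
 */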
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                                unsigned long addr, unsigned long end,
                                struct mm_walk *walk)
{
        int ret = 0;
        struct vm_area_struct *vma = walk->private;
        pte_t *pte;
        spinlock_t *ptl;
        enum mc_target_type target_type;
        union mc_target target;
        struct page *page;
        struct page_cgroup *pc;

        /*
         * We don't take compound_lock() here but no race with splitting thp
         * happens because:
         *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
         *    under splitting, which means there's no concurrent thp split,
         *  - if another thread runs into split_huge_page() just after we
         *    entered this if-block, the thread must wait for page table lock
         *    to be unlocked in __split_huge_page_splitting(), where the main
         *    part of thp split is not executed yet.
         */
        if (pmd_trans_huge_lock(pmd, vma) == 1) {
                if (!mc.precharge) {
                        spin_unlock(&vma->vm_mm->page_table_lock);
                        return 0;
                }
                target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
                if (target_type == MC_TARGET_PAGE) {
                        page = target.page;
                        if (!isolate_lru_page(page)) {
                                pc = lookup_page_cgroup(page);
                                if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
                                                             pc, mc.from, mc.to,
                                                             false)) {
                                        mc.precharge -= HPAGE_PMD_NR;
                                        mc.moved_charge += HPAGE_PMD_NR;
                                }
                                putback_lru_page(page);
                        }
                        put_page(page);
                }
                spin_unlock(&vma->vm_mm->page_table_lock);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;
retry:
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; addr += PAGE_SIZE) {
                pte_t ptent = *(pte++);
                swp_entry_t ent;

                if (!mc.precharge)
                        break;

                switch (get_mctgt_type(vma, addr, ptent, &target)) {
                case MC_TARGET_PAGE:
                        page = target.page;
                        if (isolate_lru_page(page))
                                goto put;
                        pc = lookup_page_cgroup(page);
                        if (!mem_cgroup_move_account(page, 1, pc,
                                                     mc.from, mc.to, false)) {
                                mc.precharge--;
                                /* we uncharge from mc.from later. */
                                mc.moved_charge++;
                        }
                        putback_lru_page(page);
put:                    /* get_mctgt_type() gets the page */
                        put_page(page);
                        break;
                case MC_TARGET_SWAP:
                        ent = target.ent;
                        if (!mem_cgroup_move_swap_account(ent,
                                                mc.from, mc.to, false)) {
                                mc.precharge--;
                                /* we fixup refcnts and charges later. */
                                mc.moved_swap++;
                        }
                        break;
                default:
                        break;
                }
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();

        if (addr != end) {
                /*
                 * We have consumed all precharges we got in can_attach().
                 * We try charge one by one, but don't do any additional
                 * charges to mc.to if we have failed in charge once in attach()
                 * phase.
                 */
                ret = mem_cgroup_do_precharge(1);
                if (!ret)
                        goto retry;
        }

        return ret;
}
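
/*
 * Walk every VMA of @mm and move its charges.  If mmap_sem cannot be taken
 * immediately, the pending precharges are cancelled (waking any waiters)
 * and the trylock is retried.
 */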
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        lru_add_drain_all();
retry:
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                /*
                 * Someone who is holding the mmap_sem might be waiting in
                 * the waitq. So we cancel all extra charges, wake up all
                 * waiters, and retry. Because we cancel precharges, we might
                 * not be able to move enough charges, but moving charge is a
                 * best-effort feature anyway, so it wouldn't be a big problem.
                 */
                __mem_cgroup_clear_mc();
                cond_resched();
                goto retry;
        }
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                int ret;
                struct mm_walk mem_cgroup_move_charge_walk = {
                        .pmd_entry = mem_cgroup_move_charge_pte_range,
                        .mm = mm,
                        .private = vma,
                };
                if (is_vm_hugetlb_page(vma))
                        continue;
                ret = walk_page_range(vma->vm_start, vma->vm_end,
                                                &mem_cgroup_move_charge_walk);
                if (ret)
                        /*
                         * means we have consumed all precharges and failed in
                         * doing additional charge. Just abandon here.
                         */
                        break;
        }
        up_read(&mm->mmap_sem);
}
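
/*
 * cgroup attach handler: perform the actual charge moving for the new
 * task's mm, then clear the move context set up in can_attach().
 */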
static void mem_cgroup_move_task(struct cgroup *cont,
                                 struct cgroup_taskset *tset)
{
        struct task_struct *p = cgroup_taskset_first(tset);
        struct mm_struct *mm = get_task_mm(p);

        if (mm) {
                if (mc.to)
                        mem_cgroup_move_charge(mm);
                put_swap_token(mm);
                mmput(mm);
        }
        if (mc.to)
                mem_cgroup_clear_mc();
}
#else   /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup *cgroup,
                                 struct cgroup_taskset *tset)
{
        return 0;
}

static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
                                     struct cgroup_taskset *tset)
{
}

static void mem_cgroup_move_task(struct cgroup *cont,
                                 struct cgroup_taskset *tset)
{
}
#endif
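
/* Memory controller registration with the cgroup core. */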
struct cgroup_subsys mem_cgroup_subsys = {
        .name = "memory",
        .subsys_id = mem_cgroup_subsys_id,
        .create = mem_cgroup_create,
        .pre_destroy = mem_cgroup_pre_destroy,
        .destroy = mem_cgroup_destroy,
        .populate = mem_cgroup_populate,
        .can_attach = mem_cgroup_can_attach,
        .cancel_attach = mem_cgroup_cancel_attach,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
        .use_id = 1,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static int __init enable_swap_account(char *s)
{
        /* consider enabled if no parameter or 1 is given */
        if (!strcmp(s, "1"))
                really_do_swap_account = 1;
        else if (!strcmp(s, "0"))
                really_do_swap_account = 0;
        return 1;
}
__setup("swapaccount=", enable_swap_account);
#endif