memcontrol.c

/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include <asm/uaccess.h>
#include <trace/events/vmscan.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
EXPORT_SYMBOL(mem_cgroup_subsys);
#define MEM_CGROUP_RECLAIM_RETRIES 5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif
#else
#define do_swap_account 0
#endif
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};
static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"mapped_file",
	"swap",
};
enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};
static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET 1024
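/*
 * The *_EVENTS_TARGET values are the spacing, in page events, between two
 * successive threshold checks, soft-limit tree updates and NUMA info
 * refreshes; mem_cgroup_event_ratelimit() below compares the per-cpu
 * nr_page_events count against the stored per-target "next" value.
 */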
struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};
struct mem_cgroup_reclaim_iter {
	/* css_id of the last scanned hierarchy member */
	int position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};
/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];
	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};
struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};
struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[0];
};
/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};
struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};
struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};
static struct mem_cgroup_tree soft_limit_tree __read_mostly;
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};
/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};
struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};
static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	union {
		/*
		 * the counter to account for mem+swap usage.
		 */
		struct res_counter memsw;
		/*
		 * rcu_freeing is used only when freeing struct mem_cgroup,
		 * so put it into a union to avoid wasting more memory.
		 * It must be disjoint from the css field. It could be
		 * in a union with the res field, but res plays a much
		 * larger part in mem_cgroup life than memsw, and might
		 * be of interest, even at time of free, when debugging.
		 * So share rcu_head with the less interesting memsw.
		 */
		struct rcu_head rcu_freeing;
		/*
		 * We also need some space for a worker in deferred freeing.
		 * By the time we call it, rcu_freeing is no longer in use.
		 */
		struct work_struct work_freeing;
	};
	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	refcnt;
	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;
	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;
	/* protect arrays of thresholds */
	struct mutex thresholds_lock;
	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;
	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;
	/* For oom notifier event fd */
	struct list_head oom_notify;
	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct tcp_memcontrol tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list. per-memcg */
	struct list_head memcg_slab_caches;
	/* Not a spinlock, we can take a lot of time walking the list */
	struct mutex slab_caches_mutex;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif
	int	last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 *
	 * WARNING: This has to be the last element of the struct. Don't
	 * add new fields after this point.
	 */
	struct mem_cgroup_lru_info info;
};
static size_t memcg_size(void)
{
	return sizeof(struct mem_cgroup) +
		nr_node_ids * sizeof(struct mem_cgroup_per_node);
}
/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};
/* We account when limit is on, but only after call sites are patched */
#define KMEM_ACCOUNTED_MASK \
		((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))
#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}
static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}
static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}
static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
{
	clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}
static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}
static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif
/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};
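/*
 * For example (user-visible interface, documented in
 * Documentation/cgroups/memory.txt): writing 1 to
 * memory.move_charge_at_immigrate moves private anonymous pages, writing 2
 * moves file pages, and writing 3 moves both, since the value is a bitmap
 * indexed by the move_type values above.
 */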
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}
static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};
/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
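/*
 * For example, a cftype whose private value is MEMFILE_PRIVATE(_MEMSWAP,
 * RES_LIMIT) (an illustrative pairing; RES_LIMIT comes from res_counter.h)
 * carries the resource type in the upper 16 bits and the res_counter member
 * in the lower 16 bits, so MEMFILE_TYPE() and MEMFILE_ATTR() recover
 * _MEMSWAP and RES_LIMIT again.
 */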
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return container_of(s, struct mem_cgroup, css);
}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;
		BUG_ON(!sk->sk_prot->proto_cgroup);
		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			mem_cgroup_get(sk->sk_cgrp->memcg);
			return;
		}
		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
			mem_cgroup_get(memcg);
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);
void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		mem_cgroup_put(memcg);
	}
}
struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;
	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif
#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * There are two main reasons for not using the css_id for this:
 *  1) this works better in sparse environments, where we have a lot of memcgs,
 *     but only a few kmem-limited. Or also, if we have, for instance, 200
 *     memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *     200 entry array for that.
 *
 *  2) In order not to violate the cgroup API, we would like to do all memory
 *     allocation in ->create(). At that point, we haven't yet allocated the
 *     css_id. Having a separate index prevents us from messing with the cgroup
 *     core for this.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size. It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;
/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * css_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE 65535
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in the kmem destruction function,
	 * since the charges will outlive the cgroup.
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */
static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}
static void drain_all_stock_async(struct mem_cgroup *memcg);
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
}
struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}
static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	return mem_cgroup_zoneinfo(memcg, nid, zid);
}
static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}
static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}
static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;
	if (mz->on_tree)
		return;
	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
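/*
 * Because memcgs with an equal usage_in_excess are linked to the right,
 * the rightmost node of the tree (rb_last(), see
 * __mem_cgroup_largest_soft_limit_node() below) is always the memcg of this
 * zone with the largest soft-limit excess.
 */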
static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}
static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}
static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	for_each_node(node) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;
retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */
	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
		!css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}
static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;
	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value. We could implement a similar
 * periodic synchronization of the counter in memcg as well.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because he accounts memory. Even if we provided a quick-and-fuzzy read, we
 * would always have to visit all online cpus and make the sum. So, for now,
 * unnecessary synchronization is not implemented. (just implemented for cpu
 * hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;
	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 bool anon, int nr_pages)
{
	preempt_disable();
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	preempt_enable();
}
  796. unsigned long
  797. mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  798. {
  799. struct mem_cgroup_per_zone *mz;
  800. mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
  801. return mz->lru_size[lru];
  802. }
  803. static unsigned long
  804. mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
  805. unsigned int lru_mask)
  806. {
  807. struct mem_cgroup_per_zone *mz;
  808. enum lru_list lru;
  809. unsigned long ret = 0;
  810. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  811. for_each_lru(lru) {
  812. if (BIT(lru) & lru_mask)
  813. ret += mz->lru_size[lru];
  814. }
  815. return ret;
  816. }
  817. static unsigned long
  818. mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
  819. int nid, unsigned int lru_mask)
  820. {
  821. u64 total = 0;
  822. int zid;
  823. for (zid = 0; zid < MAX_NR_ZONES; zid++)
  824. total += mem_cgroup_zone_nr_lru_pages(memcg,
  825. nid, zid, lru_mask);
  826. return total;
  827. }
  828. static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
  829. unsigned int lru_mask)
  830. {
  831. int nid;
  832. u64 total = 0;
  833. for_each_node_state(nid, N_MEMORY)
  834. total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
  835. return total;
  836. }
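/*
 * Usage sketch for the lru_mask argument above (illustrative only): the mask
 * is a bitmask of lru_list indices, so callers can sum several LRU lists in
 * one call, e.g. the reclaimability checks later in this file do
 *
 *	file = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE);
 *	anon = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON);
 *
 * while a single list is requested with BIT(lru), as the OOM report code
 * does with mem_cgroup_nr_lru_pages(iter, BIT(i)).
 */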
  837. static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
  838. enum mem_cgroup_events_target target)
  839. {
  840. unsigned long val, next;
  841. val = __this_cpu_read(memcg->stat->nr_page_events);
  842. next = __this_cpu_read(memcg->stat->targets[target]);
  843. /* from time_after() in jiffies.h */
  844. if ((long)next - (long)val < 0) {
  845. switch (target) {
  846. case MEM_CGROUP_TARGET_THRESH:
  847. next = val + THRESHOLDS_EVENTS_TARGET;
  848. break;
  849. case MEM_CGROUP_TARGET_SOFTLIMIT:
  850. next = val + SOFTLIMIT_EVENTS_TARGET;
  851. break;
  852. case MEM_CGROUP_TARGET_NUMAINFO:
  853. next = val + NUMAINFO_EVENTS_TARGET;
  854. break;
  855. default:
  856. break;
  857. }
  858. __this_cpu_write(memcg->stat->targets[target], next);
  859. return true;
  860. }
  861. return false;
  862. }
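/*
 * Worked illustration of the ratelimit above (the concrete numbers are only
 * an assumption for the example; the real periods come from the
 * *_EVENTS_TARGET constants): suppose the per-cpu target for
 * MEM_CGROUP_TARGET_THRESH is currently 256 and nr_page_events has reached
 * 300. The time_after()-style comparison sees 256 - 300 < 0, returns true,
 * and re-arms the target at 300 + THRESHOLDS_EVENTS_TARGET. Until
 * nr_page_events passes that new target, further calls return false, so the
 * threshold/softlimit/numainfo work is done at most once per period per cpu.
 */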
  863. /*
  864. * Check events in order.
  865. *
  866. */
  867. static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
  868. {
  869. preempt_disable();
  870. /* threshold event is triggered in finer grain than soft limit */
  871. if (unlikely(mem_cgroup_event_ratelimit(memcg,
  872. MEM_CGROUP_TARGET_THRESH))) {
  873. bool do_softlimit;
  874. bool do_numainfo __maybe_unused;
  875. do_softlimit = mem_cgroup_event_ratelimit(memcg,
  876. MEM_CGROUP_TARGET_SOFTLIMIT);
  877. #if MAX_NUMNODES > 1
  878. do_numainfo = mem_cgroup_event_ratelimit(memcg,
  879. MEM_CGROUP_TARGET_NUMAINFO);
  880. #endif
  881. preempt_enable();
  882. mem_cgroup_threshold(memcg);
  883. if (unlikely(do_softlimit))
  884. mem_cgroup_update_tree(memcg, page);
  885. #if MAX_NUMNODES > 1
  886. if (unlikely(do_numainfo))
  887. atomic_inc(&memcg->numainfo_events);
  888. #endif
  889. } else
  890. preempt_enable();
  891. }
  892. struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
  893. {
  894. return mem_cgroup_from_css(
  895. cgroup_subsys_state(cont, mem_cgroup_subsys_id));
  896. }
  897. struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
  898. {
  899. /*
  900. * mm_update_next_owner() may clear mm->owner to NULL
  901. * if it races with swapoff, page migration, etc.
  902. * So this can be called with p == NULL.
  903. */
  904. if (unlikely(!p))
  905. return NULL;
  906. return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
  907. }
  908. struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
  909. {
  910. struct mem_cgroup *memcg = NULL;
  911. if (!mm)
  912. return NULL;
  913. /*
914. * Because we have no locks, mm->owner may be being moved to another
915. * cgroup. We use css_tryget() here even if this looks
  916. * pessimistic (rather than adding locks here).
  917. */
  918. rcu_read_lock();
  919. do {
  920. memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  921. if (unlikely(!memcg))
  922. break;
  923. } while (!css_tryget(&memcg->css));
  924. rcu_read_unlock();
  925. return memcg;
  926. }
  927. /**
  928. * mem_cgroup_iter - iterate over memory cgroup hierarchy
  929. * @root: hierarchy root
  930. * @prev: previously returned memcg, NULL on first invocation
  931. * @reclaim: cookie for shared reclaim walks, NULL for full walks
  932. *
  933. * Returns references to children of the hierarchy below @root, or
  934. * @root itself, or %NULL after a full round-trip.
  935. *
  936. * Caller must pass the return value in @prev on subsequent
  937. * invocations for reference counting, or use mem_cgroup_iter_break()
  938. * to cancel a hierarchy walk before the round-trip is complete.
  939. *
  940. * Reclaimers can specify a zone and a priority level in @reclaim to
  941. * divide up the memcgs in the hierarchy among all concurrent
  942. * reclaimers operating on the same zone and priority.
  943. */
  944. struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
  945. struct mem_cgroup *prev,
  946. struct mem_cgroup_reclaim_cookie *reclaim)
  947. {
  948. struct mem_cgroup *memcg = NULL;
  949. int id = 0;
  950. if (mem_cgroup_disabled())
  951. return NULL;
  952. if (!root)
  953. root = root_mem_cgroup;
  954. if (prev && !reclaim)
  955. id = css_id(&prev->css);
  956. if (prev && prev != root)
  957. css_put(&prev->css);
  958. if (!root->use_hierarchy && root != root_mem_cgroup) {
  959. if (prev)
  960. return NULL;
  961. return root;
  962. }
  963. while (!memcg) {
  964. struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
  965. struct cgroup_subsys_state *css;
  966. if (reclaim) {
  967. int nid = zone_to_nid(reclaim->zone);
  968. int zid = zone_idx(reclaim->zone);
  969. struct mem_cgroup_per_zone *mz;
  970. mz = mem_cgroup_zoneinfo(root, nid, zid);
  971. iter = &mz->reclaim_iter[reclaim->priority];
  972. if (prev && reclaim->generation != iter->generation)
  973. return NULL;
  974. id = iter->position;
  975. }
  976. rcu_read_lock();
  977. css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
  978. if (css) {
  979. if (css == &root->css || css_tryget(css))
  980. memcg = mem_cgroup_from_css(css);
  981. } else
  982. id = 0;
  983. rcu_read_unlock();
  984. if (reclaim) {
  985. iter->position = id;
  986. if (!css)
  987. iter->generation++;
  988. else if (!prev && memcg)
  989. reclaim->generation = iter->generation;
  990. }
  991. if (prev && !css)
  992. return NULL;
  993. }
  994. return memcg;
  995. }
  996. /**
  997. * mem_cgroup_iter_break - abort a hierarchy walk prematurely
  998. * @root: hierarchy root
  999. * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
  1000. */
  1001. void mem_cgroup_iter_break(struct mem_cgroup *root,
  1002. struct mem_cgroup *prev)
  1003. {
  1004. if (!root)
  1005. root = root_mem_cgroup;
  1006. if (prev && prev != root)
  1007. css_put(&prev->css);
  1008. }
  1009. /*
  1010. * Iteration constructs for visiting all cgroups (under a tree). If
  1011. * loops are exited prematurely (break), mem_cgroup_iter_break() must
  1012. * be used for reference counting.
  1013. */
  1014. #define for_each_mem_cgroup_tree(iter, root) \
  1015. for (iter = mem_cgroup_iter(root, NULL, NULL); \
  1016. iter != NULL; \
  1017. iter = mem_cgroup_iter(root, iter, NULL))
  1018. #define for_each_mem_cgroup(iter) \
  1019. for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
  1020. iter != NULL; \
  1021. iter = mem_cgroup_iter(NULL, iter, NULL))
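/*
 * Usage sketch (illustrative only; should_stop() is a placeholder, not a
 * real helper): a tree walk that exits early must hand the last returned
 * memcg to mem_cgroup_iter_break() so its css reference is dropped, as the
 * OOM-lock code later in this file does:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */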
  1022. void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  1023. {
  1024. struct mem_cgroup *memcg;
  1025. rcu_read_lock();
  1026. memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  1027. if (unlikely(!memcg))
  1028. goto out;
  1029. switch (idx) {
  1030. case PGFAULT:
  1031. this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
  1032. break;
  1033. case PGMAJFAULT:
  1034. this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
  1035. break;
  1036. default:
  1037. BUG();
  1038. }
  1039. out:
  1040. rcu_read_unlock();
  1041. }
  1042. EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
  1043. /**
  1044. * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  1045. * @zone: zone of the wanted lruvec
  1046. * @memcg: memcg of the wanted lruvec
  1047. *
  1048. * Returns the lru list vector holding pages for the given @zone and
  1049. * @mem. This can be the global zone lruvec, if the memory controller
  1050. * is disabled.
  1051. */
  1052. struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  1053. struct mem_cgroup *memcg)
  1054. {
  1055. struct mem_cgroup_per_zone *mz;
  1056. struct lruvec *lruvec;
  1057. if (mem_cgroup_disabled()) {
  1058. lruvec = &zone->lruvec;
  1059. goto out;
  1060. }
  1061. mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
  1062. lruvec = &mz->lruvec;
  1063. out:
  1064. /*
  1065. * Since a node can be onlined after the mem_cgroup was created,
  1066. * we have to be prepared to initialize lruvec->zone here;
  1067. * and if offlined then reonlined, we need to reinitialize it.
  1068. */
  1069. if (unlikely(lruvec->zone != zone))
  1070. lruvec->zone = zone;
  1071. return lruvec;
  1072. }
  1073. /*
1074. * The following LRU functions may be used without holding PCG_LOCK.
1075. * Operations are called by global LRU routines independently of memcg.
1076. * What we have to take care of here is the validity of pc->mem_cgroup.
1077. *
1078. * Changes to pc->mem_cgroup happen on
1079. * 1. charge
1080. * 2. moving account
1081. * In the typical case, "charge" is done before add-to-lru. The exception is
1082. * SwapCache, which is added to the LRU before being charged.
1083. * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
1084. * When moving an account, the page is not on the LRU; it is isolated.
  1085. */
  1086. /**
  1087. * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  1088. * @page: the page
  1089. * @zone: zone of the page
  1090. */
  1091. struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
  1092. {
  1093. struct mem_cgroup_per_zone *mz;
  1094. struct mem_cgroup *memcg;
  1095. struct page_cgroup *pc;
  1096. struct lruvec *lruvec;
  1097. if (mem_cgroup_disabled()) {
  1098. lruvec = &zone->lruvec;
  1099. goto out;
  1100. }
  1101. pc = lookup_page_cgroup(page);
  1102. memcg = pc->mem_cgroup;
  1103. /*
  1104. * Surreptitiously switch any uncharged offlist page to root:
  1105. * an uncharged page off lru does nothing to secure
  1106. * its former mem_cgroup from sudden removal.
  1107. *
  1108. * Our caller holds lru_lock, and PageCgroupUsed is updated
  1109. * under page_cgroup lock: between them, they make all uses
  1110. * of pc->mem_cgroup safe.
  1111. */
  1112. if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
  1113. pc->mem_cgroup = memcg = root_mem_cgroup;
  1114. mz = page_cgroup_zoneinfo(memcg, page);
  1115. lruvec = &mz->lruvec;
  1116. out:
  1117. /*
  1118. * Since a node can be onlined after the mem_cgroup was created,
  1119. * we have to be prepared to initialize lruvec->zone here;
  1120. * and if offlined then reonlined, we need to reinitialize it.
  1121. */
  1122. if (unlikely(lruvec->zone != zone))
  1123. lruvec->zone = zone;
  1124. return lruvec;
  1125. }
  1126. /**
  1127. * mem_cgroup_update_lru_size - account for adding or removing an lru page
  1128. * @lruvec: mem_cgroup per zone lru vector
  1129. * @lru: index of lru list the page is sitting on
  1130. * @nr_pages: positive when adding or negative when removing
  1131. *
  1132. * This function must be called when a page is added to or removed from an
  1133. * lru list.
  1134. */
  1135. void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
  1136. int nr_pages)
  1137. {
  1138. struct mem_cgroup_per_zone *mz;
  1139. unsigned long *lru_size;
  1140. if (mem_cgroup_disabled())
  1141. return;
  1142. mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
  1143. lru_size = mz->lru_size + lru;
  1144. *lru_size += nr_pages;
  1145. VM_BUG_ON((long)(*lru_size) < 0);
  1146. }
  1147. /*
1148. * Checks whether the given memcg is the same as root_memcg or lies in
1149. * root_memcg's hierarchy subtree
  1150. */
  1151. bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
  1152. struct mem_cgroup *memcg)
  1153. {
  1154. if (root_memcg == memcg)
  1155. return true;
  1156. if (!root_memcg->use_hierarchy || !memcg)
  1157. return false;
  1158. return css_is_ancestor(&memcg->css, &root_memcg->css);
  1159. }
  1160. static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
  1161. struct mem_cgroup *memcg)
  1162. {
  1163. bool ret;
  1164. rcu_read_lock();
  1165. ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
  1166. rcu_read_unlock();
  1167. return ret;
  1168. }
  1169. int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
  1170. {
  1171. int ret;
  1172. struct mem_cgroup *curr = NULL;
  1173. struct task_struct *p;
  1174. p = find_lock_task_mm(task);
  1175. if (p) {
  1176. curr = try_get_mem_cgroup_from_mm(p->mm);
  1177. task_unlock(p);
  1178. } else {
  1179. /*
  1180. * All threads may have already detached their mm's, but the oom
  1181. * killer still needs to detect if they have already been oom
  1182. * killed to prevent needlessly killing additional tasks.
  1183. */
  1184. task_lock(task);
  1185. curr = mem_cgroup_from_task(task);
  1186. if (curr)
  1187. css_get(&curr->css);
  1188. task_unlock(task);
  1189. }
  1190. if (!curr)
  1191. return 0;
  1192. /*
1193. * We should check use_hierarchy of "memcg", not "curr". Checking
1194. * use_hierarchy of "curr" here would make this function return true if
1195. * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1196. * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
  1197. */
  1198. ret = mem_cgroup_same_or_subtree(memcg, curr);
  1199. css_put(&curr->css);
  1200. return ret;
  1201. }
  1202. int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
  1203. {
  1204. unsigned long inactive_ratio;
  1205. unsigned long inactive;
  1206. unsigned long active;
  1207. unsigned long gb;
  1208. inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
  1209. active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
  1210. gb = (inactive + active) >> (30 - PAGE_SHIFT);
  1211. if (gb)
  1212. inactive_ratio = int_sqrt(10 * gb);
  1213. else
  1214. inactive_ratio = 1;
  1215. return inactive * inactive_ratio < active;
  1216. }
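/*
 * Worked example for the ratio above (numbers are illustrative only): with
 * 4GB of anon pages on this lruvec, gb = 4 and inactive_ratio =
 * int_sqrt(40) = 6, so "inactive is low" is reported while
 * inactive * 6 < active, i.e. while less than roughly 1/7 of the anon pages
 * sit on the inactive list. Small memcgs (gb == 0) fall back to a 1:1 ratio.
 */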
  1217. int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
  1218. {
  1219. unsigned long active;
  1220. unsigned long inactive;
  1221. inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1222. active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
  1223. return (active > inactive);
  1224. }
  1225. #define mem_cgroup_from_res_counter(counter, member) \
  1226. container_of(counter, struct mem_cgroup, member)
  1227. /**
  1228. * mem_cgroup_margin - calculate chargeable space of a memory cgroup
  1229. * @memcg: the memory cgroup
  1230. *
  1231. * Returns the maximum amount of memory @mem can be charged with, in
  1232. * pages.
  1233. */
  1234. static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
  1235. {
  1236. unsigned long long margin;
  1237. margin = res_counter_margin(&memcg->res);
  1238. if (do_swap_account)
  1239. margin = min(margin, res_counter_margin(&memcg->memsw));
  1240. return margin >> PAGE_SHIFT;
  1241. }
  1242. int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  1243. {
  1244. struct cgroup *cgrp = memcg->css.cgroup;
  1245. /* root ? */
  1246. if (cgrp->parent == NULL)
  1247. return vm_swappiness;
  1248. return memcg->swappiness;
  1249. }
  1250. /*
  1251. * memcg->moving_account is used for checking possibility that some thread is
  1252. * calling move_account(). When a thread on CPU-A starts moving pages under
  1253. * a memcg, other threads should check memcg->moving_account under
  1254. * rcu_read_lock(), like this:
  1255. *
  1256. * CPU-A CPU-B
  1257. * rcu_read_lock()
1258. * memcg->moving_account+1 if (memcg->moving_account)
  1259. * take heavy locks.
  1260. * synchronize_rcu() update something.
  1261. * rcu_read_unlock()
  1262. * start move here.
  1263. */
  1264. /* for quick checking without looking up memcg */
  1265. atomic_t memcg_moving __read_mostly;
  1266. static void mem_cgroup_start_move(struct mem_cgroup *memcg)
  1267. {
  1268. atomic_inc(&memcg_moving);
  1269. atomic_inc(&memcg->moving_account);
  1270. synchronize_rcu();
  1271. }
  1272. static void mem_cgroup_end_move(struct mem_cgroup *memcg)
  1273. {
  1274. /*
  1275. * Now, mem_cgroup_clear_mc() may call this function with NULL.
  1276. * We check NULL in callee rather than caller.
  1277. */
  1278. if (memcg) {
  1279. atomic_dec(&memcg_moving);
  1280. atomic_dec(&memcg->moving_account);
  1281. }
  1282. }
  1283. /*
1284. * Two routines for checking whether "mem" is under move_account() or not.
1285. *
1286. * mem_cgroup_stolen() - checks whether a cgroup is mc.from or not. This
1287. * is used for avoiding races in accounting. If true,
1288. * pc->mem_cgroup may be overwritten.
1289. *
1290. * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1291. * under the hierarchy of moving cgroups. This is for
1292. * waiting at high memory pressure caused by "move".
  1293. */
  1294. static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
  1295. {
  1296. VM_BUG_ON(!rcu_read_lock_held());
  1297. return atomic_read(&memcg->moving_account) > 0;
  1298. }
  1299. static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
  1300. {
  1301. struct mem_cgroup *from;
  1302. struct mem_cgroup *to;
  1303. bool ret = false;
  1304. /*
  1305. * Unlike task_move routines, we access mc.to, mc.from not under
  1306. * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
  1307. */
  1308. spin_lock(&mc.lock);
  1309. from = mc.from;
  1310. to = mc.to;
  1311. if (!from)
  1312. goto unlock;
  1313. ret = mem_cgroup_same_or_subtree(memcg, from)
  1314. || mem_cgroup_same_or_subtree(memcg, to);
  1315. unlock:
  1316. spin_unlock(&mc.lock);
  1317. return ret;
  1318. }
  1319. static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
  1320. {
  1321. if (mc.moving_task && current != mc.moving_task) {
  1322. if (mem_cgroup_under_move(memcg)) {
  1323. DEFINE_WAIT(wait);
  1324. prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
  1325. /* moving charge context might have finished. */
  1326. if (mc.moving_task)
  1327. schedule();
  1328. finish_wait(&mc.waitq, &wait);
  1329. return true;
  1330. }
  1331. }
  1332. return false;
  1333. }
  1334. /*
  1335. * Take this lock when
1336. * - code tries to modify a page's memcg while it's USED.
1337. * - code tries to modify page state accounting in a memcg.
1338. * See mem_cgroup_stolen(), too.
  1339. */
  1340. static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
  1341. unsigned long *flags)
  1342. {
  1343. spin_lock_irqsave(&memcg->move_lock, *flags);
  1344. }
  1345. static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
  1346. unsigned long *flags)
  1347. {
  1348. spin_unlock_irqrestore(&memcg->move_lock, *flags);
  1349. }
  1350. #define K(x) ((x) << (PAGE_SHIFT-10))
  1351. /**
  1352. * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
  1353. * @memcg: The memory cgroup that went over limit
  1354. * @p: Task that is going to be killed
  1355. *
  1356. * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  1357. * enabled
  1358. */
  1359. void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  1360. {
  1361. struct cgroup *task_cgrp;
  1362. struct cgroup *mem_cgrp;
  1363. /*
  1364. * Need a buffer in BSS, can't rely on allocations. The code relies
  1365. * on the assumption that OOM is serialized for memory controller.
  1366. * If this assumption is broken, revisit this code.
  1367. */
  1368. static char memcg_name[PATH_MAX];
  1369. int ret;
  1370. struct mem_cgroup *iter;
  1371. unsigned int i;
  1372. if (!p)
  1373. return;
  1374. rcu_read_lock();
  1375. mem_cgrp = memcg->css.cgroup;
  1376. task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
  1377. ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
  1378. if (ret < 0) {
  1379. /*
1380. * Unfortunately, we are unable to convert to a useful name,
1381. * but we'll still print out the usage information.
  1382. */
  1383. rcu_read_unlock();
  1384. goto done;
  1385. }
  1386. rcu_read_unlock();
  1387. pr_info("Task in %s killed", memcg_name);
  1388. rcu_read_lock();
  1389. ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
  1390. if (ret < 0) {
  1391. rcu_read_unlock();
  1392. goto done;
  1393. }
  1394. rcu_read_unlock();
  1395. /*
1396. * Continues from above, so we don't need a KERN_ level
  1397. */
  1398. pr_cont(" as a result of limit of %s\n", memcg_name);
  1399. done:
  1400. pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
  1401. res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
  1402. res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
  1403. res_counter_read_u64(&memcg->res, RES_FAILCNT));
  1404. pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
  1405. res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
  1406. res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
  1407. res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
  1408. pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
  1409. res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
  1410. res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
  1411. res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
  1412. for_each_mem_cgroup_tree(iter, memcg) {
  1413. pr_info("Memory cgroup stats");
  1414. rcu_read_lock();
  1415. ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
  1416. if (!ret)
  1417. pr_cont(" for %s", memcg_name);
  1418. rcu_read_unlock();
  1419. pr_cont(":");
  1420. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  1421. if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
  1422. continue;
  1423. pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
  1424. K(mem_cgroup_read_stat(iter, i)));
  1425. }
  1426. for (i = 0; i < NR_LRU_LISTS; i++)
  1427. pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
  1428. K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
  1429. pr_cont("\n");
  1430. }
  1431. }
  1432. /*
1433. * This function returns the number of memcgs under the hierarchy tree.
1434. * Returns 1 (self count) if there are no children.
  1435. */
  1436. static int mem_cgroup_count_children(struct mem_cgroup *memcg)
  1437. {
  1438. int num = 0;
  1439. struct mem_cgroup *iter;
  1440. for_each_mem_cgroup_tree(iter, memcg)
  1441. num++;
  1442. return num;
  1443. }
  1444. /*
  1445. * Return the memory (and swap, if configured) limit for a memcg.
  1446. */
  1447. static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
  1448. {
  1449. u64 limit;
  1450. limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  1451. /*
  1452. * Do not consider swap space if we cannot swap due to swappiness
  1453. */
  1454. if (mem_cgroup_swappiness(memcg)) {
  1455. u64 memsw;
  1456. limit += total_swap_pages << PAGE_SHIFT;
  1457. memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  1458. /*
  1459. * If memsw is finite and limits the amount of swap space
  1460. * available to this memcg, return that limit.
  1461. */
  1462. limit = min(limit, memsw);
  1463. }
  1464. return limit;
  1465. }
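/*
 * Worked example for the calculation above (numbers are illustrative only):
 * with a 1GB memory limit, 2GB of total swap and a 1.5GB memsw limit, a
 * swappable memcg gets min(1GB + 2GB, 1.5GB) = 1.5GB; with swappiness == 0
 * the swap term is skipped and the result is just the 1GB memory limit.
 */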
  1466. static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
  1467. int order)
  1468. {
  1469. struct mem_cgroup *iter;
  1470. unsigned long chosen_points = 0;
  1471. unsigned long totalpages;
  1472. unsigned int points = 0;
  1473. struct task_struct *chosen = NULL;
  1474. /*
  1475. * If current has a pending SIGKILL, then automatically select it. The
  1476. * goal is to allow it to allocate so that it may quickly exit and free
  1477. * its memory.
  1478. */
  1479. if (fatal_signal_pending(current)) {
  1480. set_thread_flag(TIF_MEMDIE);
  1481. return;
  1482. }
  1483. check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
  1484. totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
  1485. for_each_mem_cgroup_tree(iter, memcg) {
  1486. struct cgroup *cgroup = iter->css.cgroup;
  1487. struct cgroup_iter it;
  1488. struct task_struct *task;
  1489. cgroup_iter_start(cgroup, &it);
  1490. while ((task = cgroup_iter_next(cgroup, &it))) {
  1491. switch (oom_scan_process_thread(task, totalpages, NULL,
  1492. false)) {
  1493. case OOM_SCAN_SELECT:
  1494. if (chosen)
  1495. put_task_struct(chosen);
  1496. chosen = task;
  1497. chosen_points = ULONG_MAX;
  1498. get_task_struct(chosen);
  1499. /* fall through */
  1500. case OOM_SCAN_CONTINUE:
  1501. continue;
  1502. case OOM_SCAN_ABORT:
  1503. cgroup_iter_end(cgroup, &it);
  1504. mem_cgroup_iter_break(memcg, iter);
  1505. if (chosen)
  1506. put_task_struct(chosen);
  1507. return;
  1508. case OOM_SCAN_OK:
  1509. break;
  1510. };
  1511. points = oom_badness(task, memcg, NULL, totalpages);
  1512. if (points > chosen_points) {
  1513. if (chosen)
  1514. put_task_struct(chosen);
  1515. chosen = task;
  1516. chosen_points = points;
  1517. get_task_struct(chosen);
  1518. }
  1519. }
  1520. cgroup_iter_end(cgroup, &it);
  1521. }
  1522. if (!chosen)
  1523. return;
  1524. points = chosen_points * 1000 / totalpages;
  1525. oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
  1526. NULL, "Memory cgroup out of memory");
  1527. }
  1528. static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
  1529. gfp_t gfp_mask,
  1530. unsigned long flags)
  1531. {
  1532. unsigned long total = 0;
  1533. bool noswap = false;
  1534. int loop;
  1535. if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
  1536. noswap = true;
  1537. if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
  1538. noswap = true;
  1539. for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
  1540. if (loop)
  1541. drain_all_stock_async(memcg);
  1542. total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
  1543. /*
  1544. * Allow limit shrinkers, which are triggered directly
  1545. * by userspace, to catch signals and stop reclaim
  1546. * after minimal progress, regardless of the margin.
  1547. */
  1548. if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
  1549. break;
  1550. if (mem_cgroup_margin(memcg))
  1551. break;
  1552. /*
  1553. * If nothing was reclaimed after two attempts, there
  1554. * may be no reclaimable pages in this hierarchy.
  1555. */
  1556. if (loop && !total)
  1557. break;
  1558. }
  1559. return total;
  1560. }
  1561. /**
  1562. * test_mem_cgroup_node_reclaimable
  1563. * @memcg: the target memcg
  1564. * @nid: the node ID to be checked.
1565. * @noswap : specify true here if the user wants file-only information.
  1566. *
  1567. * This function returns whether the specified memcg contains any
  1568. * reclaimable pages on a node. Returns true if there are any reclaimable
  1569. * pages in the node.
  1570. */
  1571. static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
  1572. int nid, bool noswap)
  1573. {
  1574. if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
  1575. return true;
  1576. if (noswap || !total_swap_pages)
  1577. return false;
  1578. if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
  1579. return true;
  1580. return false;
  1581. }
  1582. #if MAX_NUMNODES > 1
  1583. /*
  1584. * Always updating the nodemask is not very good - even if we have an empty
  1585. * list or the wrong list here, we can start from some node and traverse all
  1586. * nodes based on the zonelist. So update the list loosely once per 10 secs.
  1587. *
  1588. */
  1589. static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
  1590. {
  1591. int nid;
  1592. /*
1593. * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1594. * pagein/pageout changes since the last update.
  1595. */
  1596. if (!atomic_read(&memcg->numainfo_events))
  1597. return;
  1598. if (atomic_inc_return(&memcg->numainfo_updating) > 1)
  1599. return;
  1600. /* make a nodemask where this memcg uses memory from */
  1601. memcg->scan_nodes = node_states[N_MEMORY];
  1602. for_each_node_mask(nid, node_states[N_MEMORY]) {
  1603. if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
  1604. node_clear(nid, memcg->scan_nodes);
  1605. }
  1606. atomic_set(&memcg->numainfo_events, 0);
  1607. atomic_set(&memcg->numainfo_updating, 0);
  1608. }
  1609. /*
1610. * Select a node to start reclaim from. Because all we need is to reduce the
1611. * usage counter, starting from anywhere is fine. Reclaiming memory from the
1612. * current node has both pros and cons.
1613. *
1614. * Freeing memory from the current node means freeing memory from a node which
1615. * we'll use or have used, so it may hurt the LRU. And if several threads hit
1616. * their limits, they will all contend on one node. But freeing from a remote
1617. * node means higher memory-reclaim costs because of memory latency.
1618. *
1619. * For now, we use round-robin. A better algorithm is welcome.
  1620. */
  1621. int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  1622. {
  1623. int node;
  1624. mem_cgroup_may_update_nodemask(memcg);
  1625. node = memcg->last_scanned_node;
  1626. node = next_node(node, memcg->scan_nodes);
  1627. if (node == MAX_NUMNODES)
  1628. node = first_node(memcg->scan_nodes);
  1629. /*
1630. * We call this when we hit the limit, not when pages are added to the LRU.
1631. * No LRU may hold pages because all pages are UNEVICTABLE, or
1632. * the memcg is too small and all pages are not on the LRU. In that case,
1633. * we use the current node.
  1634. */
  1635. if (unlikely(node == MAX_NUMNODES))
  1636. node = numa_node_id();
  1637. memcg->last_scanned_node = node;
  1638. return node;
  1639. }
  1640. /*
1641. * Check all nodes for whether they contain reclaimable pages or not.
1642. * For a quick scan, we make use of scan_nodes. This lets us skip
1643. * unused nodes. But scan_nodes is lazily updated and may not contain
1644. * enough new information. We need to double check.
  1645. */
  1646. static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  1647. {
  1648. int nid;
  1649. /*
  1650. * quick check...making use of scan_node.
  1651. * We can skip unused nodes.
  1652. */
  1653. if (!nodes_empty(memcg->scan_nodes)) {
  1654. for (nid = first_node(memcg->scan_nodes);
  1655. nid < MAX_NUMNODES;
  1656. nid = next_node(nid, memcg->scan_nodes)) {
  1657. if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
  1658. return true;
  1659. }
  1660. }
  1661. /*
  1662. * Check rest of nodes.
  1663. */
  1664. for_each_node_state(nid, N_MEMORY) {
  1665. if (node_isset(nid, memcg->scan_nodes))
  1666. continue;
  1667. if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
  1668. return true;
  1669. }
  1670. return false;
  1671. }
  1672. #else
  1673. int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  1674. {
  1675. return 0;
  1676. }
  1677. static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  1678. {
  1679. return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
  1680. }
  1681. #endif
  1682. static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
  1683. struct zone *zone,
  1684. gfp_t gfp_mask,
  1685. unsigned long *total_scanned)
  1686. {
  1687. struct mem_cgroup *victim = NULL;
  1688. int total = 0;
  1689. int loop = 0;
  1690. unsigned long excess;
  1691. unsigned long nr_scanned;
  1692. struct mem_cgroup_reclaim_cookie reclaim = {
  1693. .zone = zone,
  1694. .priority = 0,
  1695. };
  1696. excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
  1697. while (1) {
  1698. victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
  1699. if (!victim) {
  1700. loop++;
  1701. if (loop >= 2) {
  1702. /*
1703. * If we have not been able to reclaim
1704. * anything, it might be because there are
1705. * no reclaimable pages under this hierarchy.
  1706. */
  1707. if (!total)
  1708. break;
  1709. /*
1710. * We want to do more targeted reclaim.
1711. * excess >> 2 is not so excessive that we
1712. * reclaim too much, nor so small that we keep
1713. * coming back to reclaim from this cgroup.
  1714. */
  1715. if (total >= (excess >> 2) ||
  1716. (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
  1717. break;
  1718. }
  1719. continue;
  1720. }
  1721. if (!mem_cgroup_reclaimable(victim, false))
  1722. continue;
  1723. total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
  1724. zone, &nr_scanned);
  1725. *total_scanned += nr_scanned;
  1726. if (!res_counter_soft_limit_excess(&root_memcg->res))
  1727. break;
  1728. }
  1729. mem_cgroup_iter_break(root_memcg, victim);
  1730. return total;
  1731. }
  1732. /*
1733. * Check whether the OOM killer is already running under our hierarchy.
1734. * If someone is running, return false.
1735. * Has to be called with memcg_oom_lock held.
  1736. */
  1737. static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
  1738. {
  1739. struct mem_cgroup *iter, *failed = NULL;
  1740. for_each_mem_cgroup_tree(iter, memcg) {
  1741. if (iter->oom_lock) {
  1742. /*
1743. * this subtree of our hierarchy is already locked
1744. * so we cannot grant the lock.
  1745. */
  1746. failed = iter;
  1747. mem_cgroup_iter_break(memcg, iter);
  1748. break;
  1749. } else
  1750. iter->oom_lock = true;
  1751. }
  1752. if (!failed)
  1753. return true;
  1754. /*
1755. * OK, we failed to lock the whole subtree, so we have to clean up
1756. * what we set up, up to the failing subtree.
  1757. */
  1758. for_each_mem_cgroup_tree(iter, memcg) {
  1759. if (iter == failed) {
  1760. mem_cgroup_iter_break(memcg, iter);
  1761. break;
  1762. }
  1763. iter->oom_lock = false;
  1764. }
  1765. return false;
  1766. }
  1767. /*
1768. * Has to be called with memcg_oom_lock held.
  1769. */
  1770. static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
  1771. {
  1772. struct mem_cgroup *iter;
  1773. for_each_mem_cgroup_tree(iter, memcg)
  1774. iter->oom_lock = false;
  1775. return 0;
  1776. }
  1777. static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
  1778. {
  1779. struct mem_cgroup *iter;
  1780. for_each_mem_cgroup_tree(iter, memcg)
  1781. atomic_inc(&iter->under_oom);
  1782. }
  1783. static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
  1784. {
  1785. struct mem_cgroup *iter;
  1786. /*
  1787. * When a new child is created while the hierarchy is under oom,
  1788. * mem_cgroup_oom_lock() may not be called. We have to use
  1789. * atomic_add_unless() here.
  1790. */
  1791. for_each_mem_cgroup_tree(iter, memcg)
  1792. atomic_add_unless(&iter->under_oom, -1, 0);
  1793. }
  1794. static DEFINE_SPINLOCK(memcg_oom_lock);
  1795. static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
  1796. struct oom_wait_info {
  1797. struct mem_cgroup *memcg;
  1798. wait_queue_t wait;
  1799. };
  1800. static int memcg_oom_wake_function(wait_queue_t *wait,
  1801. unsigned mode, int sync, void *arg)
  1802. {
  1803. struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
  1804. struct mem_cgroup *oom_wait_memcg;
  1805. struct oom_wait_info *oom_wait_info;
  1806. oom_wait_info = container_of(wait, struct oom_wait_info, wait);
  1807. oom_wait_memcg = oom_wait_info->memcg;
  1808. /*
  1809. * Both of oom_wait_info->memcg and wake_memcg are stable under us.
  1810. * Then we can use css_is_ancestor without taking care of RCU.
  1811. */
  1812. if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
  1813. && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
  1814. return 0;
  1815. return autoremove_wake_function(wait, mode, sync, arg);
  1816. }
  1817. static void memcg_wakeup_oom(struct mem_cgroup *memcg)
  1818. {
  1819. /* for filtering, pass "memcg" as argument. */
  1820. __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
  1821. }
  1822. static void memcg_oom_recover(struct mem_cgroup *memcg)
  1823. {
  1824. if (memcg && atomic_read(&memcg->under_oom))
  1825. memcg_wakeup_oom(memcg);
  1826. }
  1827. /*
1828. * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
  1829. */
  1830. static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
  1831. int order)
  1832. {
  1833. struct oom_wait_info owait;
  1834. bool locked, need_to_kill;
  1835. owait.memcg = memcg;
  1836. owait.wait.flags = 0;
  1837. owait.wait.func = memcg_oom_wake_function;
  1838. owait.wait.private = current;
  1839. INIT_LIST_HEAD(&owait.wait.task_list);
  1840. need_to_kill = true;
  1841. mem_cgroup_mark_under_oom(memcg);
  1842. /* At first, try to OOM lock hierarchy under memcg.*/
  1843. spin_lock(&memcg_oom_lock);
  1844. locked = mem_cgroup_oom_lock(memcg);
  1845. /*
  1846. * Even if signal_pending(), we can't quit charge() loop without
  1847. * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
  1848. * under OOM is always welcomed, use TASK_KILLABLE here.
  1849. */
  1850. prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
  1851. if (!locked || memcg->oom_kill_disable)
  1852. need_to_kill = false;
  1853. if (locked)
  1854. mem_cgroup_oom_notify(memcg);
  1855. spin_unlock(&memcg_oom_lock);
  1856. if (need_to_kill) {
  1857. finish_wait(&memcg_oom_waitq, &owait.wait);
  1858. mem_cgroup_out_of_memory(memcg, mask, order);
  1859. } else {
  1860. schedule();
  1861. finish_wait(&memcg_oom_waitq, &owait.wait);
  1862. }
  1863. spin_lock(&memcg_oom_lock);
  1864. if (locked)
  1865. mem_cgroup_oom_unlock(memcg);
  1866. memcg_wakeup_oom(memcg);
  1867. spin_unlock(&memcg_oom_lock);
  1868. mem_cgroup_unmark_under_oom(memcg);
  1869. if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
  1870. return false;
  1871. /* Give chance to dying process */
  1872. schedule_timeout_uninterruptible(1);
  1873. return true;
  1874. }
  1875. /*
  1876. * Currently used to update mapped file statistics, but the routine can be
  1877. * generalized to update other statistics as well.
  1878. *
  1879. * Notes: Race condition
  1880. *
1881. * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1882. * it tends to be costly. Under some conditions, however, we don't need
1883. * to do so _always_.
1884. *
1885. * Considering "charge", lock_page_cgroup() is not required because all
1886. * file-stat operations happen after a page is attached to the radix-tree.
1887. * There is no race with "charge".
1888. *
1889. * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1890. * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
1891. * if there is a race with "uncharge". The statistics themselves are properly
1892. * handled by flags.
1893. *
1894. * Considering "move", this is the only case where we see a race. To keep the
1895. * race window small, we check memcg->moving_account and detect the possibility
1896. * of a race. If there is one, we take the lock.
  1897. */
  1898. void __mem_cgroup_begin_update_page_stat(struct page *page,
  1899. bool *locked, unsigned long *flags)
  1900. {
  1901. struct mem_cgroup *memcg;
  1902. struct page_cgroup *pc;
  1903. pc = lookup_page_cgroup(page);
  1904. again:
  1905. memcg = pc->mem_cgroup;
  1906. if (unlikely(!memcg || !PageCgroupUsed(pc)))
  1907. return;
  1908. /*
  1909. * If this memory cgroup is not under account moving, we don't
  1910. * need to take move_lock_mem_cgroup(). Because we already hold
  1911. * rcu_read_lock(), any calls to move_account will be delayed until
  1912. * rcu_read_unlock() if mem_cgroup_stolen() == true.
  1913. */
  1914. if (!mem_cgroup_stolen(memcg))
  1915. return;
  1916. move_lock_mem_cgroup(memcg, flags);
  1917. if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
  1918. move_unlock_mem_cgroup(memcg, flags);
  1919. goto again;
  1920. }
  1921. *locked = true;
  1922. }
  1923. void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
  1924. {
  1925. struct page_cgroup *pc = lookup_page_cgroup(page);
  1926. /*
1927. * It's guaranteed that pc->mem_cgroup never changes while
1928. * the lock is held, because any routine that modifies pc->mem_cgroup
1929. * should take move_lock_mem_cgroup().
  1930. */
  1931. move_unlock_mem_cgroup(pc->mem_cgroup, flags);
  1932. }
  1933. void mem_cgroup_update_page_stat(struct page *page,
  1934. enum mem_cgroup_page_stat_item idx, int val)
  1935. {
  1936. struct mem_cgroup *memcg;
  1937. struct page_cgroup *pc = lookup_page_cgroup(page);
  1938. unsigned long uninitialized_var(flags);
  1939. if (mem_cgroup_disabled())
  1940. return;
  1941. memcg = pc->mem_cgroup;
  1942. if (unlikely(!memcg || !PageCgroupUsed(pc)))
  1943. return;
  1944. switch (idx) {
  1945. case MEMCG_NR_FILE_MAPPED:
  1946. idx = MEM_CGROUP_STAT_FILE_MAPPED;
  1947. break;
  1948. default:
  1949. BUG();
  1950. }
  1951. this_cpu_add(memcg->stat->count[idx], val);
  1952. }
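/*
 * Caller-side sketch of the page-stat update protocol above (illustrative
 * only; real call sites live outside this file and may go through wrapper
 * helpers, and the rcu locking shown simply mirrors what the comments above
 * describe):
 *
 *	bool locked = false;
 *	unsigned long flags;
 *
 *	rcu_read_lock();
 *	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
 *	if (locked)
 *		__mem_cgroup_end_update_page_stat(page, &flags);
 *	rcu_read_unlock();
 */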
  1953. /*
1954. * size of the first charge trial. "32" comes from vmscan.c's magic value.
1955. * TODO: it may be necessary to use bigger numbers on big iron.
  1956. */
  1957. #define CHARGE_BATCH 32U
  1958. struct memcg_stock_pcp {
1959. struct mem_cgroup *cached; /* this is never the root cgroup */
  1960. unsigned int nr_pages;
  1961. struct work_struct work;
  1962. unsigned long flags;
  1963. #define FLUSHING_CACHED_CHARGE 0
  1964. };
  1965. static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
  1966. static DEFINE_MUTEX(percpu_charge_mutex);
  1967. /**
  1968. * consume_stock: Try to consume stocked charge on this cpu.
  1969. * @memcg: memcg to consume from.
  1970. * @nr_pages: how many pages to charge.
  1971. *
  1972. * The charges will only happen if @memcg matches the current cpu's memcg
  1973. * stock, and at least @nr_pages are available in that stock. Failure to
  1974. * service an allocation will refill the stock.
  1975. *
  1976. * returns true if successful, false otherwise.
  1977. */
  1978. static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  1979. {
  1980. struct memcg_stock_pcp *stock;
  1981. bool ret = true;
  1982. if (nr_pages > CHARGE_BATCH)
  1983. return false;
  1984. stock = &get_cpu_var(memcg_stock);
  1985. if (memcg == stock->cached && stock->nr_pages >= nr_pages)
  1986. stock->nr_pages -= nr_pages;
  1987. else /* need to call res_counter_charge */
  1988. ret = false;
  1989. put_cpu_var(memcg_stock);
  1990. return ret;
  1991. }
  1992. /*
1993. * Returns stock cached in percpu to the res_counter and resets the cached information.
  1994. */
  1995. static void drain_stock(struct memcg_stock_pcp *stock)
  1996. {
  1997. struct mem_cgroup *old = stock->cached;
  1998. if (stock->nr_pages) {
  1999. unsigned long bytes = stock->nr_pages * PAGE_SIZE;
  2000. res_counter_uncharge(&old->res, bytes);
  2001. if (do_swap_account)
  2002. res_counter_uncharge(&old->memsw, bytes);
  2003. stock->nr_pages = 0;
  2004. }
  2005. stock->cached = NULL;
  2006. }
  2007. /*
2008. * This must be called with preemption disabled, or by
2009. * a thread which is pinned to the local cpu.
  2010. */
  2011. static void drain_local_stock(struct work_struct *dummy)
  2012. {
  2013. struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
  2014. drain_stock(stock);
  2015. clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
  2016. }
  2017. /*
2018. * Cache charges (nr_pages) obtained from the res_counter in the local per-cpu area.
2019. * They will be consumed by consume_stock() later.
  2020. */
  2021. static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  2022. {
  2023. struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
  2024. if (stock->cached != memcg) { /* reset if necessary */
  2025. drain_stock(stock);
  2026. stock->cached = memcg;
  2027. }
  2028. stock->nr_pages += nr_pages;
  2029. put_cpu_var(memcg_stock);
  2030. }
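/*
 * Sketch of how the stock is used by the charge path (a simplified
 * illustration of what __mem_cgroup_try_charge() below does, not a separate
 * interface): charge in CHARGE_BATCH-sized chunks and park the surplus in
 * the per-cpu stock so the next small charge can skip the res_counter:
 *
 *	if (!consume_stock(memcg, nr_pages)) {
 *		unsigned int batch = max(CHARGE_BATCH, nr_pages);
 *		(charge "batch" pages against the res_counter here, then)
 *		if (batch > nr_pages)
 *			refill_stock(memcg, batch - nr_pages);
 *	}
 */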
  2031. /*
2032. * Drains all per-CPU charge caches for the given root_memcg, resp. the
2033. * subtree of the hierarchy under it. The sync flag says whether we should
2034. * block until the work is done.
  2035. */
  2036. static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
  2037. {
  2038. int cpu, curcpu;
  2039. /* Notify other cpus that system-wide "drain" is running */
  2040. get_online_cpus();
  2041. curcpu = get_cpu();
  2042. for_each_online_cpu(cpu) {
  2043. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  2044. struct mem_cgroup *memcg;
  2045. memcg = stock->cached;
  2046. if (!memcg || !stock->nr_pages)
  2047. continue;
  2048. if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
  2049. continue;
  2050. if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
  2051. if (cpu == curcpu)
  2052. drain_local_stock(&stock->work);
  2053. else
  2054. schedule_work_on(cpu, &stock->work);
  2055. }
  2056. }
  2057. put_cpu();
  2058. if (!sync)
  2059. goto out;
  2060. for_each_online_cpu(cpu) {
  2061. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  2062. if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
  2063. flush_work(&stock->work);
  2064. }
  2065. out:
  2066. put_online_cpus();
  2067. }
  2068. /*
2069. * Tries to drain stocked charges on other cpus. This function is asynchronous
2070. * and just schedules a work item per cpu for draining locally on each cpu. The
2071. * caller can expect some charges to return to the res_counter later, but cannot
2072. * wait for that.
  2073. */
  2074. static void drain_all_stock_async(struct mem_cgroup *root_memcg)
  2075. {
  2076. /*
  2077. * If someone calls draining, avoid adding more kworker runs.
  2078. */
  2079. if (!mutex_trylock(&percpu_charge_mutex))
  2080. return;
  2081. drain_all_stock(root_memcg, false);
  2082. mutex_unlock(&percpu_charge_mutex);
  2083. }
  2084. /* This is a synchronous drain interface. */
  2085. static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
  2086. {
  2087. /* called when force_empty is called */
  2088. mutex_lock(&percpu_charge_mutex);
  2089. drain_all_stock(root_memcg, true);
  2090. mutex_unlock(&percpu_charge_mutex);
  2091. }
  2092. /*
2093. * This function drains the percpu counter values from a DEAD cpu and
2094. * moves them to the local cpu. Note that this function can be preempted.
  2095. */
  2096. static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
  2097. {
  2098. int i;
  2099. spin_lock(&memcg->pcp_counter_lock);
  2100. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  2101. long x = per_cpu(memcg->stat->count[i], cpu);
  2102. per_cpu(memcg->stat->count[i], cpu) = 0;
  2103. memcg->nocpu_base.count[i] += x;
  2104. }
  2105. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
  2106. unsigned long x = per_cpu(memcg->stat->events[i], cpu);
  2107. per_cpu(memcg->stat->events[i], cpu) = 0;
  2108. memcg->nocpu_base.events[i] += x;
  2109. }
  2110. spin_unlock(&memcg->pcp_counter_lock);
  2111. }
  2112. static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
  2113. unsigned long action,
  2114. void *hcpu)
  2115. {
  2116. int cpu = (unsigned long)hcpu;
  2117. struct memcg_stock_pcp *stock;
  2118. struct mem_cgroup *iter;
  2119. if (action == CPU_ONLINE)
  2120. return NOTIFY_OK;
  2121. if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
  2122. return NOTIFY_OK;
  2123. for_each_mem_cgroup(iter)
  2124. mem_cgroup_drain_pcp_counter(iter, cpu);
  2125. stock = &per_cpu(memcg_stock, cpu);
  2126. drain_stock(stock);
  2127. return NOTIFY_OK;
  2128. }
  2129. /* See __mem_cgroup_try_charge() for details */
  2130. enum {
  2131. CHARGE_OK, /* success */
  2132. CHARGE_RETRY, /* need to retry but retry is not bad */
  2133. CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2134. CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and not enough res. */
  2135. CHARGE_OOM_DIE, /* the current is killed because of OOM */
  2136. };
  2137. static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
  2138. unsigned int nr_pages, unsigned int min_pages,
  2139. bool oom_check)
  2140. {
  2141. unsigned long csize = nr_pages * PAGE_SIZE;
  2142. struct mem_cgroup *mem_over_limit;
  2143. struct res_counter *fail_res;
  2144. unsigned long flags = 0;
  2145. int ret;
  2146. ret = res_counter_charge(&memcg->res, csize, &fail_res);
  2147. if (likely(!ret)) {
  2148. if (!do_swap_account)
  2149. return CHARGE_OK;
  2150. ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
  2151. if (likely(!ret))
  2152. return CHARGE_OK;
  2153. res_counter_uncharge(&memcg->res, csize);
  2154. mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
  2155. flags |= MEM_CGROUP_RECLAIM_NOSWAP;
  2156. } else
  2157. mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
  2158. /*
  2159. * Never reclaim on behalf of optional batching, retry with a
  2160. * single page instead.
  2161. */
  2162. if (nr_pages > min_pages)
  2163. return CHARGE_RETRY;
  2164. if (!(gfp_mask & __GFP_WAIT))
  2165. return CHARGE_WOULDBLOCK;
  2166. if (gfp_mask & __GFP_NORETRY)
  2167. return CHARGE_NOMEM;
  2168. ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
  2169. if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
  2170. return CHARGE_RETRY;
  2171. /*
  2172. * Even though the limit is exceeded at this point, reclaim
  2173. * may have been able to free some pages. Retry the charge
  2174. * before killing the task.
  2175. *
  2176. * Only for regular pages, though: huge pages are rather
  2177. * unlikely to succeed so close to the limit, and we fall back
  2178. * to regular pages anyway in case of failure.
  2179. */
  2180. if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
  2181. return CHARGE_RETRY;
  2182. /*
  2183. * At task move, charge accounts can be doubly counted. So, it's
  2184. * better to wait until the end of task_move if something is going on.
  2185. */
  2186. if (mem_cgroup_wait_acct_move(mem_over_limit))
  2187. return CHARGE_RETRY;
2188. /* If we don't need to call the oom-killer at all, return immediately */
  2189. if (!oom_check)
  2190. return CHARGE_NOMEM;
  2191. /* check OOM */
  2192. if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
  2193. return CHARGE_OOM_DIE;
  2194. return CHARGE_RETRY;
  2195. }
  2196. /*
  2197. * __mem_cgroup_try_charge() does
  2198. * 1. detect memcg to be charged against from passed *mm and *ptr,
  2199. * 2. update res_counter
  2200. * 3. call memory reclaim if necessary.
  2201. *
2202. * As a special case, if the task is dying (fatal_signal_pending() is true or
2203. * it has TIF_MEMDIE set), this function returns -EINTR while writing
2204. * root_mem_cgroup to *ptr. There are two reasons for this. 1: dying threads
2205. * should quit as soon as possible without any hazards. 2: all pages should
2206. * have a valid pc->mem_cgroup. If mm is NULL and the caller doesn't pass a
2207. * valid memcg pointer, that is treated as a charge to root_mem_cgroup.
  2208. *
  2209. * So __mem_cgroup_try_charge() will return
  2210. * 0 ... on success, filling *ptr with a valid memcg pointer.
  2211. * -ENOMEM ... charge failure because of resource limits.
2212. * -EINTR ... if the thread is dying. *ptr is filled with root_mem_cgroup.
2213. *
2214. * Unlike the exported interface, an "oom" parameter is added. If oom==true,
  2215. * the oom-killer can be invoked.
  2216. */
  2217. static int __mem_cgroup_try_charge(struct mm_struct *mm,
  2218. gfp_t gfp_mask,
  2219. unsigned int nr_pages,
  2220. struct mem_cgroup **ptr,
  2221. bool oom)
  2222. {
  2223. unsigned int batch = max(CHARGE_BATCH, nr_pages);
  2224. int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2225. struct mem_cgroup *memcg = NULL;
  2226. int ret;
  2227. /*
2228. * Unlike the global VM's OOM kill, we're not under system-level memory
2229. * shortage here. So let a dying process go ahead, in addition to a
2230. * MEMDIE process.
  2231. */
  2232. if (unlikely(test_thread_flag(TIF_MEMDIE)
  2233. || fatal_signal_pending(current)))
  2234. goto bypass;
  2235. /*
  2236. * We always charge the cgroup the mm_struct belongs to.
  2237. * The mm_struct's mem_cgroup changes on task migration if the
  2238. * thread group leader migrates. It's possible that mm is not
  2239. * set, if so charge the root memcg (happens for pagecache usage).
  2240. */
  2241. if (!*ptr && !mm)
  2242. *ptr = root_mem_cgroup;
  2243. again:
  2244. if (*ptr) { /* css should be a valid one */
  2245. memcg = *ptr;
  2246. if (mem_cgroup_is_root(memcg))
  2247. goto done;
  2248. if (consume_stock(memcg, nr_pages))
  2249. goto done;
  2250. css_get(&memcg->css);
  2251. } else {
  2252. struct task_struct *p;
  2253. rcu_read_lock();
  2254. p = rcu_dereference(mm->owner);
  2255. /*
2256. * Because we don't have task_lock(), "p" can exit.
2257. * In that case, "memcg" can point to root or p can be NULL due to a
2258. * race with swapoff. Then, we have a small risk of mis-accounting.
2259. * But this kind of mis-accounting due to a race always happens because
2260. * we don't hold cgroup_mutex(). Avoiding it would be overkill, so we
2261. * allow that small race here.
2262. * (*) swapoff and friends charge against the mm_struct, not against
2263. * the task_struct. So, mm->owner can be NULL.
  2264. */
  2265. memcg = mem_cgroup_from_task(p);
  2266. if (!memcg)
  2267. memcg = root_mem_cgroup;
  2268. if (mem_cgroup_is_root(memcg)) {
  2269. rcu_read_unlock();
  2270. goto done;
  2271. }
  2272. if (consume_stock(memcg, nr_pages)) {
  2273. /*
2274. * It seems dangerous to access memcg without css_get().
2275. * But considering how consume_stock works, it's not
2276. * necessary. If consume_stock succeeds, some charges
  2277. * from this memcg are cached on this cpu. So, we
  2278. * don't need to call css_get()/css_tryget() before
  2279. * calling consume_stock().
  2280. */
  2281. rcu_read_unlock();
  2282. goto done;
  2283. }
  2284. /* after here, we may be blocked. we need to get refcnt */
  2285. if (!css_tryget(&memcg->css)) {
  2286. rcu_read_unlock();
  2287. goto again;
  2288. }
  2289. rcu_read_unlock();
  2290. }
  2291. do {
  2292. bool oom_check;
  2293. /* If killed, bypass charge */
  2294. if (fatal_signal_pending(current)) {
  2295. css_put(&memcg->css);
  2296. goto bypass;
  2297. }
  2298. oom_check = false;
  2299. if (oom && !nr_oom_retries) {
  2300. oom_check = true;
  2301. nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2302. }
  2303. ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
  2304. oom_check);
  2305. switch (ret) {
  2306. case CHARGE_OK:
  2307. break;
  2308. case CHARGE_RETRY: /* not in OOM situation but retry */
  2309. batch = nr_pages;
  2310. css_put(&memcg->css);
  2311. memcg = NULL;
  2312. goto again;
  2313. case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
  2314. css_put(&memcg->css);
  2315. goto nomem;
  2316. case CHARGE_NOMEM: /* OOM routine works */
  2317. if (!oom) {
  2318. css_put(&memcg->css);
  2319. goto nomem;
  2320. }
  2321. /* If oom, we never return -ENOMEM */
  2322. nr_oom_retries--;
  2323. break;
  2324. case CHARGE_OOM_DIE: /* Killed by OOM Killer */
  2325. css_put(&memcg->css);
  2326. goto bypass;
  2327. }
  2328. } while (ret != CHARGE_OK);
  2329. if (batch > nr_pages)
  2330. refill_stock(memcg, batch - nr_pages);
  2331. css_put(&memcg->css);
  2332. done:
  2333. *ptr = memcg;
  2334. return 0;
  2335. nomem:
  2336. *ptr = NULL;
  2337. return -ENOMEM;
  2338. bypass:
  2339. *ptr = root_mem_cgroup;
  2340. return -EINTR;
  2341. }
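/*
 * Caller sketch (illustrative only; "newpage" and "ctype" are placeholders):
 * the three return values documented above are typically handled like this,
 * with -EINTR treated as "already charged to root, keep going":
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int ret;
 *
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
 *	if (ret == -ENOMEM)
 *		return ret;
 *	(ret is 0 or -EINTR: *memcg is valid, root_mem_cgroup on -EINTR,
 *	 so commit the page to it and continue)
 *	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
 */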
  2342. /*
2343. * Sometimes we have to undo a charge we got by try_charge().
2344. * This function is for that: it uncharges and puts the css refcount
2345. * obtained by try_charge().
  2346. */
  2347. static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
  2348. unsigned int nr_pages)
  2349. {
  2350. if (!mem_cgroup_is_root(memcg)) {
  2351. unsigned long bytes = nr_pages * PAGE_SIZE;
  2352. res_counter_uncharge(&memcg->res, bytes);
  2353. if (do_swap_account)
  2354. res_counter_uncharge(&memcg->memsw, bytes);
  2355. }
  2356. }
  2357. /*
2358. * Cancel charges in this cgroup only; this doesn't propagate to the parent cgroup.
  2359. * This is useful when moving usage to parent cgroup.
  2360. */
  2361. static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
  2362. unsigned int nr_pages)
  2363. {
  2364. unsigned long bytes = nr_pages * PAGE_SIZE;
  2365. if (mem_cgroup_is_root(memcg))
  2366. return;
  2367. res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
  2368. if (do_swap_account)
  2369. res_counter_uncharge_until(&memcg->memsw,
  2370. memcg->memsw.parent, bytes);
  2371. }
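/*
 * The difference between the two cancel helpers above, in sketch form
 * (assuming a simple parent <- child hierarchy):
 *
 *	__mem_cgroup_cancel_charge(child, 1);
 *		uncharges child and every ancestor up the res_counter chain.
 *	__mem_cgroup_cancel_local_charge(child, 1);
 *		uncharges only child (res_counter_uncharge_until() stops at
 *		child's parent). This is why mem_cgroup_move_parent() uses
 *		it after a successful move: the parent's usage must keep
 *		accounting for the page that was moved up.
 */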
  2372. /*
2373. * A helper function to get a mem_cgroup from its ID. Must be called under
2374. * rcu_read_lock(). The caller is responsible for calling css_tryget() if
2375. * the mem_cgroup is used for charging. (Dropping a refcnt from swap can be
2376. * done against a removed memcg.)
  2377. */
  2378. static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
  2379. {
  2380. struct cgroup_subsys_state *css;
  2381. /* ID 0 is unused ID */
  2382. if (!id)
  2383. return NULL;
  2384. css = css_lookup(&mem_cgroup_subsys, id);
  2385. if (!css)
  2386. return NULL;
  2387. return mem_cgroup_from_css(css);
  2388. }
  2389. struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  2390. {
  2391. struct mem_cgroup *memcg = NULL;
  2392. struct page_cgroup *pc;
  2393. unsigned short id;
  2394. swp_entry_t ent;
  2395. VM_BUG_ON(!PageLocked(page));
  2396. pc = lookup_page_cgroup(page);
  2397. lock_page_cgroup(pc);
  2398. if (PageCgroupUsed(pc)) {
  2399. memcg = pc->mem_cgroup;
  2400. if (memcg && !css_tryget(&memcg->css))
  2401. memcg = NULL;
  2402. } else if (PageSwapCache(page)) {
  2403. ent.val = page_private(page);
  2404. id = lookup_swap_cgroup_id(ent);
  2405. rcu_read_lock();
  2406. memcg = mem_cgroup_lookup(id);
  2407. if (memcg && !css_tryget(&memcg->css))
  2408. memcg = NULL;
  2409. rcu_read_unlock();
  2410. }
  2411. unlock_page_cgroup(pc);
  2412. return memcg;
  2413. }
  2414. static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
  2415. struct page *page,
  2416. unsigned int nr_pages,
  2417. enum charge_type ctype,
  2418. bool lrucare)
  2419. {
  2420. struct page_cgroup *pc = lookup_page_cgroup(page);
  2421. struct zone *uninitialized_var(zone);
  2422. struct lruvec *lruvec;
  2423. bool was_on_lru = false;
  2424. bool anon;
  2425. lock_page_cgroup(pc);
  2426. VM_BUG_ON(PageCgroupUsed(pc));
  2427. /*
2428. * We don't need page_cgroup_lock for tail pages, because they are not
  2429. * accessed by any other context at this point.
  2430. */
  2431. /*
  2432. * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
  2433. * may already be on some other mem_cgroup's LRU. Take care of it.
  2434. */
  2435. if (lrucare) {
  2436. zone = page_zone(page);
  2437. spin_lock_irq(&zone->lru_lock);
  2438. if (PageLRU(page)) {
  2439. lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
  2440. ClearPageLRU(page);
  2441. del_page_from_lru_list(page, lruvec, page_lru(page));
  2442. was_on_lru = true;
  2443. }
  2444. }
  2445. pc->mem_cgroup = memcg;
  2446. /*
  2447. * We access a page_cgroup asynchronously without lock_page_cgroup().
  2448. * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
  2449. * is accessed after testing USED bit. To make pc->mem_cgroup visible
  2450. * before USED bit, we need memory barrier here.
  2451. * See mem_cgroup_add_lru_list(), etc.
  2452. */
  2453. smp_wmb();
  2454. SetPageCgroupUsed(pc);
  2455. if (lrucare) {
  2456. if (was_on_lru) {
  2457. lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
  2458. VM_BUG_ON(PageLRU(page));
  2459. SetPageLRU(page);
  2460. add_page_to_lru_list(page, lruvec, page_lru(page));
  2461. }
  2462. spin_unlock_irq(&zone->lru_lock);
  2463. }
  2464. if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
  2465. anon = true;
  2466. else
  2467. anon = false;
  2468. mem_cgroup_charge_statistics(memcg, anon, nr_pages);
  2469. unlock_page_cgroup(pc);
  2470. /*
  2471. * "charge_statistics" updated event counter. Then, check it.
  2472. * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
  2473. * if they exceeds softlimit.
  2474. */
  2475. memcg_check_events(memcg, page);
  2476. }
  2477. static DEFINE_MUTEX(set_limit_mutex);
  2478. #ifdef CONFIG_MEMCG_KMEM
  2479. static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
  2480. {
  2481. return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
  2482. (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
  2483. }
  2484. /*
  2485. * This is a bit cumbersome, but it is rarely used and avoids a backpointer
  2486. * in the memcg_cache_params struct.
  2487. */
  2488. static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
  2489. {
  2490. struct kmem_cache *cachep;
  2491. VM_BUG_ON(p->is_root_cache);
  2492. cachep = p->root_cache;
  2493. return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
  2494. }
  2495. #ifdef CONFIG_SLABINFO
  2496. static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
  2497. struct seq_file *m)
  2498. {
  2499. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  2500. struct memcg_cache_params *params;
  2501. if (!memcg_can_account_kmem(memcg))
  2502. return -EIO;
  2503. print_slabinfo_header(m);
  2504. mutex_lock(&memcg->slab_caches_mutex);
  2505. list_for_each_entry(params, &memcg->memcg_slab_caches, list)
  2506. cache_show(memcg_params_to_cache(params), m);
  2507. mutex_unlock(&memcg->slab_caches_mutex);
  2508. return 0;
  2509. }
  2510. #endif
  2511. static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
  2512. {
  2513. struct res_counter *fail_res;
  2514. struct mem_cgroup *_memcg;
  2515. int ret = 0;
  2516. bool may_oom;
  2517. ret = res_counter_charge(&memcg->kmem, size, &fail_res);
  2518. if (ret)
  2519. return ret;
  2520. /*
  2521. * Conditions under which we can wait for the oom_killer. Those are
  2522. * the same conditions tested by the core page allocator
  2523. */
  2524. may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
  2525. _memcg = memcg;
  2526. ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
  2527. &_memcg, may_oom);
  2528. if (ret == -EINTR) {
  2529. /*
2530. * __mem_cgroup_try_charge() chose to bypass to root due to
  2531. * OOM kill or fatal signal. Since our only options are to
  2532. * either fail the allocation or charge it to this cgroup, do
  2533. * it as a temporary condition. But we can't fail. From a
  2534. * kmem/slab perspective, the cache has already been selected,
  2535. * by mem_cgroup_kmem_get_cache(), so it is too late to change
  2536. * our minds.
  2537. *
  2538. * This condition will only trigger if the task entered
  2539. * memcg_charge_kmem in a sane state, but was OOM-killed during
  2540. * __mem_cgroup_try_charge() above. Tasks that were already
  2541. * dying when the allocation triggers should have been already
  2542. * directed to the root cgroup in memcontrol.h
  2543. */
  2544. res_counter_charge_nofail(&memcg->res, size, &fail_res);
  2545. if (do_swap_account)
  2546. res_counter_charge_nofail(&memcg->memsw, size,
  2547. &fail_res);
  2548. ret = 0;
  2549. } else if (ret)
  2550. res_counter_uncharge(&memcg->kmem, size);
  2551. return ret;
  2552. }
  2553. static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
  2554. {
  2555. res_counter_uncharge(&memcg->res, size);
  2556. if (do_swap_account)
  2557. res_counter_uncharge(&memcg->memsw, size);
  2558. /* Not down to 0 */
  2559. if (res_counter_uncharge(&memcg->kmem, size))
  2560. return;
  2561. if (memcg_kmem_test_and_clear_dead(memcg))
  2562. mem_cgroup_put(memcg);
  2563. }
  2564. void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
  2565. {
  2566. if (!memcg)
  2567. return;
  2568. mutex_lock(&memcg->slab_caches_mutex);
  2569. list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
  2570. mutex_unlock(&memcg->slab_caches_mutex);
  2571. }
  2572. /*
2573. * Helper for accessing a memcg's index. It will be used as an index in the
  2574. * child cache array in kmem_cache, and also to derive its name. This function
  2575. * will return -1 when this is not a kmem-limited memcg.
  2576. */
  2577. int memcg_cache_id(struct mem_cgroup *memcg)
  2578. {
  2579. return memcg ? memcg->kmemcg_id : -1;
  2580. }
  2581. /*
  2582. * This ends up being protected by the set_limit mutex, during normal
  2583. * operation, because that is its main call site.
  2584. *
  2585. * But when we create a new cache, we can call this as well if its parent
  2586. * is kmem-limited. That will have to hold set_limit_mutex as well.
  2587. */
  2588. int memcg_update_cache_sizes(struct mem_cgroup *memcg)
  2589. {
  2590. int num, ret;
  2591. num = ida_simple_get(&kmem_limited_groups,
  2592. 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
  2593. if (num < 0)
  2594. return num;
  2595. /*
  2596. * After this point, kmem_accounted (that we test atomically in
2597. * the beginning of this conditional) is no longer 0. This
  2598. * guarantees only one process will set the following boolean
  2599. * to true. We don't need test_and_set because we're protected
  2600. * by the set_limit_mutex anyway.
  2601. */
  2602. memcg_kmem_set_activated(memcg);
  2603. ret = memcg_update_all_caches(num+1);
  2604. if (ret) {
  2605. ida_simple_remove(&kmem_limited_groups, num);
  2606. memcg_kmem_clear_activated(memcg);
  2607. return ret;
  2608. }
  2609. memcg->kmemcg_id = num;
  2610. INIT_LIST_HEAD(&memcg->memcg_slab_caches);
  2611. mutex_init(&memcg->slab_caches_mutex);
  2612. return 0;
  2613. }
  2614. static size_t memcg_caches_array_size(int num_groups)
  2615. {
  2616. ssize_t size;
  2617. if (num_groups <= 0)
  2618. return 0;
  2619. size = 2 * num_groups;
  2620. if (size < MEMCG_CACHES_MIN_SIZE)
  2621. size = MEMCG_CACHES_MIN_SIZE;
  2622. else if (size > MEMCG_CACHES_MAX_SIZE)
  2623. size = MEMCG_CACHES_MAX_SIZE;
  2624. return size;
  2625. }
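/*
 * For example (illustrative values; the real bounds come from
 * MEMCG_CACHES_MIN_SIZE/MEMCG_CACHES_MAX_SIZE):
 *	memcg_caches_array_size(3)		-> 6, unless 6 is below the
 *						   minimum, which is returned
 *						   instead;
 *	memcg_caches_array_size(very large n)	-> clamped to the maximum.
 * The doubling presumably leaves headroom so the per-cache arrays do not
 * have to be reallocated for every new kmem-limited memcg.
 */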
  2626. /*
2627. * We should update the current array size iff all cache updates succeed. This
  2628. * can only be done from the slab side. The slab mutex needs to be held when
  2629. * calling this.
  2630. */
  2631. void memcg_update_array_size(int num)
  2632. {
  2633. if (num > memcg_limited_groups_array_size)
  2634. memcg_limited_groups_array_size = memcg_caches_array_size(num);
  2635. }
  2636. int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
  2637. {
  2638. struct memcg_cache_params *cur_params = s->memcg_params;
  2639. VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
  2640. if (num_groups > memcg_limited_groups_array_size) {
  2641. int i;
  2642. ssize_t size = memcg_caches_array_size(num_groups);
  2643. size *= sizeof(void *);
  2644. size += sizeof(struct memcg_cache_params);
  2645. s->memcg_params = kzalloc(size, GFP_KERNEL);
  2646. if (!s->memcg_params) {
  2647. s->memcg_params = cur_params;
  2648. return -ENOMEM;
  2649. }
  2650. s->memcg_params->is_root_cache = true;
  2651. /*
2652. * There is a chance it will be bigger than
2653. * memcg_limited_groups_array_size, if we failed an allocation
2654. * in a cache, in which case all caches updated before it will
  2655. * have a bigger array.
  2656. *
  2657. * But if that is the case, the data after
  2658. * memcg_limited_groups_array_size is certainly unused
  2659. */
  2660. for (i = 0; i < memcg_limited_groups_array_size; i++) {
  2661. if (!cur_params->memcg_caches[i])
  2662. continue;
  2663. s->memcg_params->memcg_caches[i] =
  2664. cur_params->memcg_caches[i];
  2665. }
  2666. /*
  2667. * Ideally, we would wait until all caches succeed, and only
  2668. * then free the old one. But this is not worth the extra
  2669. * pointer per-cache we'd have to have for this.
  2670. *
  2671. * It is not a big deal if some caches are left with a size
  2672. * bigger than the others. And all updates will reset this
  2673. * anyway.
  2674. */
  2675. kfree(cur_params);
  2676. }
  2677. return 0;
  2678. }
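/*
 * A small worked example of the growth path above (illustrative numbers):
 * with memcg_limited_groups_array_size == 4 and num_groups == 5, a new
 * params struct sized for memcg_caches_array_size(5) == 10 pointers is
 * allocated (assuming the MIN/MAX bounds don't clamp it), the 4 existing
 * memcg_caches[] pointers are copied over, and the old struct is freed.
 * The global array size itself is only bumped later, via
 * memcg_update_array_size(), once all caches have been updated.
 */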
  2679. int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
  2680. struct kmem_cache *root_cache)
  2681. {
  2682. size_t size = sizeof(struct memcg_cache_params);
  2683. if (!memcg_kmem_enabled())
  2684. return 0;
  2685. if (!memcg)
  2686. size += memcg_limited_groups_array_size * sizeof(void *);
  2687. s->memcg_params = kzalloc(size, GFP_KERNEL);
  2688. if (!s->memcg_params)
  2689. return -ENOMEM;
  2690. if (memcg) {
  2691. s->memcg_params->memcg = memcg;
  2692. s->memcg_params->root_cache = root_cache;
  2693. } else
  2694. s->memcg_params->is_root_cache = true;
  2695. return 0;
  2696. }
  2697. void memcg_release_cache(struct kmem_cache *s)
  2698. {
  2699. struct kmem_cache *root;
  2700. struct mem_cgroup *memcg;
  2701. int id;
  2702. /*
  2703. * This happens, for instance, when a root cache goes away before we
  2704. * add any memcg.
  2705. */
  2706. if (!s->memcg_params)
  2707. return;
  2708. if (s->memcg_params->is_root_cache)
  2709. goto out;
  2710. memcg = s->memcg_params->memcg;
  2711. id = memcg_cache_id(memcg);
  2712. root = s->memcg_params->root_cache;
  2713. root->memcg_params->memcg_caches[id] = NULL;
  2714. mem_cgroup_put(memcg);
  2715. mutex_lock(&memcg->slab_caches_mutex);
  2716. list_del(&s->memcg_params->list);
  2717. mutex_unlock(&memcg->slab_caches_mutex);
  2718. out:
  2719. kfree(s->memcg_params);
  2720. }
  2721. /*
  2722. * During the creation a new cache, we need to disable our accounting mechanism
  2723. * altogether. This is true even if we are not creating, but rather just
2724. * enqueuing new caches to be created.
  2725. *
  2726. * This is because that process will trigger allocations; some visible, like
  2727. * explicit kmallocs to auxiliary data structures, name strings and internal
  2728. * cache structures; some well concealed, like INIT_WORK() that can allocate
  2729. * objects during debug.
  2730. *
  2731. * If any allocation happens during memcg_kmem_get_cache, we will recurse back
  2732. * to it. This may not be a bounded recursion: since the first cache creation
  2733. * failed to complete (waiting on the allocation), we'll just try to create the
  2734. * cache again, failing at the same point.
  2735. *
  2736. * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
  2737. * memcg_kmem_skip_account. So we enclose anything that might allocate memory
  2738. * inside the following two functions.
  2739. */
  2740. static inline void memcg_stop_kmem_account(void)
  2741. {
  2742. VM_BUG_ON(!current->mm);
  2743. current->memcg_kmem_skip_account++;
  2744. }
  2745. static inline void memcg_resume_kmem_account(void)
  2746. {
  2747. VM_BUG_ON(!current->mm);
  2748. current->memcg_kmem_skip_account--;
  2749. }
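/*
 * Intended usage pattern of the two helpers above (a condensed sketch of
 * what memcg_create_cache_enqueue() below does):
 *
 *	memcg_stop_kmem_account();
 *	...any allocation made here (kmalloc, INIT_WORK debug objects, ...)
 *	   falls back to the root cache instead of re-entering
 *	   memcg_kmem_get_cache()...
 *	memcg_resume_kmem_account();
 */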
  2750. static void kmem_cache_destroy_work_func(struct work_struct *w)
  2751. {
  2752. struct kmem_cache *cachep;
  2753. struct memcg_cache_params *p;
  2754. p = container_of(w, struct memcg_cache_params, destroy);
  2755. cachep = memcg_params_to_cache(p);
  2756. /*
  2757. * If we get down to 0 after shrink, we could delete right away.
  2758. * However, memcg_release_pages() already puts us back in the workqueue
  2759. * in that case. If we proceed deleting, we'll get a dangling
  2760. * reference, and removing the object from the workqueue in that case
  2761. * is unnecessary complication. We are not a fast path.
  2762. *
  2763. * Note that this case is fundamentally different from racing with
  2764. * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
  2765. * kmem_cache_shrink, not only we would be reinserting a dead cache
  2766. * into the queue, but doing so from inside the worker racing to
  2767. * destroy it.
  2768. *
  2769. * So if we aren't down to zero, we'll just schedule a worker and try
  2770. * again
  2771. */
  2772. if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
  2773. kmem_cache_shrink(cachep);
  2774. if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
  2775. return;
  2776. } else
  2777. kmem_cache_destroy(cachep);
  2778. }
  2779. void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
  2780. {
  2781. if (!cachep->memcg_params->dead)
  2782. return;
  2783. /*
  2784. * There are many ways in which we can get here.
  2785. *
  2786. * We can get to a memory-pressure situation while the delayed work is
  2787. * still pending to run. The vmscan shrinkers can then release all
  2788. * cache memory and get us to destruction. If this is the case, we'll
  2789. * be executed twice, which is a bug (the second time will execute over
  2790. * bogus data). In this case, cancelling the work should be fine.
  2791. *
  2792. * But we can also get here from the worker itself, if
  2793. * kmem_cache_shrink is enough to shake all the remaining objects and
  2794. * get the page count to 0. In this case, we'll deadlock if we try to
  2795. * cancel the work (the worker runs with an internal lock held, which
  2796. * is the same lock we would hold for cancel_work_sync().)
  2797. *
  2798. * Since we can't possibly know who got us here, just refrain from
  2799. * running if there is already work pending
  2800. */
  2801. if (work_pending(&cachep->memcg_params->destroy))
  2802. return;
  2803. /*
  2804. * We have to defer the actual destroying to a workqueue, because
  2805. * we might currently be in a context that cannot sleep.
  2806. */
  2807. schedule_work(&cachep->memcg_params->destroy);
  2808. }
  2809. static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
  2810. {
  2811. char *name;
  2812. struct dentry *dentry;
  2813. rcu_read_lock();
  2814. dentry = rcu_dereference(memcg->css.cgroup->dentry);
  2815. rcu_read_unlock();
  2816. BUG_ON(dentry == NULL);
  2817. name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
  2818. memcg_cache_id(memcg), dentry->d_name.name);
  2819. return name;
  2820. }
  2821. static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
  2822. struct kmem_cache *s)
  2823. {
  2824. char *name;
  2825. struct kmem_cache *new;
  2826. name = memcg_cache_name(memcg, s);
  2827. if (!name)
  2828. return NULL;
  2829. new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
  2830. (s->flags & ~SLAB_PANIC), s->ctor, s);
  2831. if (new)
  2832. new->allocflags |= __GFP_KMEMCG;
  2833. kfree(name);
  2834. return new;
  2835. }
  2836. /*
  2837. * This lock protects updaters, not readers. We want readers to be as fast as
  2838. * they can, and they will either see NULL or a valid cache value. Our model
2839. * allows them to see NULL, in which case the root memcg will be selected.
2840. *
2841. * We need this lock because multiple allocations to the same cache may
2842. * span more than one worker. Only one of them can create the cache.
  2843. */
  2844. static DEFINE_MUTEX(memcg_cache_mutex);
  2845. static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
  2846. struct kmem_cache *cachep)
  2847. {
  2848. struct kmem_cache *new_cachep;
  2849. int idx;
  2850. BUG_ON(!memcg_can_account_kmem(memcg));
  2851. idx = memcg_cache_id(memcg);
  2852. mutex_lock(&memcg_cache_mutex);
  2853. new_cachep = cachep->memcg_params->memcg_caches[idx];
  2854. if (new_cachep)
  2855. goto out;
  2856. new_cachep = kmem_cache_dup(memcg, cachep);
  2857. if (new_cachep == NULL) {
  2858. new_cachep = cachep;
  2859. goto out;
  2860. }
  2861. mem_cgroup_get(memcg);
  2862. atomic_set(&new_cachep->memcg_params->nr_pages , 0);
  2863. cachep->memcg_params->memcg_caches[idx] = new_cachep;
  2864. /*
2865. * The readers won't take the lock; make sure everybody sees the updated
2866. * value, so they won't put stuff in the queue again for no reason.
  2867. */
  2868. wmb();
  2869. out:
  2870. mutex_unlock(&memcg_cache_mutex);
  2871. return new_cachep;
  2872. }
  2873. void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
  2874. {
  2875. struct kmem_cache *c;
  2876. int i;
  2877. if (!s->memcg_params)
  2878. return;
  2879. if (!s->memcg_params->is_root_cache)
  2880. return;
  2881. /*
  2882. * If the cache is being destroyed, we trust that there is no one else
  2883. * requesting objects from it. Even if there are, the sanity checks in
2884. * kmem_cache_destroy() should catch this ill case.
  2885. *
  2886. * Still, we don't want anyone else freeing memcg_caches under our
  2887. * noses, which can happen if a new memcg comes to life. As usual,
  2888. * we'll take the set_limit_mutex to protect ourselves against this.
  2889. */
  2890. mutex_lock(&set_limit_mutex);
  2891. for (i = 0; i < memcg_limited_groups_array_size; i++) {
  2892. c = s->memcg_params->memcg_caches[i];
  2893. if (!c)
  2894. continue;
  2895. /*
  2896. * We will now manually delete the caches, so to avoid races
  2897. * we need to cancel all pending destruction workers and
  2898. * proceed with destruction ourselves.
  2899. *
  2900. * kmem_cache_destroy() will call kmem_cache_shrink internally,
  2901. * and that could spawn the workers again: it is likely that
2902. * the cache still has active pages at this very moment.
  2903. * This would lead us back to mem_cgroup_destroy_cache.
  2904. *
  2905. * But that will not execute at all if the "dead" flag is not
  2906. * set, so flip it down to guarantee we are in control.
  2907. */
  2908. c->memcg_params->dead = false;
  2909. cancel_work_sync(&c->memcg_params->destroy);
  2910. kmem_cache_destroy(c);
  2911. }
  2912. mutex_unlock(&set_limit_mutex);
  2913. }
  2914. struct create_work {
  2915. struct mem_cgroup *memcg;
  2916. struct kmem_cache *cachep;
  2917. struct work_struct work;
  2918. };
  2919. static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
  2920. {
  2921. struct kmem_cache *cachep;
  2922. struct memcg_cache_params *params;
  2923. if (!memcg_kmem_is_active(memcg))
  2924. return;
  2925. mutex_lock(&memcg->slab_caches_mutex);
  2926. list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
  2927. cachep = memcg_params_to_cache(params);
  2928. cachep->memcg_params->dead = true;
  2929. INIT_WORK(&cachep->memcg_params->destroy,
  2930. kmem_cache_destroy_work_func);
  2931. schedule_work(&cachep->memcg_params->destroy);
  2932. }
  2933. mutex_unlock(&memcg->slab_caches_mutex);
  2934. }
  2935. static void memcg_create_cache_work_func(struct work_struct *w)
  2936. {
  2937. struct create_work *cw;
  2938. cw = container_of(w, struct create_work, work);
  2939. memcg_create_kmem_cache(cw->memcg, cw->cachep);
  2940. /* Drop the reference gotten when we enqueued. */
  2941. css_put(&cw->memcg->css);
  2942. kfree(cw);
  2943. }
  2944. /*
  2945. * Enqueue the creation of a per-memcg kmem_cache.
  2946. * Called with rcu_read_lock.
  2947. */
  2948. static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
  2949. struct kmem_cache *cachep)
  2950. {
  2951. struct create_work *cw;
  2952. cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
  2953. if (cw == NULL)
  2954. return;
  2955. /* The corresponding put will be done in the workqueue. */
  2956. if (!css_tryget(&memcg->css)) {
  2957. kfree(cw);
  2958. return;
  2959. }
  2960. cw->memcg = memcg;
  2961. cw->cachep = cachep;
  2962. INIT_WORK(&cw->work, memcg_create_cache_work_func);
  2963. schedule_work(&cw->work);
  2964. }
  2965. static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
  2966. struct kmem_cache *cachep)
  2967. {
  2968. /*
  2969. * We need to stop accounting when we kmalloc, because if the
  2970. * corresponding kmalloc cache is not yet created, the first allocation
  2971. * in __memcg_create_cache_enqueue will recurse.
  2972. *
  2973. * However, it is better to enclose the whole function. Depending on
  2974. * the debugging options enabled, INIT_WORK(), for instance, can
  2975. * trigger an allocation. This too, will make us recurse. Because at
  2976. * this point we can't allow ourselves back into memcg_kmem_get_cache,
  2977. * the safest choice is to do it like this, wrapping the whole function.
  2978. */
  2979. memcg_stop_kmem_account();
  2980. __memcg_create_cache_enqueue(memcg, cachep);
  2981. memcg_resume_kmem_account();
  2982. }
  2983. /*
  2984. * Return the kmem_cache we're supposed to use for a slab allocation.
  2985. * We try to use the current memcg's version of the cache.
  2986. *
  2987. * If the cache does not exist yet, if we are the first user of it,
  2988. * we either create it immediately, if possible, or create it asynchronously
  2989. * in a workqueue.
  2990. * In the latter case, we will let the current allocation go through with
  2991. * the original cache.
  2992. *
  2993. * Can't be called in interrupt context or from kernel threads.
  2994. * This function needs to be called with rcu_read_lock() held.
  2995. */
  2996. struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
  2997. gfp_t gfp)
  2998. {
  2999. struct mem_cgroup *memcg;
  3000. int idx;
  3001. VM_BUG_ON(!cachep->memcg_params);
  3002. VM_BUG_ON(!cachep->memcg_params->is_root_cache);
  3003. if (!current->mm || current->memcg_kmem_skip_account)
  3004. return cachep;
  3005. rcu_read_lock();
  3006. memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
  3007. rcu_read_unlock();
  3008. if (!memcg_can_account_kmem(memcg))
  3009. return cachep;
  3010. idx = memcg_cache_id(memcg);
  3011. /*
3012. * barrier to make sure we're always seeing the up-to-date value. The
  3013. * code updating memcg_caches will issue a write barrier to match this.
  3014. */
  3015. read_barrier_depends();
  3016. if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
  3017. /*
  3018. * If we are in a safe context (can wait, and not in interrupt
3019. * context), we could be predictable and return right away.
  3020. * This would guarantee that the allocation being performed
  3021. * already belongs in the new cache.
  3022. *
3023. * However, there are some clashes that can arise from locking.
  3024. * For instance, because we acquire the slab_mutex while doing
  3025. * kmem_cache_dup, this means no further allocation could happen
  3026. * with the slab_mutex held.
  3027. *
3028. * Also, because cache creation issues get_online_cpus(), this
3029. * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3030. * which ends up reversed during cpu hotplug. (cpuset allocates
3031. * a bunch of GFP_KERNEL memory during cpu_up). Due to all that,
3032. * it's better to defer everything.
  3033. */
  3034. memcg_create_cache_enqueue(memcg, cachep);
  3035. return cachep;
  3036. }
  3037. return cachep->memcg_params->memcg_caches[idx];
  3038. }
  3039. EXPORT_SYMBOL(__memcg_kmem_get_cache);
  3040. /*
  3041. * We need to verify if the allocation against current->mm->owner's memcg is
  3042. * possible for the given order. But the page is not allocated yet, so we'll
  3043. * need a further commit step to do the final arrangements.
  3044. *
3045. * It is possible for the task to switch cgroups in the meantime, so at
  3046. * commit time, we can't rely on task conversion any longer. We'll then use
  3047. * the handle argument to return to the caller which cgroup we should commit
  3048. * against. We could also return the memcg directly and avoid the pointer
  3049. * passing, but a boolean return value gives better semantics considering
  3050. * the compiled-out case as well.
  3051. *
  3052. * Returning true means the allocation is possible.
  3053. */
  3054. bool
  3055. __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
  3056. {
  3057. struct mem_cgroup *memcg;
  3058. int ret;
  3059. *_memcg = NULL;
  3060. memcg = try_get_mem_cgroup_from_mm(current->mm);
  3061. /*
  3062. * very rare case described in mem_cgroup_from_task. Unfortunately there
  3063. * isn't much we can do without complicating this too much, and it would
  3064. * be gfp-dependent anyway. Just let it go
  3065. */
  3066. if (unlikely(!memcg))
  3067. return true;
  3068. if (!memcg_can_account_kmem(memcg)) {
  3069. css_put(&memcg->css);
  3070. return true;
  3071. }
  3072. ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
  3073. if (!ret)
  3074. *_memcg = memcg;
  3075. css_put(&memcg->css);
  3076. return (ret == 0);
  3077. }
  3078. void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
  3079. int order)
  3080. {
  3081. struct page_cgroup *pc;
  3082. VM_BUG_ON(mem_cgroup_is_root(memcg));
  3083. /* The page allocation failed. Revert */
  3084. if (!page) {
  3085. memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
  3086. return;
  3087. }
  3088. pc = lookup_page_cgroup(page);
  3089. lock_page_cgroup(pc);
  3090. pc->mem_cgroup = memcg;
  3091. SetPageCgroupUsed(pc);
  3092. unlock_page_cgroup(pc);
  3093. }
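/*
 * The expected charge/commit sequence for a kmem page allocation, in
 * sketch form. The real call sites live in the page allocator glue and in
 * the wrappers in memcontrol.h, which add further checks; variable names
 * here are illustrative only:
 *
 *	struct mem_cgroup *memcg;
 *	struct page *page;
 *
 *	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;			(charge refused)
 *	page = alloc_pages(gfp, order);
 *	if (memcg)
 *		__memcg_kmem_commit_charge(page, memcg, order);
 *		(with page == NULL this reverts the charge)
 *	...and __memcg_kmem_uncharge_pages(page, order) on free, see below.
 */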
  3094. void __memcg_kmem_uncharge_pages(struct page *page, int order)
  3095. {
  3096. struct mem_cgroup *memcg = NULL;
  3097. struct page_cgroup *pc;
  3098. pc = lookup_page_cgroup(page);
  3099. /*
  3100. * Fast unlocked return. Theoretically might have changed, have to
  3101. * check again after locking.
  3102. */
  3103. if (!PageCgroupUsed(pc))
  3104. return;
  3105. lock_page_cgroup(pc);
  3106. if (PageCgroupUsed(pc)) {
  3107. memcg = pc->mem_cgroup;
  3108. ClearPageCgroupUsed(pc);
  3109. }
  3110. unlock_page_cgroup(pc);
  3111. /*
  3112. * We trust that only if there is a memcg associated with the page, it
  3113. * is a valid allocation
  3114. */
  3115. if (!memcg)
  3116. return;
  3117. VM_BUG_ON(mem_cgroup_is_root(memcg));
  3118. memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
  3119. }
  3120. #else
  3121. static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
  3122. {
  3123. }
  3124. #endif /* CONFIG_MEMCG_KMEM */
  3125. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  3126. #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
  3127. /*
  3128. * Because tail pages are not marked as "used", set it. We're under
  3129. * zone->lru_lock, 'splitting on pmd' and compound_lock.
3130. * charge/uncharge will never happen and move_account() is done under
  3131. * compound_lock(), so we don't have to take care of races.
  3132. */
  3133. void mem_cgroup_split_huge_fixup(struct page *head)
  3134. {
  3135. struct page_cgroup *head_pc = lookup_page_cgroup(head);
  3136. struct page_cgroup *pc;
  3137. int i;
  3138. if (mem_cgroup_disabled())
  3139. return;
  3140. for (i = 1; i < HPAGE_PMD_NR; i++) {
  3141. pc = head_pc + i;
  3142. pc->mem_cgroup = head_pc->mem_cgroup;
  3143. smp_wmb();/* see __commit_charge() */
  3144. pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
  3145. }
  3146. }
  3147. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  3148. /**
  3149. * mem_cgroup_move_account - move account of the page
  3150. * @page: the page
  3151. * @nr_pages: number of regular pages (>1 for huge pages)
  3152. * @pc: page_cgroup of the page.
  3153. * @from: mem_cgroup which the page is moved from.
  3154. * @to: mem_cgroup which the page is moved to. @from != @to.
  3155. *
  3156. * The caller must confirm following.
  3157. * - page is not on LRU (isolate_page() is useful.)
  3158. * - compound_lock is held when nr_pages > 1
  3159. *
  3160. * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
  3161. * from old cgroup.
  3162. */
  3163. static int mem_cgroup_move_account(struct page *page,
  3164. unsigned int nr_pages,
  3165. struct page_cgroup *pc,
  3166. struct mem_cgroup *from,
  3167. struct mem_cgroup *to)
  3168. {
  3169. unsigned long flags;
  3170. int ret;
  3171. bool anon = PageAnon(page);
  3172. VM_BUG_ON(from == to);
  3173. VM_BUG_ON(PageLRU(page));
  3174. /*
  3175. * The page is isolated from LRU. So, collapse function
  3176. * will not handle this page. But page splitting can happen.
  3177. * Do this check under compound_page_lock(). The caller should
  3178. * hold it.
  3179. */
  3180. ret = -EBUSY;
  3181. if (nr_pages > 1 && !PageTransHuge(page))
  3182. goto out;
  3183. lock_page_cgroup(pc);
  3184. ret = -EINVAL;
  3185. if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
  3186. goto unlock;
  3187. move_lock_mem_cgroup(from, &flags);
  3188. if (!anon && page_mapped(page)) {
  3189. /* Update mapped_file data for mem_cgroup */
  3190. preempt_disable();
  3191. __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  3192. __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  3193. preempt_enable();
  3194. }
  3195. mem_cgroup_charge_statistics(from, anon, -nr_pages);
  3196. /* caller should have done css_get */
  3197. pc->mem_cgroup = to;
  3198. mem_cgroup_charge_statistics(to, anon, nr_pages);
  3199. move_unlock_mem_cgroup(from, &flags);
  3200. ret = 0;
  3201. unlock:
  3202. unlock_page_cgroup(pc);
  3203. /*
  3204. * check events
  3205. */
  3206. memcg_check_events(to, page);
  3207. memcg_check_events(from, page);
  3208. out:
  3209. return ret;
  3210. }
  3211. /**
  3212. * mem_cgroup_move_parent - moves page to the parent group
  3213. * @page: the page to move
  3214. * @pc: page_cgroup of the page
  3215. * @child: page's cgroup
  3216. *
  3217. * move charges to its parent or the root cgroup if the group has no
  3218. * parent (aka use_hierarchy==0).
  3219. * Although this might fail (get_page_unless_zero, isolate_lru_page or
  3220. * mem_cgroup_move_account fails) the failure is always temporary and
  3221. * it signals a race with a page removal/uncharge or migration. In the
  3222. * first case the page is on the way out and it will vanish from the LRU
  3223. * on the next attempt and the call should be retried later.
  3224. * Isolation from the LRU fails only if page has been isolated from
  3225. * the LRU since we looked at it and that usually means either global
  3226. * reclaim or migration going on. The page will either get back to the
  3227. * LRU or vanish.
3228. * Finally, mem_cgroup_move_account fails only if the page got uncharged
  3229. * (!PageCgroupUsed) or moved to a different group. The page will
  3230. * disappear in the next attempt.
  3231. */
  3232. static int mem_cgroup_move_parent(struct page *page,
  3233. struct page_cgroup *pc,
  3234. struct mem_cgroup *child)
  3235. {
  3236. struct mem_cgroup *parent;
  3237. unsigned int nr_pages;
  3238. unsigned long uninitialized_var(flags);
  3239. int ret;
  3240. VM_BUG_ON(mem_cgroup_is_root(child));
  3241. ret = -EBUSY;
  3242. if (!get_page_unless_zero(page))
  3243. goto out;
  3244. if (isolate_lru_page(page))
  3245. goto put;
  3246. nr_pages = hpage_nr_pages(page);
  3247. parent = parent_mem_cgroup(child);
  3248. /*
  3249. * If no parent, move charges to root cgroup.
  3250. */
  3251. if (!parent)
  3252. parent = root_mem_cgroup;
  3253. if (nr_pages > 1) {
  3254. VM_BUG_ON(!PageTransHuge(page));
  3255. flags = compound_lock_irqsave(page);
  3256. }
  3257. ret = mem_cgroup_move_account(page, nr_pages,
  3258. pc, child, parent);
  3259. if (!ret)
  3260. __mem_cgroup_cancel_local_charge(child, nr_pages);
  3261. if (nr_pages > 1)
  3262. compound_unlock_irqrestore(page, flags);
  3263. putback_lru_page(page);
  3264. put:
  3265. put_page(page);
  3266. out:
  3267. return ret;
  3268. }
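/*
 * A very rough sketch of how a caller is expected to drive
 * mem_cgroup_move_parent() (hypothetical and simplified; the real user
 * walks the per-zone LRU lists while emptying a cgroup and also knows
 * when to give up and let its caller retry):
 *
 *	while the cgroup still has pages on a given LRU list:
 *		page = <take a reference to a page on that list>;
 *		pc = lookup_page_cgroup(page);
 *		mem_cgroup_move_parent(page, pc, memcg);
 *		(a non-zero return is one of the temporary failures
 *		 described above; just retry the list on a later pass)
 */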
  3269. /*
  3270. * Charge the memory controller for page usage.
  3271. * Return
  3272. * 0 if the charge was successful
  3273. * < 0 if the cgroup is over its limit
  3274. */
  3275. static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
  3276. gfp_t gfp_mask, enum charge_type ctype)
  3277. {
  3278. struct mem_cgroup *memcg = NULL;
  3279. unsigned int nr_pages = 1;
  3280. bool oom = true;
  3281. int ret;
  3282. if (PageTransHuge(page)) {
  3283. nr_pages <<= compound_order(page);
  3284. VM_BUG_ON(!PageTransHuge(page));
  3285. /*
  3286. * Never OOM-kill a process for a huge page. The
  3287. * fault handler will fall back to regular pages.
  3288. */
  3289. oom = false;
  3290. }
  3291. ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
  3292. if (ret == -ENOMEM)
  3293. return ret;
  3294. __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
  3295. return 0;
  3296. }
  3297. int mem_cgroup_newpage_charge(struct page *page,
  3298. struct mm_struct *mm, gfp_t gfp_mask)
  3299. {
  3300. if (mem_cgroup_disabled())
  3301. return 0;
  3302. VM_BUG_ON(page_mapped(page));
  3303. VM_BUG_ON(page->mapping && !PageAnon(page));
  3304. VM_BUG_ON(!mm);
  3305. return mem_cgroup_charge_common(page, mm, gfp_mask,
  3306. MEM_CGROUP_CHARGE_TYPE_ANON);
  3307. }
  3308. /*
  3309. * While swap-in, try_charge -> commit or cancel, the page is locked.
  3310. * And when try_charge() successfully returns, one refcnt to memcg without
  3311. * struct page_cgroup is acquired. This refcnt will be consumed by
  3312. * "commit()" or removed by "cancel()"
  3313. */
  3314. static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
  3315. struct page *page,
  3316. gfp_t mask,
  3317. struct mem_cgroup **memcgp)
  3318. {
  3319. struct mem_cgroup *memcg;
  3320. struct page_cgroup *pc;
  3321. int ret;
  3322. pc = lookup_page_cgroup(page);
  3323. /*
  3324. * Every swap fault against a single page tries to charge the
  3325. * page, bail as early as possible. shmem_unuse() encounters
  3326. * already charged pages, too. The USED bit is protected by
  3327. * the page lock, which serializes swap cache removal, which
  3328. * in turn serializes uncharging.
  3329. */
  3330. if (PageCgroupUsed(pc))
  3331. return 0;
  3332. if (!do_swap_account)
  3333. goto charge_cur_mm;
  3334. memcg = try_get_mem_cgroup_from_page(page);
  3335. if (!memcg)
  3336. goto charge_cur_mm;
  3337. *memcgp = memcg;
  3338. ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
  3339. css_put(&memcg->css);
  3340. if (ret == -EINTR)
  3341. ret = 0;
  3342. return ret;
  3343. charge_cur_mm:
  3344. ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
  3345. if (ret == -EINTR)
  3346. ret = 0;
  3347. return ret;
  3348. }
  3349. int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
  3350. gfp_t gfp_mask, struct mem_cgroup **memcgp)
  3351. {
  3352. *memcgp = NULL;
  3353. if (mem_cgroup_disabled())
  3354. return 0;
  3355. /*
  3356. * A racing thread's fault, or swapoff, may have already
  3357. * updated the pte, and even removed page from swap cache: in
  3358. * those cases unuse_pte()'s pte_same() test will fail; but
  3359. * there's also a KSM case which does need to charge the page.
  3360. */
  3361. if (!PageSwapCache(page)) {
  3362. int ret;
  3363. ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
  3364. if (ret == -EINTR)
  3365. ret = 0;
  3366. return ret;
  3367. }
  3368. return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
  3369. }
  3370. void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
  3371. {
  3372. if (mem_cgroup_disabled())
  3373. return;
  3374. if (!memcg)
  3375. return;
  3376. __mem_cgroup_cancel_charge(memcg, 1);
  3377. }
  3378. static void
  3379. __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
  3380. enum charge_type ctype)
  3381. {
  3382. if (mem_cgroup_disabled())
  3383. return;
  3384. if (!memcg)
  3385. return;
  3386. __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
  3387. /*
3388. * Now the swap is in memory. This means this page may be
3389. * counted both as mem and swap, i.e. double-counted.
3390. * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3391. * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
3392. * may call delete_from_swap_cache() before we reach here.
  3393. */
  3394. if (do_swap_account && PageSwapCache(page)) {
  3395. swp_entry_t ent = {.val = page_private(page)};
  3396. mem_cgroup_uncharge_swap(ent);
  3397. }
  3398. }
  3399. void mem_cgroup_commit_charge_swapin(struct page *page,
  3400. struct mem_cgroup *memcg)
  3401. {
  3402. __mem_cgroup_commit_charge_swapin(page, memcg,
  3403. MEM_CGROUP_CHARGE_TYPE_ANON);
  3404. }
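/*
 * The swap-in charging protocol end to end, as a condensed sketch of what
 * a page-fault path is expected to do with the helpers above (illustrative
 * only; the real caller is the swap-in fault code in mm/memory.c):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;			(charge failed)
 *	...map the page into the page tables...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *	...or, if mapping the page fails after all:
 *	mem_cgroup_cancel_charge_swapin(memcg);
 */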
  3405. int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  3406. gfp_t gfp_mask)
  3407. {
  3408. struct mem_cgroup *memcg = NULL;
  3409. enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
  3410. int ret;
  3411. if (mem_cgroup_disabled())
  3412. return 0;
  3413. if (PageCompound(page))
  3414. return 0;
  3415. if (!PageSwapCache(page))
  3416. ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
  3417. else { /* page is swapcache/shmem */
  3418. ret = __mem_cgroup_try_charge_swapin(mm, page,
  3419. gfp_mask, &memcg);
  3420. if (!ret)
  3421. __mem_cgroup_commit_charge_swapin(page, memcg, type);
  3422. }
  3423. return ret;
  3424. }
  3425. static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
  3426. unsigned int nr_pages,
  3427. const enum charge_type ctype)
  3428. {
  3429. struct memcg_batch_info *batch = NULL;
  3430. bool uncharge_memsw = true;
  3431. /* If swapout, usage of swap doesn't decrease */
  3432. if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
  3433. uncharge_memsw = false;
  3434. batch = &current->memcg_batch;
  3435. /*
3436. * Usually, we do css_get() when we remember a memcg pointer.
  3437. * But in this case, we keep res->usage until end of a series of
  3438. * uncharges. Then, it's ok to ignore memcg's refcnt.
  3439. */
  3440. if (!batch->memcg)
  3441. batch->memcg = memcg;
  3442. /*
  3443. * do_batch > 0 when unmapping pages or inode invalidate/truncate.
  3444. * In those cases, all pages freed continuously can be expected to be in
3445. * the same cgroup and we have a chance to coalesce uncharges.
3446. * But we uncharge one by one if the task is killed by OOM (TIF_MEMDIE)
  3447. * because we want to do uncharge as soon as possible.
  3448. */
  3449. if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
  3450. goto direct_uncharge;
  3451. if (nr_pages > 1)
  3452. goto direct_uncharge;
  3453. /*
3454. * In the typical case, batch->memcg == memcg. This means we can
3455. * merge a series of uncharges into one uncharge of the res_counter.
3456. * If not, we uncharge the res_counter one by one.
  3457. */
  3458. if (batch->memcg != memcg)
  3459. goto direct_uncharge;
  3460. /* remember freed charge and uncharge it later */
  3461. batch->nr_pages++;
  3462. if (uncharge_memsw)
  3463. batch->memsw_nr_pages++;
  3464. return;
  3465. direct_uncharge:
  3466. res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
  3467. if (uncharge_memsw)
  3468. res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
  3469. if (unlikely(batch->memcg != memcg))
  3470. memcg_oom_recover(memcg);
  3471. }
  3472. /*
  3473. * uncharge if !page_mapped(page)
  3474. */
  3475. static struct mem_cgroup *
  3476. __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
  3477. bool end_migration)
  3478. {
  3479. struct mem_cgroup *memcg = NULL;
  3480. unsigned int nr_pages = 1;
  3481. struct page_cgroup *pc;
  3482. bool anon;
  3483. if (mem_cgroup_disabled())
  3484. return NULL;
  3485. VM_BUG_ON(PageSwapCache(page));
  3486. if (PageTransHuge(page)) {
  3487. nr_pages <<= compound_order(page);
  3488. VM_BUG_ON(!PageTransHuge(page));
  3489. }
  3490. /*
  3491. * Check if our page_cgroup is valid
  3492. */
  3493. pc = lookup_page_cgroup(page);
  3494. if (unlikely(!PageCgroupUsed(pc)))
  3495. return NULL;
  3496. lock_page_cgroup(pc);
  3497. memcg = pc->mem_cgroup;
  3498. if (!PageCgroupUsed(pc))
  3499. goto unlock_out;
  3500. anon = PageAnon(page);
  3501. switch (ctype) {
  3502. case MEM_CGROUP_CHARGE_TYPE_ANON:
  3503. /*
  3504. * Generally PageAnon tells if it's the anon statistics to be
  3505. * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
  3506. * used before page reached the stage of being marked PageAnon.
  3507. */
  3508. anon = true;
  3509. /* fallthrough */
  3510. case MEM_CGROUP_CHARGE_TYPE_DROP:
  3511. /* See mem_cgroup_prepare_migration() */
  3512. if (page_mapped(page))
  3513. goto unlock_out;
  3514. /*
  3515. * Pages under migration may not be uncharged. But
  3516. * end_migration() /must/ be the one uncharging the
  3517. * unused post-migration page and so it has to call
  3518. * here with the migration bit still set. See the
  3519. * res_counter handling below.
  3520. */
  3521. if (!end_migration && PageCgroupMigration(pc))
  3522. goto unlock_out;
  3523. break;
  3524. case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
  3525. if (!PageAnon(page)) { /* Shared memory */
  3526. if (page->mapping && !page_is_file_cache(page))
  3527. goto unlock_out;
  3528. } else if (page_mapped(page)) /* Anon */
  3529. goto unlock_out;
  3530. break;
  3531. default:
  3532. break;
  3533. }
  3534. mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
  3535. ClearPageCgroupUsed(pc);
  3536. /*
  3537. * pc->mem_cgroup is not cleared here. It will be accessed when it's
3538. * freed from the LRU. This is safe because an uncharged page is expected
3539. * not to be reused (it is freed soon). The exception is SwapCache; it's handled by
  3540. * special functions.
  3541. */
  3542. unlock_page_cgroup(pc);
  3543. /*
  3544. * even after unlock, we have memcg->res.usage here and this memcg
  3545. * will never be freed.
  3546. */
  3547. memcg_check_events(memcg, page);
  3548. if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
  3549. mem_cgroup_swap_statistics(memcg, true);
  3550. mem_cgroup_get(memcg);
  3551. }
  3552. /*
  3553. * Migration does not charge the res_counter for the
  3554. * replacement page, so leave it alone when phasing out the
  3555. * page that is unused after the migration.
  3556. */
  3557. if (!end_migration && !mem_cgroup_is_root(memcg))
  3558. mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
  3559. return memcg;
  3560. unlock_out:
  3561. unlock_page_cgroup(pc);
  3562. return NULL;
  3563. }
  3564. void mem_cgroup_uncharge_page(struct page *page)
  3565. {
  3566. /* early check. */
  3567. if (page_mapped(page))
  3568. return;
  3569. VM_BUG_ON(page->mapping && !PageAnon(page));
  3570. if (PageSwapCache(page))
  3571. return;
  3572. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
  3573. }
  3574. void mem_cgroup_uncharge_cache_page(struct page *page)
  3575. {
  3576. VM_BUG_ON(page_mapped(page));
  3577. VM_BUG_ON(page->mapping);
  3578. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
  3579. }
  3580. /*
3581. * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3582. * In those cases, pages are freed continuously and we can expect that they
3583. * are in the same memcg. Each of these callers itself limits the number of
3584. * pages freed at once, so uncharge_start/end() is called properly.
3585. * This may be called multiple (2 or more) times in one context.
  3586. */
  3587. void mem_cgroup_uncharge_start(void)
  3588. {
  3589. current->memcg_batch.do_batch++;
  3590. /* We can do nest. */
  3591. if (current->memcg_batch.do_batch == 1) {
  3592. current->memcg_batch.memcg = NULL;
  3593. current->memcg_batch.nr_pages = 0;
  3594. current->memcg_batch.memsw_nr_pages = 0;
  3595. }
  3596. }
  3597. void mem_cgroup_uncharge_end(void)
  3598. {
  3599. struct memcg_batch_info *batch = &current->memcg_batch;
  3600. if (!batch->do_batch)
  3601. return;
  3602. batch->do_batch--;
  3603. if (batch->do_batch) /* If stacked, do nothing. */
  3604. return;
  3605. if (!batch->memcg)
  3606. return;
  3607. /*
  3608. * This "batch->memcg" is valid without any css_get/put etc...
3609. * because we hide charges behind us.
  3610. */
  3611. if (batch->nr_pages)
  3612. res_counter_uncharge(&batch->memcg->res,
  3613. batch->nr_pages * PAGE_SIZE);
  3614. if (batch->memsw_nr_pages)
  3615. res_counter_uncharge(&batch->memcg->memsw,
  3616. batch->memsw_nr_pages * PAGE_SIZE);
  3617. memcg_oom_recover(batch->memcg);
  3618. /* forget this pointer (for sanity check) */
  3619. batch->memcg = NULL;
  3620. }
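/*
 * Typical batching pattern built from the two functions above (a sketch of
 * what the truncate/unmap paths end up doing; the loop body stands for
 * whatever eventually reaches __mem_cgroup_uncharge_common()):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being released:
 *		mem_cgroup_uncharge_page(page);	   (or _cache_page())
 *	mem_cgroup_uncharge_end();
 *
 * All uncharges between start() and end() that hit the same memcg are
 * coalesced into a single res_counter_uncharge() at end().
 */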
  3621. #ifdef CONFIG_SWAP
  3622. /*
3623. * Called after __delete_from_swap_cache(); drops the "page" account.
3624. * The memcg information is recorded in the swap_cgroup of "ent".
  3625. */
  3626. void
  3627. mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
  3628. {
  3629. struct mem_cgroup *memcg;
  3630. int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
  3631. if (!swapout) /* this was a swap cache but the swap is unused ! */
  3632. ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
  3633. memcg = __mem_cgroup_uncharge_common(page, ctype, false);
  3634. /*
  3635. * record memcg information, if swapout && memcg != NULL,
  3636. * mem_cgroup_get() was called in uncharge().
  3637. */
  3638. if (do_swap_account && swapout && memcg)
  3639. swap_cgroup_record(ent, css_id(&memcg->css));
  3640. }
  3641. #endif
  3642. #ifdef CONFIG_MEMCG_SWAP
  3643. /*
3644. * Called from swap_entry_free(). Removes the record in swap_cgroup and
3645. * uncharges the "memsw" account.
  3646. */
  3647. void mem_cgroup_uncharge_swap(swp_entry_t ent)
  3648. {
  3649. struct mem_cgroup *memcg;
  3650. unsigned short id;
  3651. if (!do_swap_account)
  3652. return;
  3653. id = swap_cgroup_record(ent, 0);
  3654. rcu_read_lock();
  3655. memcg = mem_cgroup_lookup(id);
  3656. if (memcg) {
  3657. /*
3658. * We uncharge this because the swap is freed.
3659. * This memcg can be an obsolete one. We avoid calling css_tryget().
  3660. */
  3661. if (!mem_cgroup_is_root(memcg))
  3662. res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
  3663. mem_cgroup_swap_statistics(memcg, false);
  3664. mem_cgroup_put(memcg);
  3665. }
  3666. rcu_read_unlock();
  3667. }
  3668. /**
  3669. * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
  3670. * @entry: swap entry to be moved
  3671. * @from: mem_cgroup which the entry is moved from
  3672. * @to: mem_cgroup which the entry is moved to
  3673. *
  3674. * It succeeds only when the swap_cgroup's record for this entry is the same
  3675. * as the mem_cgroup's id of @from.
  3676. *
  3677. * Returns 0 on success, -EINVAL on failure.
  3678. *
  3679. * The caller must have charged to @to, IOW, called res_counter_charge() about
  3680. * both res and memsw, and called css_get().
  3681. */
  3682. static int mem_cgroup_move_swap_account(swp_entry_t entry,
  3683. struct mem_cgroup *from, struct mem_cgroup *to)
  3684. {
  3685. unsigned short old_id, new_id;
  3686. old_id = css_id(&from->css);
  3687. new_id = css_id(&to->css);
  3688. if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
  3689. mem_cgroup_swap_statistics(from, false);
  3690. mem_cgroup_swap_statistics(to, true);
  3691. /*
  3692. * This function is only called from task migration context now.
  3693. * It postpones res_counter and refcount handling till the end
  3694. * of task migration(mem_cgroup_clear_mc()) for performance
  3695. * improvement. But we cannot postpone mem_cgroup_get(to)
  3696. * because if the process that has been moved to @to does
  3697. * swap-in, the refcount of @to might be decreased to 0.
  3698. */
  3699. mem_cgroup_get(to);
  3700. return 0;
  3701. }
  3702. return -EINVAL;
  3703. }
  3704. #else
  3705. static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
  3706. struct mem_cgroup *from, struct mem_cgroup *to)
  3707. {
  3708. return -EINVAL;
  3709. }
  3710. #endif
  3711. /*
  3712. * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  3713. * page belongs to.
  3714. */
  3715. void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
  3716. struct mem_cgroup **memcgp)
  3717. {
  3718. struct mem_cgroup *memcg = NULL;
  3719. unsigned int nr_pages = 1;
  3720. struct page_cgroup *pc;
  3721. enum charge_type ctype;
  3722. *memcgp = NULL;
  3723. if (mem_cgroup_disabled())
  3724. return;
  3725. if (PageTransHuge(page))
  3726. nr_pages <<= compound_order(page);
  3727. pc = lookup_page_cgroup(page);
  3728. lock_page_cgroup(pc);
  3729. if (PageCgroupUsed(pc)) {
  3730. memcg = pc->mem_cgroup;
  3731. css_get(&memcg->css);
  3732. /*
3733. * When migrating an anonymous page, its mapcount goes down
  3734. * to 0 and uncharge() will be called. But, even if it's fully
  3735. * unmapped, migration may fail and this page has to be
  3736. * charged again. We set MIGRATION flag here and delay uncharge
  3737. * until end_migration() is called
  3738. *
  3739. * Corner Case Thinking
  3740. * A)
  3741. * When the old page was mapped as Anon and it's unmap-and-freed
  3742. * while migration was ongoing.
  3743. * If unmap finds the old page, uncharge() of it will be delayed
  3744. * until end_migration(). If unmap finds a new page, it's
3745. * uncharged when it makes the mapcount go from 1 to 0. If the unmap code
3746. * finds a swap migration entry, the new page will not be mapped
3747. * and end_migration() will find it (mapcount == 0).
  3748. *
  3749. * B)
3750. * When the old page was mapped but migration fails, the kernel
  3751. * remaps it. A charge for it is kept by MIGRATION flag even
  3752. * if mapcount goes down to 0. We can do remap successfully
  3753. * without charging it again.
  3754. *
  3755. * C)
  3756. * The "old" page is under lock_page() until the end of
  3757. * migration, so, the old page itself will not be swapped-out.
3758. * If the new page is swapped out before end_migration, our
  3759. * hook to usual swap-out path will catch the event.
  3760. */
  3761. if (PageAnon(page))
  3762. SetPageCgroupMigration(pc);
  3763. }
  3764. unlock_page_cgroup(pc);
  3765. /*
  3766. * If the page is not charged at this point,
  3767. * we return here.
  3768. */
  3769. if (!memcg)
  3770. return;
  3771. *memcgp = memcg;
  3772. /*
  3773. * We charge new page before it's used/mapped. So, even if unlock_page()
  3774. * is called before end_migration, we can catch all events on this new
  3775. * page. In the case new page is migrated but not remapped, new page's
  3776. * mapcount will be finally 0 and we call uncharge in end_migration().
  3777. */
  3778. if (PageAnon(page))
  3779. ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
  3780. else
  3781. ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
  3782. /*
  3783. * The page is committed to the memcg, but it's not actually
  3784. * charged to the res_counter since we plan on replacing the
  3785. * old one and only one page is going to be left afterwards.
  3786. */
  3787. __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
  3788. }
3789. /* remove redundant charge if migration failed */
  3790. void mem_cgroup_end_migration(struct mem_cgroup *memcg,
  3791. struct page *oldpage, struct page *newpage, bool migration_ok)
  3792. {
  3793. struct page *used, *unused;
  3794. struct page_cgroup *pc;
  3795. bool anon;
  3796. if (!memcg)
  3797. return;
  3798. if (!migration_ok) {
  3799. used = oldpage;
  3800. unused = newpage;
  3801. } else {
  3802. used = newpage;
  3803. unused = oldpage;
  3804. }
  3805. anon = PageAnon(used);
  3806. __mem_cgroup_uncharge_common(unused,
  3807. anon ? MEM_CGROUP_CHARGE_TYPE_ANON
  3808. : MEM_CGROUP_CHARGE_TYPE_CACHE,
  3809. true);
  3810. css_put(&memcg->css);
  3811. /*
  3812. * We disallowed uncharge of pages under migration because mapcount
3813. * of the page goes down to zero, temporarily.
  3814. * Clear the flag and check the page should be charged.
  3815. */
  3816. pc = lookup_page_cgroup(oldpage);
  3817. lock_page_cgroup(pc);
  3818. ClearPageCgroupMigration(pc);
  3819. unlock_page_cgroup(pc);
  3820. /*
  3821. * If a page is a file cache, radix-tree replacement is very atomic
  3822. * and we can skip this check. When it was an Anon page, its mapcount
3823. * goes down to 0. But because we added the MIGRATION flag, it's not
3824. * uncharged yet. There are several cases, but the page->mapcount check
3825. * and USED bit check in mem_cgroup_uncharge_page() will do enough
3826. * checking. (see prepare_charge() also)
  3827. */
  3828. if (anon)
  3829. mem_cgroup_uncharge_page(used);
  3830. }
  3831. /*
3832. * When replacing page cache, the newpage is not under any memcg but it is
3833. * on the LRU. So this function doesn't touch the res_counter but handles
3834. * the LRU in the correct way. Both pages are locked so we cannot race with uncharge.
  3835. */
  3836. void mem_cgroup_replace_page_cache(struct page *oldpage,
  3837. struct page *newpage)
  3838. {
  3839. struct mem_cgroup *memcg = NULL;
  3840. struct page_cgroup *pc;
  3841. enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
  3842. if (mem_cgroup_disabled())
  3843. return;
  3844. pc = lookup_page_cgroup(oldpage);
  3845. /* fix accounting on old pages */
  3846. lock_page_cgroup(pc);
  3847. if (PageCgroupUsed(pc)) {
  3848. memcg = pc->mem_cgroup;
  3849. mem_cgroup_charge_statistics(memcg, false, -1);
  3850. ClearPageCgroupUsed(pc);
  3851. }
  3852. unlock_page_cgroup(pc);
  3853. /*
  3854. * When called from shmem_replace_page(), in some cases the
  3855. * oldpage has already been charged, and in some cases not.
  3856. */
  3857. if (!memcg)
  3858. return;
  3859. /*
  3860. * Even if newpage->mapping was NULL before starting replacement,
3861. * the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
  3862. * LRU while we overwrite pc->mem_cgroup.
  3863. */
  3864. __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
  3865. }
  3866. #ifdef CONFIG_DEBUG_VM
  3867. static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
  3868. {
  3869. struct page_cgroup *pc;
  3870. pc = lookup_page_cgroup(page);
  3871. /*
  3872. * Can be NULL while feeding pages into the page allocator for
  3873. * the first time, i.e. during boot or memory hotplug;
  3874. * or when mem_cgroup_disabled().
  3875. */
  3876. if (likely(pc) && PageCgroupUsed(pc))
  3877. return pc;
  3878. return NULL;
  3879. }
  3880. bool mem_cgroup_bad_page_check(struct page *page)
  3881. {
  3882. if (mem_cgroup_disabled())
  3883. return false;
  3884. return lookup_page_cgroup_used(page) != NULL;
  3885. }
  3886. void mem_cgroup_print_bad_page(struct page *page)
  3887. {
  3888. struct page_cgroup *pc;
  3889. pc = lookup_page_cgroup_used(page);
  3890. if (pc) {
  3891. pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
  3892. pc, pc->flags, pc->mem_cgroup);
  3893. }
  3894. }
  3895. #endif
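/*
 * Try to apply a new memory.limit_in_bytes value. The new limit may not
 * exceed the current memsw limit; while usage stays above the new value
 * we run targeted reclaim and retry a bounded number of times.
 */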
  3896. static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
  3897. unsigned long long val)
  3898. {
  3899. int retry_count;
  3900. u64 memswlimit, memlimit;
  3901. int ret = 0;
  3902. int children = mem_cgroup_count_children(memcg);
  3903. u64 curusage, oldusage;
  3904. int enlarge;
  3905. /*
3906. * To keep hierarchical_reclaim simple, how long we should retry
3907. * depends on the caller. We set our retry count to be a function
3908. * of the number of children we need to visit in this loop.
  3909. */
  3910. retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
  3911. oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  3912. enlarge = 0;
  3913. while (retry_count) {
  3914. if (signal_pending(current)) {
  3915. ret = -EINTR;
  3916. break;
  3917. }
  3918. /*
3919. * Rather than hiding all of this in some function, do it in an
3920. * open-coded manner so it is clear what really happens.
  3921. * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
  3922. */
  3923. mutex_lock(&set_limit_mutex);
  3924. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3925. if (memswlimit < val) {
  3926. ret = -EINVAL;
  3927. mutex_unlock(&set_limit_mutex);
  3928. break;
  3929. }
  3930. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3931. if (memlimit < val)
  3932. enlarge = 1;
  3933. ret = res_counter_set_limit(&memcg->res, val);
  3934. if (!ret) {
  3935. if (memswlimit == val)
  3936. memcg->memsw_is_minimum = true;
  3937. else
  3938. memcg->memsw_is_minimum = false;
  3939. }
  3940. mutex_unlock(&set_limit_mutex);
  3941. if (!ret)
  3942. break;
  3943. mem_cgroup_reclaim(memcg, GFP_KERNEL,
  3944. MEM_CGROUP_RECLAIM_SHRINK);
  3945. curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3946. /* Was usage reduced? */
  3947. if (curusage >= oldusage)
  3948. retry_count--;
  3949. else
  3950. oldusage = curusage;
  3951. }
  3952. if (!ret && enlarge)
  3953. memcg_oom_recover(memcg);
  3954. return ret;
  3955. }
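/*
 * Same idea as above, but for memory.memsw.limit_in_bytes: the new limit
 * may not be smaller than the current memory limit, and the reclaim done
 * here is not allowed to swap.
 */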
  3956. static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
  3957. unsigned long long val)
  3958. {
  3959. int retry_count;
  3960. u64 memlimit, memswlimit, oldusage, curusage;
  3961. int children = mem_cgroup_count_children(memcg);
  3962. int ret = -EBUSY;
  3963. int enlarge = 0;
  3964. /* see mem_cgroup_resize_res_limit */
  3965. retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
  3966. oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  3967. while (retry_count) {
  3968. if (signal_pending(current)) {
  3969. ret = -EINTR;
  3970. break;
  3971. }
  3972. /*
3973. * Rather than hiding all of this in some function, do it in an
3974. * open-coded manner so it is clear what really happens.
  3975. * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
  3976. */
  3977. mutex_lock(&set_limit_mutex);
  3978. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  3979. if (memlimit > val) {
  3980. ret = -EINVAL;
  3981. mutex_unlock(&set_limit_mutex);
  3982. break;
  3983. }
  3984. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  3985. if (memswlimit < val)
  3986. enlarge = 1;
  3987. ret = res_counter_set_limit(&memcg->memsw, val);
  3988. if (!ret) {
  3989. if (memlimit == val)
  3990. memcg->memsw_is_minimum = true;
  3991. else
  3992. memcg->memsw_is_minimum = false;
  3993. }
  3994. mutex_unlock(&set_limit_mutex);
  3995. if (!ret)
  3996. break;
  3997. mem_cgroup_reclaim(memcg, GFP_KERNEL,
  3998. MEM_CGROUP_RECLAIM_NOSWAP |
  3999. MEM_CGROUP_RECLAIM_SHRINK);
  4000. curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4001. /* Was usage reduced? */
  4002. if (curusage >= oldusage)
  4003. retry_count--;
  4004. else
  4005. oldusage = curusage;
  4006. }
  4007. if (!ret && enlarge)
  4008. memcg_oom_recover(memcg);
  4009. return ret;
  4010. }
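/*
 * Soft limit reclaim for a zone: repeatedly pick the memcg that exceeds
 * its soft limit by the largest amount from the per-zone tree, reclaim
 * from it, and re-insert it with its updated excess. Only used for
 * order-0 allocations.
 */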
  4011. unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
  4012. gfp_t gfp_mask,
  4013. unsigned long *total_scanned)
  4014. {
  4015. unsigned long nr_reclaimed = 0;
  4016. struct mem_cgroup_per_zone *mz, *next_mz = NULL;
  4017. unsigned long reclaimed;
  4018. int loop = 0;
  4019. struct mem_cgroup_tree_per_zone *mctz;
  4020. unsigned long long excess;
  4021. unsigned long nr_scanned;
  4022. if (order > 0)
  4023. return 0;
  4024. mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
  4025. /*
4026. * This loop can run for a while, especially if mem_cgroups continuously
4027. * keep exceeding their soft limit and putting the system under
4028. * pressure.
  4029. */
  4030. do {
  4031. if (next_mz)
  4032. mz = next_mz;
  4033. else
  4034. mz = mem_cgroup_largest_soft_limit_node(mctz);
  4035. if (!mz)
  4036. break;
  4037. nr_scanned = 0;
  4038. reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
  4039. gfp_mask, &nr_scanned);
  4040. nr_reclaimed += reclaimed;
  4041. *total_scanned += nr_scanned;
  4042. spin_lock(&mctz->lock);
  4043. /*
4044. * If we failed to reclaim anything from this memory cgroup,
4045. * it is time to move on to the next cgroup.
  4046. */
  4047. next_mz = NULL;
  4048. if (!reclaimed) {
  4049. do {
  4050. /*
  4051. * Loop until we find yet another one.
  4052. *
  4053. * By the time we get the soft_limit lock
4054. * again, someone might have added the
4055. * group back on the RB tree. Iterate to
4056. * make sure we get a different memcg.
4057. * mem_cgroup_largest_soft_limit_node returns
4058. * NULL if no other cgroup is present on
4059. * the tree.
  4060. */
  4061. next_mz =
  4062. __mem_cgroup_largest_soft_limit_node(mctz);
  4063. if (next_mz == mz)
  4064. css_put(&next_mz->memcg->css);
  4065. else /* next_mz == NULL or other memcg */
  4066. break;
  4067. } while (1);
  4068. }
  4069. __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
  4070. excess = res_counter_soft_limit_excess(&mz->memcg->res);
  4071. /*
  4072. * One school of thought says that we should not add
  4073. * back the node to the tree if reclaim returns 0.
4074. * But our reclaim could return 0 simply because, due
4075. * to priority, we are exposing a smaller subset of
4076. * memory to reclaim from. Consider this a longer-
4077. * term TODO.
  4078. */
  4079. /* If excess == 0, no tree ops */
  4080. __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
  4081. spin_unlock(&mctz->lock);
  4082. css_put(&mz->memcg->css);
  4083. loop++;
  4084. /*
  4085. * Could not reclaim anything and there are no more
  4086. * mem cgroups to try or we seem to be looping without
  4087. * reclaiming anything.
  4088. */
  4089. if (!nr_reclaimed &&
  4090. (next_mz == NULL ||
  4091. loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
  4092. break;
  4093. } while (!nr_reclaimed);
  4094. if (next_mz)
  4095. css_put(&next_mz->memcg->css);
  4096. return nr_reclaimed;
  4097. }
  4098. /**
  4099. * mem_cgroup_force_empty_list - clears LRU of a group
  4100. * @memcg: group to clear
  4101. * @node: NUMA node
  4102. * @zid: zone id
4103. * @lru: lru to clear
4104. *
4105. * Traverse a specified page_cgroup list and try to drop all pages on it. This
4106. * doesn't reclaim the pages themselves - pages are moved to the parent (or root)
  4107. * group.
  4108. */
  4109. static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
  4110. int node, int zid, enum lru_list lru)
  4111. {
  4112. struct lruvec *lruvec;
  4113. unsigned long flags;
  4114. struct list_head *list;
  4115. struct page *busy;
  4116. struct zone *zone;
  4117. zone = &NODE_DATA(node)->node_zones[zid];
  4118. lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  4119. list = &lruvec->lists[lru];
  4120. busy = NULL;
  4121. do {
  4122. struct page_cgroup *pc;
  4123. struct page *page;
  4124. spin_lock_irqsave(&zone->lru_lock, flags);
  4125. if (list_empty(list)) {
  4126. spin_unlock_irqrestore(&zone->lru_lock, flags);
  4127. break;
  4128. }
  4129. page = list_entry(list->prev, struct page, lru);
  4130. if (busy == page) {
  4131. list_move(&page->lru, list);
  4132. busy = NULL;
  4133. spin_unlock_irqrestore(&zone->lru_lock, flags);
  4134. continue;
  4135. }
  4136. spin_unlock_irqrestore(&zone->lru_lock, flags);
  4137. pc = lookup_page_cgroup(page);
  4138. if (mem_cgroup_move_parent(page, pc, memcg)) {
  4139. /* found lock contention or "pc" is obsolete. */
  4140. busy = page;
  4141. cond_resched();
  4142. } else
  4143. busy = NULL;
  4144. } while (!list_empty(list));
  4145. }
  4146. /*
4147. * Make the mem_cgroup's charge 0 if there is no task, by moving
4148. * all the charges and pages to the parent.
4149. * This makes it possible to delete this mem_cgroup.
  4150. *
  4151. * Caller is responsible for holding css reference on the memcg.
  4152. */
  4153. static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
  4154. {
  4155. int node, zid;
  4156. u64 usage;
  4157. do {
4158. /* This is for making sure all *used* pages are on the LRU. */
  4159. lru_add_drain_all();
  4160. drain_all_stock_sync(memcg);
  4161. mem_cgroup_start_move(memcg);
  4162. for_each_node_state(node, N_MEMORY) {
  4163. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  4164. enum lru_list lru;
  4165. for_each_lru(lru) {
  4166. mem_cgroup_force_empty_list(memcg,
  4167. node, zid, lru);
  4168. }
  4169. }
  4170. }
  4171. mem_cgroup_end_move(memcg);
  4172. memcg_oom_recover(memcg);
  4173. cond_resched();
  4174. /*
4175. * Kernel memory may not necessarily be attributable to a specific
4176. * process, so it is not migrated, and therefore we can't
4177. * expect its value to drop to 0 here.
  4178. * Having res filled up with kmem only is enough.
  4179. *
  4180. * This is a safety check because mem_cgroup_force_empty_list
  4181. * could have raced with mem_cgroup_replace_page_cache callers
  4182. * so the lru seemed empty but the page could have been added
  4183. * right after the check. RES_USAGE should be safe as we always
  4184. * charge before adding to the LRU.
  4185. */
  4186. usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
  4187. res_counter_read_u64(&memcg->kmem, RES_USAGE);
  4188. } while (usage > 0);
  4189. }
  4190. /*
4191. * This mainly exists for tests during the setting of use_hierarchy.
4192. * Since this is the very setting we are changing, the current hierarchy value
4193. * is meaningless.
  4194. */
  4195. static inline bool __memcg_has_children(struct mem_cgroup *memcg)
  4196. {
  4197. struct cgroup *pos;
  4198. /* bounce at first found */
  4199. cgroup_for_each_child(pos, memcg->css.cgroup)
  4200. return true;
  4201. return false;
  4202. }
  4203. /*
  4204. * Must be called with cgroup_lock held, unless the cgroup is guaranteed to be
  4205. * already dead (in mem_cgroup_force_empty(), for instance). This is different
  4206. * from mem_cgroup_count_children(), in the sense that we don't really care how
  4207. * many children we have; we only need to know if we have any. It also counts
  4208. * any memcg without hierarchy as infertile.
  4209. */
  4210. static inline bool memcg_has_children(struct mem_cgroup *memcg)
  4211. {
  4212. return memcg->use_hierarchy && __memcg_has_children(memcg);
  4213. }
  4214. /*
  4215. * Reclaims as many pages from the given memcg as possible and moves
  4216. * the rest to the parent.
  4217. *
  4218. * Caller is responsible for holding css reference for memcg.
  4219. */
  4220. static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
  4221. {
  4222. int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
  4223. struct cgroup *cgrp = memcg->css.cgroup;
  4224. /* returns EBUSY if there is a task or if we come here twice. */
  4225. if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
  4226. return -EBUSY;
4227. /* we call try-to-free pages to make this cgroup empty */
  4228. lru_add_drain_all();
  4229. /* try to free all pages in this cgroup */
  4230. while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
  4231. int progress;
  4232. if (signal_pending(current))
  4233. return -EINTR;
  4234. progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
  4235. false);
  4236. if (!progress) {
  4237. nr_retries--;
  4238. /* maybe some writeback is necessary */
  4239. congestion_wait(BLK_RW_ASYNC, HZ/10);
  4240. }
  4241. }
  4242. lru_add_drain();
  4243. mem_cgroup_reparent_charges(memcg);
  4244. return 0;
  4245. }
  4246. static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
  4247. {
  4248. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4249. int ret;
  4250. if (mem_cgroup_is_root(memcg))
  4251. return -EINVAL;
  4252. css_get(&memcg->css);
  4253. ret = mem_cgroup_force_empty(memcg);
  4254. css_put(&memcg->css);
  4255. return ret;
  4256. }
  4257. static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
  4258. {
  4259. return mem_cgroup_from_cont(cont)->use_hierarchy;
  4260. }
  4261. static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
  4262. u64 val)
  4263. {
  4264. int retval = 0;
  4265. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4266. struct cgroup *parent = cont->parent;
  4267. struct mem_cgroup *parent_memcg = NULL;
  4268. if (parent)
  4269. parent_memcg = mem_cgroup_from_cont(parent);
  4270. cgroup_lock();
  4271. if (memcg->use_hierarchy == val)
  4272. goto out;
  4273. /*
  4274. * If parent's use_hierarchy is set, we can't make any modifications
  4275. * in the child subtrees. If it is unset, then the change can
  4276. * occur, provided the current cgroup has no children.
  4277. *
4278. * For the root cgroup, parent_memcg is NULL, so we allow the value to be
  4279. * set if there are no children.
  4280. */
  4281. if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
  4282. (val == 1 || val == 0)) {
  4283. if (!__memcg_has_children(memcg))
  4284. memcg->use_hierarchy = val;
  4285. else
  4286. retval = -EBUSY;
  4287. } else
  4288. retval = -EINVAL;
  4289. out:
  4290. cgroup_unlock();
  4291. return retval;
  4292. }
  4293. static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
  4294. enum mem_cgroup_stat_index idx)
  4295. {
  4296. struct mem_cgroup *iter;
  4297. long val = 0;
  4298. /* Per-cpu values can be negative, use a signed accumulator */
  4299. for_each_mem_cgroup_tree(iter, memcg)
  4300. val += mem_cgroup_read_stat(iter, idx);
  4301. if (val < 0) /* race ? */
  4302. val = 0;
  4303. return val;
  4304. }
  4305. static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
  4306. {
  4307. u64 val;
  4308. if (!mem_cgroup_is_root(memcg)) {
  4309. if (!swap)
  4310. return res_counter_read_u64(&memcg->res, RES_USAGE);
  4311. else
  4312. return res_counter_read_u64(&memcg->memsw, RES_USAGE);
  4313. }
  4314. val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
  4315. val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
  4316. if (swap)
  4317. val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
  4318. return val << PAGE_SHIFT;
  4319. }
  4320. static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
  4321. struct file *file, char __user *buf,
  4322. size_t nbytes, loff_t *ppos)
  4323. {
  4324. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4325. char str[64];
  4326. u64 val;
  4327. int name, len;
  4328. enum res_type type;
  4329. type = MEMFILE_TYPE(cft->private);
  4330. name = MEMFILE_ATTR(cft->private);
  4331. if (!do_swap_account && type == _MEMSWAP)
  4332. return -EOPNOTSUPP;
  4333. switch (type) {
  4334. case _MEM:
  4335. if (name == RES_USAGE)
  4336. val = mem_cgroup_usage(memcg, false);
  4337. else
  4338. val = res_counter_read_u64(&memcg->res, name);
  4339. break;
  4340. case _MEMSWAP:
  4341. if (name == RES_USAGE)
  4342. val = mem_cgroup_usage(memcg, true);
  4343. else
  4344. val = res_counter_read_u64(&memcg->memsw, name);
  4345. break;
  4346. case _KMEM:
  4347. val = res_counter_read_u64(&memcg->kmem, name);
  4348. break;
  4349. default:
  4350. BUG();
  4351. }
  4352. len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
  4353. return simple_read_from_buffer(buf, nbytes, ppos, str, len);
  4354. }
  4355. static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
  4356. {
  4357. int ret = -EINVAL;
  4358. #ifdef CONFIG_MEMCG_KMEM
  4359. bool must_inc_static_branch = false;
  4360. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4361. /*
  4362. * For simplicity, we won't allow this to be disabled. It also can't
  4363. * be changed if the cgroup has children already, or if tasks had
  4364. * already joined.
  4365. *
  4366. * If tasks join before we set the limit, a person looking at
  4367. * kmem.usage_in_bytes will have no way to determine when it took
  4368. * place, which makes the value quite meaningless.
  4369. *
  4370. * After it first became limited, changes in the value of the limit are
  4371. * of course permitted.
  4372. *
  4373. * Taking the cgroup_lock is really offensive, but it is so far the only
  4374. * way to guarantee that no children will appear. There are plenty of
  4375. * other offenders, and they should all go away. Fine grained locking
  4376. * is probably the way to go here. When we are fully hierarchical, we
  4377. * can also get rid of the use_hierarchy check.
  4378. */
  4379. cgroup_lock();
  4380. mutex_lock(&set_limit_mutex);
  4381. if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
  4382. if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
  4383. ret = -EBUSY;
  4384. goto out;
  4385. }
  4386. ret = res_counter_set_limit(&memcg->kmem, val);
  4387. VM_BUG_ON(ret);
  4388. ret = memcg_update_cache_sizes(memcg);
  4389. if (ret) {
  4390. res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
  4391. goto out;
  4392. }
  4393. must_inc_static_branch = true;
  4394. /*
  4395. * kmem charges can outlive the cgroup. In the case of slab
4396. * pages, for instance, a page may contain objects from various
4397. * processes, so it is infeasible to migrate them away. We
4398. * need to reference-count the memcg because of that.
  4399. */
  4400. mem_cgroup_get(memcg);
  4401. } else
  4402. ret = res_counter_set_limit(&memcg->kmem, val);
  4403. out:
  4404. mutex_unlock(&set_limit_mutex);
  4405. cgroup_unlock();
  4406. /*
  4407. * We are by now familiar with the fact that we can't inc the static
  4408. * branch inside cgroup_lock. See disarm functions for details. A
  4409. * worker here is overkill, but also wrong: After the limit is set, we
  4410. * must start accounting right away. Since this operation can't fail,
  4411. * we can safely defer it to here - no rollback will be needed.
  4412. *
  4413. * The boolean used to control this is also safe, because
  4414. * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
4415. * able to set it to true.
  4416. */
  4417. if (must_inc_static_branch) {
  4418. static_key_slow_inc(&memcg_kmem_enabled_key);
  4419. /*
  4420. * setting the active bit after the inc will guarantee no one
  4421. * starts accounting before all call sites are patched
  4422. */
  4423. memcg_kmem_set_active(memcg);
  4424. }
  4425. #endif
  4426. return ret;
  4427. }
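/*
 * Propagate the parent's kmem accounting state to a newly created child:
 * the flags are inherited, and if accounting is already active we take
 * the extra memcg reference and static branch count for the child too.
 */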
  4428. static int memcg_propagate_kmem(struct mem_cgroup *memcg)
  4429. {
  4430. int ret = 0;
  4431. struct mem_cgroup *parent = parent_mem_cgroup(memcg);
  4432. if (!parent)
  4433. goto out;
  4434. memcg->kmem_account_flags = parent->kmem_account_flags;
  4435. #ifdef CONFIG_MEMCG_KMEM
  4436. /*
4437. * When that happens, we need to disable the static branch only on those
4438. * memcgs that enabled it. To achieve this, we would be forced to
4439. * complicate the code by keeping track of which memcgs were the ones
4440. * that actually enabled limits, and which ones got it from their
  4441. * parents.
  4442. *
  4443. * It is a lot simpler just to do static_key_slow_inc() on every child
  4444. * that is accounted.
  4445. */
  4446. if (!memcg_kmem_is_active(memcg))
  4447. goto out;
  4448. /*
  4449. * destroy(), called if we fail, will issue static_key_slow_inc() and
  4450. * mem_cgroup_put() if kmem is enabled. We have to either call them
  4451. * unconditionally, or clear the KMEM_ACTIVE flag. I personally find
  4452. * this more consistent, since it always leads to the same destroy path
  4453. */
  4454. mem_cgroup_get(memcg);
  4455. static_key_slow_inc(&memcg_kmem_enabled_key);
  4456. mutex_lock(&set_limit_mutex);
  4457. ret = memcg_update_cache_sizes(memcg);
  4458. mutex_unlock(&set_limit_mutex);
  4459. #endif
  4460. out:
  4461. return ret;
  4462. }
  4463. /*
  4464. * The user of this function is...
  4465. * RES_LIMIT.
  4466. */
  4467. static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
  4468. const char *buffer)
  4469. {
  4470. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4471. enum res_type type;
  4472. int name;
  4473. unsigned long long val;
  4474. int ret;
  4475. type = MEMFILE_TYPE(cft->private);
  4476. name = MEMFILE_ATTR(cft->private);
  4477. if (!do_swap_account && type == _MEMSWAP)
  4478. return -EOPNOTSUPP;
  4479. switch (name) {
  4480. case RES_LIMIT:
  4481. if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
  4482. ret = -EINVAL;
  4483. break;
  4484. }
  4485. /* This function does all necessary parse...reuse it */
  4486. ret = res_counter_memparse_write_strategy(buffer, &val);
  4487. if (ret)
  4488. break;
  4489. if (type == _MEM)
  4490. ret = mem_cgroup_resize_limit(memcg, val);
  4491. else if (type == _MEMSWAP)
  4492. ret = mem_cgroup_resize_memsw_limit(memcg, val);
  4493. else if (type == _KMEM)
  4494. ret = memcg_update_kmem_limit(cont, val);
  4495. else
  4496. return -EINVAL;
  4497. break;
  4498. case RES_SOFT_LIMIT:
  4499. ret = res_counter_memparse_write_strategy(buffer, &val);
  4500. if (ret)
  4501. break;
  4502. /*
  4503. * For memsw, soft limits are hard to implement in terms
4504. * of semantics; for now, we support soft limits only for
4505. * memory control without swap.
  4506. */
  4507. if (type == _MEM)
  4508. ret = res_counter_set_soft_limit(&memcg->res, val);
  4509. else
  4510. ret = -EINVAL;
  4511. break;
  4512. default:
  4513. ret = -EINVAL; /* should be BUG() ? */
  4514. break;
  4515. }
  4516. return ret;
  4517. }
  4518. static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
  4519. unsigned long long *mem_limit, unsigned long long *memsw_limit)
  4520. {
  4521. struct cgroup *cgroup;
  4522. unsigned long long min_limit, min_memsw_limit, tmp;
  4523. min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  4524. min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  4525. cgroup = memcg->css.cgroup;
  4526. if (!memcg->use_hierarchy)
  4527. goto out;
  4528. while (cgroup->parent) {
  4529. cgroup = cgroup->parent;
  4530. memcg = mem_cgroup_from_cont(cgroup);
  4531. if (!memcg->use_hierarchy)
  4532. break;
  4533. tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
  4534. min_limit = min(min_limit, tmp);
  4535. tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  4536. min_memsw_limit = min(min_memsw_limit, tmp);
  4537. }
  4538. out:
  4539. *mem_limit = min_limit;
  4540. *memsw_limit = min_memsw_limit;
  4541. }
  4542. static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
  4543. {
  4544. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4545. int name;
  4546. enum res_type type;
  4547. type = MEMFILE_TYPE(event);
  4548. name = MEMFILE_ATTR(event);
  4549. if (!do_swap_account && type == _MEMSWAP)
  4550. return -EOPNOTSUPP;
  4551. switch (name) {
  4552. case RES_MAX_USAGE:
  4553. if (type == _MEM)
  4554. res_counter_reset_max(&memcg->res);
  4555. else if (type == _MEMSWAP)
  4556. res_counter_reset_max(&memcg->memsw);
  4557. else if (type == _KMEM)
  4558. res_counter_reset_max(&memcg->kmem);
  4559. else
  4560. return -EINVAL;
  4561. break;
  4562. case RES_FAILCNT:
  4563. if (type == _MEM)
  4564. res_counter_reset_failcnt(&memcg->res);
  4565. else if (type == _MEMSWAP)
  4566. res_counter_reset_failcnt(&memcg->memsw);
  4567. else if (type == _KMEM)
  4568. res_counter_reset_failcnt(&memcg->kmem);
  4569. else
  4570. return -EINVAL;
  4571. break;
  4572. }
  4573. return 0;
  4574. }
  4575. static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
  4576. struct cftype *cft)
  4577. {
  4578. return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
  4579. }
  4580. #ifdef CONFIG_MMU
  4581. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  4582. struct cftype *cft, u64 val)
  4583. {
  4584. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4585. if (val >= (1 << NR_MOVE_TYPE))
  4586. return -EINVAL;
  4587. /*
4588. * No locking is needed here, because ->can_attach() will
4589. * check this value once at the beginning of the process, and then carry
  4590. * on with stale data. This means that changes to this value will only
  4591. * affect task migrations starting after the change.
  4592. */
  4593. memcg->move_charge_at_immigrate = val;
  4594. return 0;
  4595. }
  4596. #else
  4597. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  4598. struct cftype *cft, u64 val)
  4599. {
  4600. return -ENOSYS;
  4601. }
  4602. #endif
  4603. #ifdef CONFIG_NUMA
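/*
 * memory.numa_stat: for each of total/file/anon/unevictable, print the
 * overall page count followed by a per-node " N<nid>=<pages>" breakdown.
 */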
  4604. static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
  4605. struct seq_file *m)
  4606. {
  4607. int nid;
  4608. unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
  4609. unsigned long node_nr;
  4610. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4611. total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
  4612. seq_printf(m, "total=%lu", total_nr);
  4613. for_each_node_state(nid, N_MEMORY) {
  4614. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
  4615. seq_printf(m, " N%d=%lu", nid, node_nr);
  4616. }
  4617. seq_putc(m, '\n');
  4618. file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
  4619. seq_printf(m, "file=%lu", file_nr);
  4620. for_each_node_state(nid, N_MEMORY) {
  4621. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  4622. LRU_ALL_FILE);
  4623. seq_printf(m, " N%d=%lu", nid, node_nr);
  4624. }
  4625. seq_putc(m, '\n');
  4626. anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
  4627. seq_printf(m, "anon=%lu", anon_nr);
  4628. for_each_node_state(nid, N_MEMORY) {
  4629. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  4630. LRU_ALL_ANON);
  4631. seq_printf(m, " N%d=%lu", nid, node_nr);
  4632. }
  4633. seq_putc(m, '\n');
  4634. unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
  4635. seq_printf(m, "unevictable=%lu", unevictable_nr);
  4636. for_each_node_state(nid, N_MEMORY) {
  4637. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  4638. BIT(LRU_UNEVICTABLE));
  4639. seq_printf(m, " N%d=%lu", nid, node_nr);
  4640. }
  4641. seq_putc(m, '\n');
  4642. return 0;
  4643. }
  4644. #endif /* CONFIG_NUMA */
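/* Compile-time check that mem_cgroup_lru_names covers every LRU list. */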
  4645. static inline void mem_cgroup_lru_names_not_uptodate(void)
  4646. {
  4647. BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
  4648. }
  4649. static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
  4650. struct seq_file *m)
  4651. {
  4652. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4653. struct mem_cgroup *mi;
  4654. unsigned int i;
  4655. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  4656. if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
  4657. continue;
  4658. seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
  4659. mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
  4660. }
  4661. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
  4662. seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
  4663. mem_cgroup_read_events(memcg, i));
  4664. for (i = 0; i < NR_LRU_LISTS; i++)
  4665. seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
  4666. mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
  4667. /* Hierarchical information */
  4668. {
  4669. unsigned long long limit, memsw_limit;
  4670. memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
  4671. seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
  4672. if (do_swap_account)
  4673. seq_printf(m, "hierarchical_memsw_limit %llu\n",
  4674. memsw_limit);
  4675. }
  4676. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  4677. long long val = 0;
  4678. if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
  4679. continue;
  4680. for_each_mem_cgroup_tree(mi, memcg)
  4681. val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
  4682. seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
  4683. }
  4684. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
  4685. unsigned long long val = 0;
  4686. for_each_mem_cgroup_tree(mi, memcg)
  4687. val += mem_cgroup_read_events(mi, i);
  4688. seq_printf(m, "total_%s %llu\n",
  4689. mem_cgroup_events_names[i], val);
  4690. }
  4691. for (i = 0; i < NR_LRU_LISTS; i++) {
  4692. unsigned long long val = 0;
  4693. for_each_mem_cgroup_tree(mi, memcg)
  4694. val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
  4695. seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
  4696. }
  4697. #ifdef CONFIG_DEBUG_VM
  4698. {
  4699. int nid, zid;
  4700. struct mem_cgroup_per_zone *mz;
  4701. struct zone_reclaim_stat *rstat;
  4702. unsigned long recent_rotated[2] = {0, 0};
  4703. unsigned long recent_scanned[2] = {0, 0};
  4704. for_each_online_node(nid)
  4705. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  4706. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  4707. rstat = &mz->lruvec.reclaim_stat;
  4708. recent_rotated[0] += rstat->recent_rotated[0];
  4709. recent_rotated[1] += rstat->recent_rotated[1];
  4710. recent_scanned[0] += rstat->recent_scanned[0];
  4711. recent_scanned[1] += rstat->recent_scanned[1];
  4712. }
  4713. seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
  4714. seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
  4715. seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
  4716. seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
  4717. }
  4718. #endif
  4719. return 0;
  4720. }
  4721. static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
  4722. {
  4723. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4724. return mem_cgroup_swappiness(memcg);
  4725. }
  4726. static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
  4727. u64 val)
  4728. {
  4729. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4730. struct mem_cgroup *parent;
  4731. if (val > 100)
  4732. return -EINVAL;
  4733. if (cgrp->parent == NULL)
  4734. return -EINVAL;
  4735. parent = mem_cgroup_from_cont(cgrp->parent);
  4736. cgroup_lock();
  4737. /* If under hierarchy, only empty-root can set this value */
  4738. if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
  4739. cgroup_unlock();
  4740. return -EINVAL;
  4741. }
  4742. memcg->swappiness = val;
  4743. cgroup_unlock();
  4744. return 0;
  4745. }
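/*
 * Signal the eventfd of every threshold that has been crossed, in either
 * direction, since the last check, then record the new current_threshold.
 */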
  4746. static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
  4747. {
  4748. struct mem_cgroup_threshold_ary *t;
  4749. u64 usage;
  4750. int i;
  4751. rcu_read_lock();
  4752. if (!swap)
  4753. t = rcu_dereference(memcg->thresholds.primary);
  4754. else
  4755. t = rcu_dereference(memcg->memsw_thresholds.primary);
  4756. if (!t)
  4757. goto unlock;
  4758. usage = mem_cgroup_usage(memcg, swap);
  4759. /*
4760. * current_threshold points to the threshold just below or equal to usage.
4761. * If that is not the case, a threshold was crossed after the last
4762. * call to __mem_cgroup_threshold().
  4763. */
  4764. i = t->current_threshold;
  4765. /*
  4766. * Iterate backward over array of thresholds starting from
  4767. * current_threshold and check if a threshold is crossed.
4768. * If none of the thresholds below usage is crossed, we read
  4769. * only one element of the array here.
  4770. */
  4771. for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
  4772. eventfd_signal(t->entries[i].eventfd, 1);
  4773. /* i = current_threshold + 1 */
  4774. i++;
  4775. /*
  4776. * Iterate forward over array of thresholds starting from
  4777. * current_threshold+1 and check if a threshold is crossed.
4778. * If none of the thresholds above usage is crossed, we read
  4779. * only one element of the array here.
  4780. */
  4781. for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
  4782. eventfd_signal(t->entries[i].eventfd, 1);
  4783. /* Update current_threshold */
  4784. t->current_threshold = i - 1;
  4785. unlock:
  4786. rcu_read_unlock();
  4787. }
  4788. static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  4789. {
  4790. while (memcg) {
  4791. __mem_cgroup_threshold(memcg, false);
  4792. if (do_swap_account)
  4793. __mem_cgroup_threshold(memcg, true);
  4794. memcg = parent_mem_cgroup(memcg);
  4795. }
  4796. }
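/* sort() comparison callback: order thresholds by ascending value. */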
  4797. static int compare_thresholds(const void *a, const void *b)
  4798. {
  4799. const struct mem_cgroup_threshold *_a = a;
  4800. const struct mem_cgroup_threshold *_b = b;
4801. /* threshold is u64; subtracting and truncating to int can give the wrong sign */
if (_a->threshold > _b->threshold)
return 1;
if (_a->threshold < _b->threshold)
return -1;
return 0;
  4802. }
  4803. static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
  4804. {
  4805. struct mem_cgroup_eventfd_list *ev;
  4806. list_for_each_entry(ev, &memcg->oom_notify, list)
  4807. eventfd_signal(ev->eventfd, 1);
  4808. return 0;
  4809. }
  4810. static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
  4811. {
  4812. struct mem_cgroup *iter;
  4813. for_each_mem_cgroup_tree(iter, memcg)
  4814. mem_cgroup_oom_notify_cb(iter);
  4815. }
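/*
 * Register a usage threshold eventfd: allocate a larger array, copy the
 * existing thresholds plus the new one, sort it, and publish it via RCU,
 * keeping the old primary array as the spare buffer.
 */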
  4816. static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
  4817. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  4818. {
  4819. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4820. struct mem_cgroup_thresholds *thresholds;
  4821. struct mem_cgroup_threshold_ary *new;
  4822. enum res_type type = MEMFILE_TYPE(cft->private);
  4823. u64 threshold, usage;
  4824. int i, size, ret;
  4825. ret = res_counter_memparse_write_strategy(args, &threshold);
  4826. if (ret)
  4827. return ret;
  4828. mutex_lock(&memcg->thresholds_lock);
  4829. if (type == _MEM)
  4830. thresholds = &memcg->thresholds;
  4831. else if (type == _MEMSWAP)
  4832. thresholds = &memcg->memsw_thresholds;
  4833. else
  4834. BUG();
  4835. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  4836. /* Check if a threshold crossed before adding a new one */
  4837. if (thresholds->primary)
  4838. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
  4839. size = thresholds->primary ? thresholds->primary->size + 1 : 1;
  4840. /* Allocate memory for new array of thresholds */
  4841. new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
  4842. GFP_KERNEL);
  4843. if (!new) {
  4844. ret = -ENOMEM;
  4845. goto unlock;
  4846. }
  4847. new->size = size;
  4848. /* Copy thresholds (if any) to new array */
  4849. if (thresholds->primary) {
  4850. memcpy(new->entries, thresholds->primary->entries, (size - 1) *
  4851. sizeof(struct mem_cgroup_threshold));
  4852. }
  4853. /* Add new threshold */
  4854. new->entries[size - 1].eventfd = eventfd;
  4855. new->entries[size - 1].threshold = threshold;
4856. /* Sort thresholds. Registering a new threshold isn't time-critical */
  4857. sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
  4858. compare_thresholds, NULL);
  4859. /* Find current threshold */
  4860. new->current_threshold = -1;
  4861. for (i = 0; i < size; i++) {
  4862. if (new->entries[i].threshold <= usage) {
  4863. /*
  4864. * new->current_threshold will not be used until
  4865. * rcu_assign_pointer(), so it's safe to increment
  4866. * it here.
  4867. */
  4868. ++new->current_threshold;
  4869. } else
  4870. break;
  4871. }
  4872. /* Free old spare buffer and save old primary buffer as spare */
  4873. kfree(thresholds->spare);
  4874. thresholds->spare = thresholds->primary;
  4875. rcu_assign_pointer(thresholds->primary, new);
  4876. /* To be sure that nobody uses thresholds */
  4877. synchronize_rcu();
  4878. unlock:
  4879. mutex_unlock(&memcg->thresholds_lock);
  4880. return ret;
  4881. }
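/*
 * Unregister a usage threshold eventfd: rebuild the array in the spare
 * buffer without the matching entries and publish it via RCU (or publish
 * NULL if no thresholds remain).
 */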
  4882. static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
  4883. struct cftype *cft, struct eventfd_ctx *eventfd)
  4884. {
  4885. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4886. struct mem_cgroup_thresholds *thresholds;
  4887. struct mem_cgroup_threshold_ary *new;
  4888. enum res_type type = MEMFILE_TYPE(cft->private);
  4889. u64 usage;
  4890. int i, j, size;
  4891. mutex_lock(&memcg->thresholds_lock);
  4892. if (type == _MEM)
  4893. thresholds = &memcg->thresholds;
  4894. else if (type == _MEMSWAP)
  4895. thresholds = &memcg->memsw_thresholds;
  4896. else
  4897. BUG();
  4898. if (!thresholds->primary)
  4899. goto unlock;
  4900. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  4901. /* Check if a threshold crossed before removing */
  4902. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4903. /* Calculate the new number of thresholds */
  4904. size = 0;
  4905. for (i = 0; i < thresholds->primary->size; i++) {
  4906. if (thresholds->primary->entries[i].eventfd != eventfd)
  4907. size++;
  4908. }
  4909. new = thresholds->spare;
  4910. /* Set thresholds array to NULL if we don't have thresholds */
  4911. if (!size) {
  4912. kfree(new);
  4913. new = NULL;
  4914. goto swap_buffers;
  4915. }
  4916. new->size = size;
  4917. /* Copy thresholds and find current threshold */
  4918. new->current_threshold = -1;
  4919. for (i = 0, j = 0; i < thresholds->primary->size; i++) {
  4920. if (thresholds->primary->entries[i].eventfd == eventfd)
  4921. continue;
  4922. new->entries[j] = thresholds->primary->entries[i];
  4923. if (new->entries[j].threshold <= usage) {
  4924. /*
  4925. * new->current_threshold will not be used
  4926. * until rcu_assign_pointer(), so it's safe to increment
  4927. * it here.
  4928. */
  4929. ++new->current_threshold;
  4930. }
  4931. j++;
  4932. }
  4933. swap_buffers:
  4934. /* Swap primary and spare array */
  4935. thresholds->spare = thresholds->primary;
  4936. /* If all events are unregistered, free the spare array */
  4937. if (!new) {
  4938. kfree(thresholds->spare);
  4939. thresholds->spare = NULL;
  4940. }
  4941. rcu_assign_pointer(thresholds->primary, new);
  4942. /* To be sure that nobody uses thresholds */
  4943. synchronize_rcu();
  4944. unlock:
  4945. mutex_unlock(&memcg->thresholds_lock);
  4946. }
  4947. static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
  4948. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  4949. {
  4950. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4951. struct mem_cgroup_eventfd_list *event;
  4952. enum res_type type = MEMFILE_TYPE(cft->private);
  4953. BUG_ON(type != _OOM_TYPE);
  4954. event = kmalloc(sizeof(*event), GFP_KERNEL);
  4955. if (!event)
  4956. return -ENOMEM;
  4957. spin_lock(&memcg_oom_lock);
  4958. event->eventfd = eventfd;
  4959. list_add(&event->list, &memcg->oom_notify);
  4960. /* already in OOM ? */
  4961. if (atomic_read(&memcg->under_oom))
  4962. eventfd_signal(eventfd, 1);
  4963. spin_unlock(&memcg_oom_lock);
  4964. return 0;
  4965. }
  4966. static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
  4967. struct cftype *cft, struct eventfd_ctx *eventfd)
  4968. {
  4969. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4970. struct mem_cgroup_eventfd_list *ev, *tmp;
  4971. enum res_type type = MEMFILE_TYPE(cft->private);
  4972. BUG_ON(type != _OOM_TYPE);
  4973. spin_lock(&memcg_oom_lock);
  4974. list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
  4975. if (ev->eventfd == eventfd) {
  4976. list_del(&ev->list);
  4977. kfree(ev);
  4978. }
  4979. }
  4980. spin_unlock(&memcg_oom_lock);
  4981. }
  4982. static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
  4983. struct cftype *cft, struct cgroup_map_cb *cb)
  4984. {
  4985. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4986. cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
  4987. if (atomic_read(&memcg->under_oom))
  4988. cb->fill(cb, "under_oom", 1);
  4989. else
  4990. cb->fill(cb, "under_oom", 0);
  4991. return 0;
  4992. }
  4993. static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
  4994. struct cftype *cft, u64 val)
  4995. {
  4996. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4997. struct mem_cgroup *parent;
  4998. /* cannot set to root cgroup and only 0 and 1 are allowed */
  4999. if (!cgrp->parent || !((val == 0) || (val == 1)))
  5000. return -EINVAL;
  5001. parent = mem_cgroup_from_cont(cgrp->parent);
  5002. cgroup_lock();
  5003. /* oom-kill-disable is a flag for subhierarchy. */
  5004. if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
  5005. cgroup_unlock();
  5006. return -EINVAL;
  5007. }
  5008. memcg->oom_kill_disable = val;
  5009. if (!val)
  5010. memcg_oom_recover(memcg);
  5011. cgroup_unlock();
  5012. return 0;
  5013. }
  5014. #ifdef CONFIG_MEMCG_KMEM
  5015. static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
  5016. {
  5017. int ret;
  5018. memcg->kmemcg_id = -1;
  5019. ret = memcg_propagate_kmem(memcg);
  5020. if (ret)
  5021. return ret;
  5022. return mem_cgroup_sockets_init(memcg, ss);
  5023. };
  5024. static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
  5025. {
  5026. mem_cgroup_sockets_destroy(memcg);
  5027. memcg_kmem_mark_dead(memcg);
  5028. if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
  5029. return;
  5030. /*
5031. * Charges are already down to 0; undo the mem_cgroup_get() done in the charge
5032. * path here, being careful not to race with memcg_uncharge_kmem: it is
5033. * possible that the charges went down to 0 between mark_dead and the
5034. * res_counter read, so in that case, we don't need the put.
  5035. */
  5036. if (memcg_kmem_test_and_clear_dead(memcg))
  5037. mem_cgroup_put(memcg);
  5038. }
  5039. #else
  5040. static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
  5041. {
  5042. return 0;
  5043. }
  5044. static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
  5045. {
  5046. }
  5047. #endif
  5048. static struct cftype mem_cgroup_files[] = {
  5049. {
  5050. .name = "usage_in_bytes",
  5051. .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
  5052. .read = mem_cgroup_read,
  5053. .register_event = mem_cgroup_usage_register_event,
  5054. .unregister_event = mem_cgroup_usage_unregister_event,
  5055. },
  5056. {
  5057. .name = "max_usage_in_bytes",
  5058. .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
  5059. .trigger = mem_cgroup_reset,
  5060. .read = mem_cgroup_read,
  5061. },
  5062. {
  5063. .name = "limit_in_bytes",
  5064. .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
  5065. .write_string = mem_cgroup_write,
  5066. .read = mem_cgroup_read,
  5067. },
  5068. {
  5069. .name = "soft_limit_in_bytes",
  5070. .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
  5071. .write_string = mem_cgroup_write,
  5072. .read = mem_cgroup_read,
  5073. },
  5074. {
  5075. .name = "failcnt",
  5076. .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
  5077. .trigger = mem_cgroup_reset,
  5078. .read = mem_cgroup_read,
  5079. },
  5080. {
  5081. .name = "stat",
  5082. .read_seq_string = memcg_stat_show,
  5083. },
  5084. {
  5085. .name = "force_empty",
  5086. .trigger = mem_cgroup_force_empty_write,
  5087. },
  5088. {
  5089. .name = "use_hierarchy",
  5090. .write_u64 = mem_cgroup_hierarchy_write,
  5091. .read_u64 = mem_cgroup_hierarchy_read,
  5092. },
  5093. {
  5094. .name = "swappiness",
  5095. .read_u64 = mem_cgroup_swappiness_read,
  5096. .write_u64 = mem_cgroup_swappiness_write,
  5097. },
  5098. {
  5099. .name = "move_charge_at_immigrate",
  5100. .read_u64 = mem_cgroup_move_charge_read,
  5101. .write_u64 = mem_cgroup_move_charge_write,
  5102. },
  5103. {
  5104. .name = "oom_control",
  5105. .read_map = mem_cgroup_oom_control_read,
  5106. .write_u64 = mem_cgroup_oom_control_write,
  5107. .register_event = mem_cgroup_oom_register_event,
  5108. .unregister_event = mem_cgroup_oom_unregister_event,
  5109. .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
  5110. },
  5111. #ifdef CONFIG_NUMA
  5112. {
  5113. .name = "numa_stat",
  5114. .read_seq_string = memcg_numa_stat_show,
  5115. },
  5116. #endif
  5117. #ifdef CONFIG_MEMCG_KMEM
  5118. {
  5119. .name = "kmem.limit_in_bytes",
  5120. .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
  5121. .write_string = mem_cgroup_write,
  5122. .read = mem_cgroup_read,
  5123. },
  5124. {
  5125. .name = "kmem.usage_in_bytes",
  5126. .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
  5127. .read = mem_cgroup_read,
  5128. },
  5129. {
  5130. .name = "kmem.failcnt",
  5131. .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
  5132. .trigger = mem_cgroup_reset,
  5133. .read = mem_cgroup_read,
  5134. },
  5135. {
  5136. .name = "kmem.max_usage_in_bytes",
  5137. .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
  5138. .trigger = mem_cgroup_reset,
  5139. .read = mem_cgroup_read,
  5140. },
  5141. #ifdef CONFIG_SLABINFO
  5142. {
  5143. .name = "kmem.slabinfo",
  5144. .read_seq_string = mem_cgroup_slabinfo_read,
  5145. },
  5146. #endif
  5147. #endif
  5148. { }, /* terminate */
  5149. };
  5150. #ifdef CONFIG_MEMCG_SWAP
  5151. static struct cftype memsw_cgroup_files[] = {
  5152. {
  5153. .name = "memsw.usage_in_bytes",
  5154. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
  5155. .read = mem_cgroup_read,
  5156. .register_event = mem_cgroup_usage_register_event,
  5157. .unregister_event = mem_cgroup_usage_unregister_event,
  5158. },
  5159. {
  5160. .name = "memsw.max_usage_in_bytes",
  5161. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
  5162. .trigger = mem_cgroup_reset,
  5163. .read = mem_cgroup_read,
  5164. },
  5165. {
  5166. .name = "memsw.limit_in_bytes",
  5167. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
  5168. .write_string = mem_cgroup_write,
  5169. .read = mem_cgroup_read,
  5170. },
  5171. {
  5172. .name = "memsw.failcnt",
  5173. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
  5174. .trigger = mem_cgroup_reset,
  5175. .read = mem_cgroup_read,
  5176. },
  5177. { }, /* terminate */
  5178. };
  5179. #endif
  5180. static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
  5181. {
  5182. struct mem_cgroup_per_node *pn;
  5183. struct mem_cgroup_per_zone *mz;
  5184. int zone, tmp = node;
  5185. /*
  5186. * This routine is called against possible nodes.
5187. * But it's a BUG to call kmalloc() against an offline node.
5188. *
5189. * TODO: this routine can waste a lot of memory for nodes which will
5190. * never be onlined. It's better to use a memory hotplug callback
  5191. * function.
  5192. */
  5193. if (!node_state(node, N_NORMAL_MEMORY))
  5194. tmp = -1;
  5195. pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
  5196. if (!pn)
  5197. return 1;
  5198. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  5199. mz = &pn->zoneinfo[zone];
  5200. lruvec_init(&mz->lruvec);
  5201. mz->usage_in_excess = 0;
  5202. mz->on_tree = false;
  5203. mz->memcg = memcg;
  5204. }
  5205. memcg->info.nodeinfo[node] = pn;
  5206. return 0;
  5207. }
  5208. static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
  5209. {
  5210. kfree(memcg->info.nodeinfo[node]);
  5211. }
  5212. static struct mem_cgroup *mem_cgroup_alloc(void)
  5213. {
  5214. struct mem_cgroup *memcg;
  5215. size_t size = memcg_size();
  5216. /* Can be very big if nr_node_ids is very big */
  5217. if (size < PAGE_SIZE)
  5218. memcg = kzalloc(size, GFP_KERNEL);
  5219. else
  5220. memcg = vzalloc(size);
  5221. if (!memcg)
  5222. return NULL;
  5223. memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
  5224. if (!memcg->stat)
  5225. goto out_free;
  5226. spin_lock_init(&memcg->pcp_counter_lock);
  5227. return memcg;
  5228. out_free:
  5229. if (size < PAGE_SIZE)
  5230. kfree(memcg);
  5231. else
  5232. vfree(memcg);
  5233. return NULL;
  5234. }
  5235. /*
5236. * When destroying a mem_cgroup, references from swap_cgroup can remain.
5237. * (scanning them all at force_empty would be too costly...)
5238. *
5239. * Instead of clearing all references at force_empty, we remember
5240. * the number of references from swap_cgroup and free the mem_cgroup when
  5241. * it goes down to 0.
  5242. *
  5243. * Removal of cgroup itself succeeds regardless of refs from swap.
  5244. */
  5245. static void __mem_cgroup_free(struct mem_cgroup *memcg)
  5246. {
  5247. int node;
  5248. size_t size = memcg_size();
  5249. mem_cgroup_remove_from_trees(memcg);
  5250. free_css_id(&mem_cgroup_subsys, &memcg->css);
  5251. for_each_node(node)
  5252. free_mem_cgroup_per_zone_info(memcg, node);
  5253. free_percpu(memcg->stat);
  5254. /*
  5255. * We need to make sure that (at least for now), the jump label
  5256. * destruction code runs outside of the cgroup lock. This is because
  5257. * get_online_cpus(), which is called from the static_branch update,
  5258. * can't be called inside the cgroup_lock. cpusets are the ones
  5259. * enforcing this dependency, so if they ever change, we might as well.
  5260. *
  5261. * schedule_work() will guarantee this happens. Be careful if you need
  5262. * to move this code around, and make sure it is outside
  5263. * the cgroup_lock.
  5264. */
  5265. disarm_static_keys(memcg);
  5266. if (size < PAGE_SIZE)
  5267. kfree(memcg);
  5268. else
  5269. vfree(memcg);
  5270. }
  5271. /*
  5272. * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
  5273. * but in process context. The work_freeing structure is overlaid
  5274. * on the rcu_freeing structure, which itself is overlaid on memsw.
  5275. */
  5276. static void free_work(struct work_struct *work)
  5277. {
  5278. struct mem_cgroup *memcg;
  5279. memcg = container_of(work, struct mem_cgroup, work_freeing);
  5280. __mem_cgroup_free(memcg);
  5281. }
  5282. static void free_rcu(struct rcu_head *rcu_head)
  5283. {
  5284. struct mem_cgroup *memcg;
  5285. memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
  5286. INIT_WORK(&memcg->work_freeing, free_work);
  5287. schedule_work(&memcg->work_freeing);
  5288. }
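/*
 * Reference counting for the mem_cgroup itself. The last put frees the
 * memcg: call_rcu() defers to free_rcu(), which schedules free_work() so
 * that __mem_cgroup_free() runs in process context.
 */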
  5289. static void mem_cgroup_get(struct mem_cgroup *memcg)
  5290. {
  5291. atomic_inc(&memcg->refcnt);
  5292. }
  5293. static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
  5294. {
  5295. if (atomic_sub_and_test(count, &memcg->refcnt)) {
  5296. struct mem_cgroup *parent = parent_mem_cgroup(memcg);
  5297. call_rcu(&memcg->rcu_freeing, free_rcu);
  5298. if (parent)
  5299. mem_cgroup_put(parent);
  5300. }
  5301. }
  5302. static void mem_cgroup_put(struct mem_cgroup *memcg)
  5303. {
  5304. __mem_cgroup_put(memcg, 1);
  5305. }
  5306. /*
5307. * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
  5308. */
  5309. struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
  5310. {
  5311. if (!memcg->res.parent)
  5312. return NULL;
  5313. return mem_cgroup_from_res_counter(memcg->res.parent, res);
  5314. }
  5315. EXPORT_SYMBOL(parent_mem_cgroup);
  5316. static int mem_cgroup_soft_limit_tree_init(void)
  5317. {
  5318. struct mem_cgroup_tree_per_node *rtpn;
  5319. struct mem_cgroup_tree_per_zone *rtpz;
  5320. int tmp, node, zone;
  5321. for_each_node(node) {
  5322. tmp = node;
  5323. if (!node_state(node, N_NORMAL_MEMORY))
  5324. tmp = -1;
  5325. rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
  5326. if (!rtpn)
  5327. goto err_cleanup;
  5328. soft_limit_tree.rb_tree_per_node[node] = rtpn;
  5329. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  5330. rtpz = &rtpn->rb_tree_per_zone[zone];
  5331. rtpz->rb_root = RB_ROOT;
  5332. spin_lock_init(&rtpz->lock);
  5333. }
  5334. }
  5335. return 0;
  5336. err_cleanup:
  5337. for_each_node(node) {
  5338. if (!soft_limit_tree.rb_tree_per_node[node])
  5339. break;
  5340. kfree(soft_limit_tree.rb_tree_per_node[node]);
  5341. soft_limit_tree.rb_tree_per_node[node] = NULL;
  5342. }
  5343. return 1;
  5344. }
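/*
 * Allocate and minimally initialize a new memcg css. For the root cgroup
 * this also sets up the soft limit tree, the per-cpu charge stocks and
 * root_mem_cgroup; parent-dependent setup happens later in css_online.
 */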
  5345. static struct cgroup_subsys_state * __ref
  5346. mem_cgroup_css_alloc(struct cgroup *cont)
  5347. {
  5348. struct mem_cgroup *memcg;
  5349. long error = -ENOMEM;
  5350. int node;
  5351. memcg = mem_cgroup_alloc();
  5352. if (!memcg)
  5353. return ERR_PTR(error);
  5354. for_each_node(node)
  5355. if (alloc_mem_cgroup_per_zone_info(memcg, node))
  5356. goto free_out;
  5357. /* root ? */
  5358. if (cont->parent == NULL) {
  5359. int cpu;
  5360. if (mem_cgroup_soft_limit_tree_init())
  5361. goto free_out;
  5362. root_mem_cgroup = memcg;
  5363. for_each_possible_cpu(cpu) {
  5364. struct memcg_stock_pcp *stock =
  5365. &per_cpu(memcg_stock, cpu);
  5366. INIT_WORK(&stock->work, drain_local_stock);
  5367. }
  5368. res_counter_init(&memcg->res, NULL);
  5369. res_counter_init(&memcg->memsw, NULL);
  5370. res_counter_init(&memcg->kmem, NULL);
  5371. }
  5372. memcg->last_scanned_node = MAX_NUMNODES;
  5373. INIT_LIST_HEAD(&memcg->oom_notify);
  5374. atomic_set(&memcg->refcnt, 1);
  5375. memcg->move_charge_at_immigrate = 0;
  5376. mutex_init(&memcg->thresholds_lock);
  5377. spin_lock_init(&memcg->move_lock);
  5378. return &memcg->css;
  5379. free_out:
  5380. __mem_cgroup_free(memcg);
  5381. return ERR_PTR(error);
  5382. }
  5383. static int
  5384. mem_cgroup_css_online(struct cgroup *cont)
  5385. {
  5386. struct mem_cgroup *memcg, *parent;
  5387. int error = 0;
  5388. if (!cont->parent)
  5389. return 0;
  5390. memcg = mem_cgroup_from_cont(cont);
  5391. parent = mem_cgroup_from_cont(cont->parent);
  5392. memcg->use_hierarchy = parent->use_hierarchy;
  5393. memcg->oom_kill_disable = parent->oom_kill_disable;
  5394. memcg->swappiness = mem_cgroup_swappiness(parent);
  5395. if (parent->use_hierarchy) {
  5396. res_counter_init(&memcg->res, &parent->res);
  5397. res_counter_init(&memcg->memsw, &parent->memsw);
  5398. res_counter_init(&memcg->kmem, &parent->kmem);
  5399. /*
  5400. * We increment refcnt of the parent to ensure that we can
  5401. * safely access it on res_counter_charge/uncharge.
  5402. * This refcnt will be decremented when freeing this
5403. * mem_cgroup (see mem_cgroup_put).
  5404. */
  5405. mem_cgroup_get(parent);
  5406. } else {
  5407. res_counter_init(&memcg->res, NULL);
  5408. res_counter_init(&memcg->memsw, NULL);
  5409. res_counter_init(&memcg->kmem, NULL);
  5410. /*
5411. * A deeper hierarchy with use_hierarchy == false doesn't make
5412. * much sense, so let the cgroup subsystem know about this
  5413. * unfortunate state in our controller.
  5414. */
  5415. if (parent != root_mem_cgroup)
  5416. mem_cgroup_subsys.broken_hierarchy = true;
  5417. }
  5418. error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
  5419. if (error) {
  5420. /*
  5421. * We call put now because our (and parent's) refcnts
  5422. * are already in place. mem_cgroup_put() will internally
  5423. * call __mem_cgroup_free, so return directly
  5424. */
  5425. mem_cgroup_put(memcg);
  5426. }
  5427. return error;
  5428. }
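/*
 * Illustrative sketch (not part of memcontrol.c): when use_hierarchy is set,
 * the child's res/memsw/kmem counters are initialised with the parent
 * counters as their ancestors, so a charge against the child also charges
 * every ancestor and fails if any level is over its limit.  The standalone
 * userspace analogue below (struct counter and counter_charge() are
 * hypothetical names, not the res_counter API) shows that
 * charge-up-the-chain-with-rollback behaviour.
 */
#include <stdio.h>
#include <stdbool.h>

struct counter {
	long usage;
	long limit;
	struct counter *parent;
};

static bool counter_charge(struct counter *c, long bytes)
{
	struct counter *it, *undo;

	/* charge every level from the child up to the root */
	for (it = c; it; it = it->parent) {
		if (it->usage + bytes > it->limit)
			goto rollback;
		it->usage += bytes;
	}
	return true;

rollback:
	/* undo the partial charges below the level that failed */
	for (undo = c; undo != it; undo = undo->parent)
		undo->usage -= bytes;
	return false;
}

int main(void)
{
	struct counter root  = { .limit = 1 << 20 };
	struct counter child = { .limit = 1 << 20, .parent = &root };

	printf("charge ok: %d\n", counter_charge(&child, 4096));
	printf("root usage: %ld\n", root.usage);	/* 4096: propagated up */
	return 0;
}
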
static void mem_cgroup_css_offline(struct cgroup *cont)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	mem_cgroup_reparent_charges(memcg);
	mem_cgroup_destroy_all_caches(memcg);
}

static void mem_cgroup_css_free(struct cgroup *cont)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	kmem_cgroup_destroy(memcg);
	mem_cgroup_put(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *memcg = mc.to;

	if (mem_cgroup_is_root(memcg)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "memcg" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&memcg->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}

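/*
 * Illustrative sketch (not part of memcontrol.c): mem_cgroup_do_precharge()
 * first tries to grab all "count" pages worth of charge in one res_counter
 * call and, if that fails, falls back to charging page by page, yielding the
 * CPU every PRECHARGE_COUNT_AT_ONCE iterations.  The standalone analogue of
 * that batch-then-trickle loop below uses hypothetical pool/try_charge_bytes
 * helpers rather than the real charging primitives.
 */
#include <stdio.h>
#include <stdbool.h>

#define BATCH_RESCHED	256
#define PAGE_SZ		4096L

static long pool = 10 * PAGE_SZ;	/* pretend this is the res_counter */

static bool try_charge_bytes(long bytes)
{
	if (pool < bytes)
		return false;
	pool -= bytes;
	return true;
}

static long precharge(unsigned long count)
{
	long got = 0;
	int resched = BATCH_RESCHED;

	/* fast path: the whole batch in one charge */
	if (count > 1 && try_charge_bytes((long)count * PAGE_SZ))
		return (long)count;

	/* slow path: one page at a time, yielding periodically */
	while (count--) {
		if (!resched--) {
			resched = BATCH_RESCHED;
			/* cond_resched() would go here in the kernel */
		}
		if (!try_charge_bytes(PAGE_SZ))
			break;
		got++;
	}
	return got;
}

int main(void)
{
	printf("precharged %ld of 16 pages\n", precharge(16));
	return 0;
}
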
/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: where the target page or swap entry is stored (can be NULL)
 *
 * Returns
 *   0 (MC_TARGET_NONE): the pte is not a target for move charge.
 *   1 (MC_TARGET_PAGE): the page corresponding to this pte is a target for
 *     move charge; if @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers must handle it).
 *   2 (MC_TARGET_SWAP): the swap entry corresponding to this pte is a target
 *     for charge migration; if @target is not NULL, the entry is stored in
 *     target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon())
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task (page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(swap_address_space(swap), swap.val);
	}
#endif
	return page;
}

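/*
 * Illustrative sketch (not part of memcontrol.c): for shmem/tmpfs,
 * find_get_page() above can hand back a radix-tree "exceptional" entry
 * instead of a real page pointer when the page has been pushed out to swap;
 * a tag bit in the value distinguishes it from a pointer, and the swap entry
 * is decoded from the remaining bits.  The standalone userspace analogue
 * below shows the general tagged-value idea with a made-up EXCEPTIONAL_BIT;
 * it is not the kernel's actual encoding.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EXCEPTIONAL_BIT	0x2UL		/* hypothetical tag bit */

static bool entry_is_exceptional(uintptr_t entry)
{
	return entry & EXCEPTIONAL_BIT;
}

static uintptr_t make_swap_entry(unsigned long swap_slot)
{
	/* pointers are at least 4-byte aligned, so the tag bit is free */
	return (swap_slot << 2) | EXCEPTIONAL_BIT;
}

int main(void)
{
	int page_like_object = 0;
	uintptr_t as_page = (uintptr_t)&page_like_object;
	uintptr_t as_swap = make_swap_entry(42);

	printf("page entry exceptional?  %d\n", entry_is_exceptional(as_page));
	printf("swap entry exceptional?  %d\n", entry_is_exceptional(as_swap));
	printf("decoded swap slot: %lu\n", (unsigned long)(as_swap >> 2));
	return 0;
}
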
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only loose check w/o page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
	    css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fix up refcnts and charges (see the sketch after this function) */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

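/*
 * Illustrative sketch (not part of memcontrol.c): for each entry counted in
 * moved_swap, the swap slot's ownership moved from "from" to "to" while the
 * page itself stayed on swap, so the cleanup above uncharges from->memsw
 * and, because the precharge had charged both to->res and to->memsw, gives
 * back to->res.  The standalone worked example below uses a hypothetical
 * struct cg with plain res/memsw fields to show the before/after bookkeeping
 * for one moved swap entry.
 */
#include <stdio.h>

#define PAGE_SZ 4096L

struct cg { long res; long memsw; };

int main(void)
{
	/* state just after one swap entry was moved and precharged to "to" */
	struct cg from = { .res = 0,       .memsw = PAGE_SZ };
	struct cg to   = { .res = PAGE_SZ, .memsw = PAGE_SZ };

	/* fixup done in __mem_cgroup_clear_mc() for moved_swap == 1 */
	from.memsw -= PAGE_SZ;	/* old cgroup no longer accounts the slot */
	to.res     -= PAGE_SZ;	/* page is still on swap, so no RSS charge */

	printf("from: res=%ld memsw=%ld\n", from.res, from.memsw);
	printf("to:   res=%ld memsw=%ld\n", to.res, to.memsw);
	return 0;
}
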
static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
	unsigned long move_charge_at_immigrate;

	/*
	 * We are now committed to this value, whatever it is.  Changes to the
	 * tunable will only affect upcoming migrations, not the current one,
	 * so we save it and keep using the snapshot (the snapshot-then-act
	 * pattern is sketched after this function).
	 */
	move_charge_at_immigrate = memcg->move_charge_at_immigrate;
	if (move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move the owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			mc.immigrate_flags = move_charge_at_immigrate;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

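/*
 * Illustrative sketch (not part of memcontrol.c): can_attach() reads
 * move_charge_at_immigrate once into a local and keys the whole migration
 * off that snapshot, so a concurrent write to the tunable only affects
 * future migrations.  A tiny standalone analogue of "snapshot, then act on
 * the snapshot", using a hypothetical _Atomic tunable:
 */
#include <stdio.h>
#include <stdatomic.h>

static _Atomic unsigned long move_flags = 1;	/* hypothetical tunable */

static void migrate_one_task(void)
{
	/* one load; later writes to move_flags cannot change this migration */
	unsigned long snapshot = atomic_load(&move_flags);

	if (!snapshot) {
		puts("charge moving disabled for this migration");
		return;
	}
	puts("moving charges according to the snapshotted flags");
}

int main(void)
{
	migrate_one_task();
	return 0;
}
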
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fix up refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all the precharges we got in can_attach().
		 * We try to charge one by one, but don't do any additional
		 * charges to mc.to once a charge has failed during the
		 * attach() phase (see the top-up-and-retry sketch after this
		 * function).
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}
	return ret;
}

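/*
 * Illustrative sketch (not part of memcontrol.c): once the precharge pool
 * acquired in can_attach() runs dry, the walker above tops it up one page at
 * a time and retries the same PTE range, giving up as soon as a single
 * top-up fails.  A standalone analogue of that "drain, top up by one, retry"
 * loop, with hypothetical reserve/budget counters standing in for the real
 * charging state:
 */
#include <stdio.h>
#include <stdbool.h>

static long reserve = 4;	/* pages precharged up front */
static long budget = 6;		/* how many extra single charges can succeed */

static bool precharge_one(void)
{
	if (budget <= 0)
		return false;
	budget--;
	reserve++;
	return true;
}

static long move_pages(long nr)
{
	long moved = 0;

	while (moved < nr) {
		if (!reserve) {
			/* pool exhausted: try to charge one page and retry */
			if (!precharge_one())
				break;		/* abandon: moving is best-effort */
			continue;
		}
		reserve--;
		moved++;
	}
	return moved;
}

int main(void)
{
	printf("moved %ld of 16 requested pages\n", move_pages(16));
	return 0;
}
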
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem might be waiting on our
		 * waitq, so we cancel all extra charges, wake up all waiters,
		 * and retry.  Because we cancel precharges, we might not be
		 * able to move enough charges, but moving charge is a
		 * best-effort feature anyway, so it isn't a big problem (see
		 * the sketch after this function).
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

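/*
 * Illustrative sketch (not part of memcontrol.c): mem_cgroup_move_charge()
 * only trylocks mmap_sem; if someone else holds it, that holder might itself
 * be blocked on our move-charge waitq, so we drop every extra charge (waking
 * the waiters) and try again rather than sleeping on the lock.  The
 * standalone skeleton below shows that trylock / release-what-others-wait-on
 * / retry pattern with stubbed-out lock and cleanup helpers.
 */
#include <stdio.h>
#include <stdbool.h>

static int contended_attempts = 2;	/* pretend the lock is busy twice */

static bool try_lock_mmap(void)
{
	return contended_attempts-- <= 0;
}

static void cancel_extra_charges_and_wake_waiters(void)
{
	puts("dropping precharges so lock holders blocked on us can proceed");
}

static void move_charge(void)
{
retry:
	if (!try_lock_mmap()) {
		cancel_extra_charges_and_wake_waiters();
		goto retry;
	}
	puts("got mmap_sem, walking the address space");
	/* ... walk VMAs, then unlock ... */
}

int main(void)
{
	move_charge();
	return 0;
}
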
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
}
#endif

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.base_cftypes = mem_cgroup_files,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static void __init memsw_file_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
}

static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		memsw_file_init();
	}
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

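/*
 * Usage note / illustrative sketch (not part of memcontrol.c): swap
 * accounting is controlled by the "swapaccount=" boot parameter parsed
 * above; booting with swapaccount=0 leaves do_swap_account off even when
 * CONFIG_MEMCG_SWAP is built in.  The standalone snippet below only shows
 * how a userspace tool might check the running kernel's command line for
 * that flag; the substring check is a simplified assumption, not how the
 * kernel parses its parameters.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmdline[4096] = "";
	FILE *f = fopen("/proc/cmdline", "r");

	if (f) {
		if (!fgets(cmdline, sizeof(cmdline), f))
			cmdline[0] = '\0';
		fclose(f);
	}
	if (strstr(cmdline, "swapaccount=0"))
		puts("memsw accounting disabled at boot");
	else
		puts("memsw accounting left at its default");
	return 0;
}
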
/*
 * The rest of init is performed during ->css_alloc() for the root css, which
 * happens before initcalls.  hotcpu_notifier() can't be done there as it
 * would introduce circular locking by adding a cgroup_lock -> cpu hotplug
 * dependency.  Do it from a subsys_initcall().
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	return 0;
}
subsys_initcall(mem_cgroup_init);