
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include <asm/uaccess.h>

#include <trace/events/vmscan.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
EXPORT_SYMBOL(mem_cgroup_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};
/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};
struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	unsigned long last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;

	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

	bool		oom_lock;
	atomic_t	under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

	atomic_t	dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct tcp_memcontrol tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list. per-memcg */
	struct list_head memcg_slab_caches;
	/* Not a spinlock, we can take a lot of time walking the list */
	struct mutex slab_caches_mutex;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};
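
/*
 * The mem_cgroup structure ends in a per-node area sized by nr_node_ids
 * (see the nodeinfo[0] flexible array member above), so the allocation
 * size must be computed at run time rather than with sizeof() alone.
 */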
static size_t memcg_size(void)
{
	return sizeof(struct mem_cgroup) +
		nr_node_ids * sizeof(struct mem_cgroup_per_node);
}

/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};

/* We account when limit is on, but only after call sites are patched */
#define KMEM_ACCOUNTED_MASK \
		((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))
#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
{
	clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	/*
	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
	 * will call css_put() if it sees the memcg is dead.
	 */
	smp_wmb();
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif
/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
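/*
 * Example: MEMFILE_PRIVATE(_MEM, RES_LIMIT) packs the resource type into the
 * upper 16 bits and the res_counter member (RES_LIMIT, from
 * <linux/res_counter.h>) into the lower 16 bits; MEMFILE_TYPE() and
 * MEMFILE_ATTR() recover the two halves in the cftype read/write handlers
 * elsewhere in this file.
 */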
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
{
	return &mem_cgroup_from_css(css)->vmpressure;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif
#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * There are two main reasons for not using the css_id for this:
 *  1) this works better in sparse environments, where we have a lot of memcgs,
 *     but only a few kmem-limited. Or also, if we have, for instance, 200
 *     memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *     200-entry array for that.
 *
 *  2) In order not to violate the cgroup API, we would like to do all memory
 *     allocation in ->create(). At that point, we haven't yet allocated the
 *     css_id. Having a separate index prevents us from messing with the cgroup
 *     core for this.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size. It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * css_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE 65535

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in kmem destruction function,
	 * since the charges will outlive the cgroup
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}
static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}
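
/*
 * Insert @mz into @mctz's RB-tree, keyed by how far the group's usage
 * exceeds its soft limit (@new_usage_in_excess). Groups that are already
 * on the tree, or not over their soft limit, are left alone.
 */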
static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
			     struct mem_cgroup_per_zone *mz,
			     struct mem_cgroup_tree_per_zone *mctz,
			     unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
			     struct mem_cgroup_per_zone *mz,
			     struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
			   struct mem_cgroup_per_zone *mz,
			   struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}
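
/*
 * (Re)position @memcg and all of its ancestors in the soft-limit trees for
 * @page's zone whenever their usage relative to the soft limit has changed.
 */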
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node(node) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}
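
/*
 * Pick the group with the largest soft-limit excess: the rightmost node of
 * the RB-tree. The node is removed from the tree (the caller re-inserts it
 * after reclaim) and returned with a css reference held.
 */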
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
		!css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of value. Then, we may have a chance to implement
 * a periodic synchronization of the counters in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because they account memory. Even if we provided a quick-and-fuzzy read, we
 * would always have to visit all online cpus and make the sum. So, for now,
 * unnecessary synchronization is not implemented. (just implemented for cpu
 * hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
				       bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}
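
/*
 * Update the per-cpu counters for a (un)charge of @nr_pages pages: RSS or
 * CACHE depending on @anon, the huge-page counter for THP pages, and the
 * pgpgin/pgpgout event counters (a negative @nr_pages means an uncharge).
 */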
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	preempt_disable();

	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);

	preempt_enable();
}
  807. unsigned long
  808. mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  809. {
  810. struct mem_cgroup_per_zone *mz;
  811. mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
  812. return mz->lru_size[lru];
  813. }
  814. static unsigned long
  815. mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
  816. unsigned int lru_mask)
  817. {
  818. struct mem_cgroup_per_zone *mz;
  819. enum lru_list lru;
  820. unsigned long ret = 0;
  821. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  822. for_each_lru(lru) {
  823. if (BIT(lru) & lru_mask)
  824. ret += mz->lru_size[lru];
  825. }
  826. return ret;
  827. }
  828. static unsigned long
  829. mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
  830. int nid, unsigned int lru_mask)
  831. {
  832. u64 total = 0;
  833. int zid;
  834. for (zid = 0; zid < MAX_NR_ZONES; zid++)
  835. total += mem_cgroup_zone_nr_lru_pages(memcg,
  836. nid, zid, lru_mask);
  837. return total;
  838. }
  839. static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
  840. unsigned int lru_mask)
  841. {
  842. int nid;
  843. u64 total = 0;
  844. for_each_node_state(nid, N_MEMORY)
  845. total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
  846. return total;
  847. }
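/*
 * Illustrative sketch: lru_mask is a bitmask of lru list indices, so counting
 * all file pages charged to a memcg across every node would look like:
 *
 *	nr = mem_cgroup_nr_lru_pages(memcg,
 *			BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE));
 *
 * which is the same mask as LRU_ALL_FILE used by
 * test_mem_cgroup_node_reclaimable() below.
 */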
  848. static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
  849. enum mem_cgroup_events_target target)
  850. {
  851. unsigned long val, next;
  852. val = __this_cpu_read(memcg->stat->nr_page_events);
  853. next = __this_cpu_read(memcg->stat->targets[target]);
  854. /* from time_after() in jiffies.h */
  855. if ((long)next - (long)val < 0) {
  856. switch (target) {
  857. case MEM_CGROUP_TARGET_THRESH:
  858. next = val + THRESHOLDS_EVENTS_TARGET;
  859. break;
  860. case MEM_CGROUP_TARGET_SOFTLIMIT:
  861. next = val + SOFTLIMIT_EVENTS_TARGET;
  862. break;
  863. case MEM_CGROUP_TARGET_NUMAINFO:
  864. next = val + NUMAINFO_EVENTS_TARGET;
  865. break;
  866. default:
  867. break;
  868. }
  869. __this_cpu_write(memcg->stat->targets[target], next);
  870. return true;
  871. }
  872. return false;
  873. }
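/*
 * Illustrative example (hypothetical numbers): if THRESHOLDS_EVENTS_TARGET
 * were 128 and this cpu had nr_page_events = 1000 with
 * targets[MEM_CGROUP_TARGET_THRESH] = 900, the time_after()-style check would
 * fire, the target would move to 1128 and the function would return true, so
 * the relatively expensive threshold work runs at most about once per 128
 * page events per cpu.
 */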
  874. /*
  875. * Check events in order.
  876. *
  877. */
  878. static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
  879. {
  880. preempt_disable();
  881. /* threshold event is triggered in finer grain than soft limit */
  882. if (unlikely(mem_cgroup_event_ratelimit(memcg,
  883. MEM_CGROUP_TARGET_THRESH))) {
  884. bool do_softlimit;
  885. bool do_numainfo __maybe_unused;
  886. do_softlimit = mem_cgroup_event_ratelimit(memcg,
  887. MEM_CGROUP_TARGET_SOFTLIMIT);
  888. #if MAX_NUMNODES > 1
  889. do_numainfo = mem_cgroup_event_ratelimit(memcg,
  890. MEM_CGROUP_TARGET_NUMAINFO);
  891. #endif
  892. preempt_enable();
  893. mem_cgroup_threshold(memcg);
  894. if (unlikely(do_softlimit))
  895. mem_cgroup_update_tree(memcg, page);
  896. #if MAX_NUMNODES > 1
  897. if (unlikely(do_numainfo))
  898. atomic_inc(&memcg->numainfo_events);
  899. #endif
  900. } else
  901. preempt_enable();
  902. }
  903. struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
  904. {
  905. return mem_cgroup_from_css(cgroup_css(cont, mem_cgroup_subsys_id));
  906. }
  907. struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
  908. {
  909. /*
  910. * mm_update_next_owner() may clear mm->owner to NULL
  911. * if it races with swapoff, page migration, etc.
  912. * So this can be called with p == NULL.
  913. */
  914. if (unlikely(!p))
  915. return NULL;
  916. return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
  917. }
  918. struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
  919. {
  920. struct mem_cgroup *memcg = NULL;
  921. if (!mm)
  922. return NULL;
  923. /*
924. * Because we have no locks, mm->owner may be being moved to another
  925. * cgroup. We use css_tryget() here even if this looks
  926. * pessimistic (rather than adding locks here).
  927. */
  928. rcu_read_lock();
  929. do {
  930. memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  931. if (unlikely(!memcg))
  932. break;
  933. } while (!css_tryget(&memcg->css));
  934. rcu_read_unlock();
  935. return memcg;
  936. }
  937. /*
938. * Returns the next alive memcg in a pre-order walk (with an elevated css
939. * ref count), or NULL if the whole of root's subtree has been visited.
940. *
941. * Helper function to be used by mem_cgroup_iter().
  942. */
  943. static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
  944. struct mem_cgroup *last_visited)
  945. {
  946. struct cgroup *prev_cgroup, *next_cgroup;
  947. /*
  948. * Root is not visited by cgroup iterators so it needs an
  949. * explicit visit.
  950. */
  951. if (!last_visited)
  952. return root;
  953. prev_cgroup = (last_visited == root) ? NULL
  954. : last_visited->css.cgroup;
  955. skip_node:
  956. next_cgroup = cgroup_next_descendant_pre(
  957. prev_cgroup, root->css.cgroup);
  958. /*
959. * Even if we found a group we have to make sure it is
960. * alive. A group whose css_tryget() fails is being destroyed,
961. * so it should be skipped and we should continue the tree walk.
  962. * last_visited css is safe to use because it is
  963. * protected by css_get and the tree walk is rcu safe.
  964. */
  965. if (next_cgroup) {
  966. struct mem_cgroup *mem = mem_cgroup_from_cont(
  967. next_cgroup);
  968. if (css_tryget(&mem->css))
  969. return mem;
  970. else {
  971. prev_cgroup = next_cgroup;
  972. goto skip_node;
  973. }
  974. }
  975. return NULL;
  976. }
  977. static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
  978. {
  979. /*
  980. * When a group in the hierarchy below root is destroyed, the
  981. * hierarchy iterator can no longer be trusted since it might
  982. * have pointed to the destroyed group. Invalidate it.
  983. */
  984. atomic_inc(&root->dead_count);
  985. }
  986. static struct mem_cgroup *
  987. mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
  988. struct mem_cgroup *root,
  989. int *sequence)
  990. {
  991. struct mem_cgroup *position = NULL;
  992. /*
  993. * A cgroup destruction happens in two stages: offlining and
  994. * release. They are separated by a RCU grace period.
  995. *
  996. * If the iterator is valid, we may still race with an
  997. * offlining. The RCU lock ensures the object won't be
  998. * released, tryget will fail if we lost the race.
  999. */
  1000. *sequence = atomic_read(&root->dead_count);
  1001. if (iter->last_dead_count == *sequence) {
  1002. smp_rmb();
  1003. position = iter->last_visited;
  1004. if (position && !css_tryget(&position->css))
  1005. position = NULL;
  1006. }
  1007. return position;
  1008. }
  1009. static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  1010. struct mem_cgroup *last_visited,
  1011. struct mem_cgroup *new_position,
  1012. int sequence)
  1013. {
  1014. if (last_visited)
  1015. css_put(&last_visited->css);
  1016. /*
  1017. * We store the sequence count from the time @last_visited was
  1018. * loaded successfully instead of rereading it here so that we
  1019. * don't lose destruction events in between. We could have
  1020. * raced with the destruction of @new_position after all.
  1021. */
  1022. iter->last_visited = new_position;
  1023. smp_wmb();
  1024. iter->last_dead_count = sequence;
  1025. }
  1026. /**
  1027. * mem_cgroup_iter - iterate over memory cgroup hierarchy
  1028. * @root: hierarchy root
  1029. * @prev: previously returned memcg, NULL on first invocation
  1030. * @reclaim: cookie for shared reclaim walks, NULL for full walks
  1031. *
  1032. * Returns references to children of the hierarchy below @root, or
  1033. * @root itself, or %NULL after a full round-trip.
  1034. *
  1035. * Caller must pass the return value in @prev on subsequent
  1036. * invocations for reference counting, or use mem_cgroup_iter_break()
  1037. * to cancel a hierarchy walk before the round-trip is complete.
  1038. *
  1039. * Reclaimers can specify a zone and a priority level in @reclaim to
  1040. * divide up the memcgs in the hierarchy among all concurrent
  1041. * reclaimers operating on the same zone and priority.
  1042. */
  1043. struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
  1044. struct mem_cgroup *prev,
  1045. struct mem_cgroup_reclaim_cookie *reclaim)
  1046. {
  1047. struct mem_cgroup *memcg = NULL;
  1048. struct mem_cgroup *last_visited = NULL;
  1049. if (mem_cgroup_disabled())
  1050. return NULL;
  1051. if (!root)
  1052. root = root_mem_cgroup;
  1053. if (prev && !reclaim)
  1054. last_visited = prev;
  1055. if (!root->use_hierarchy && root != root_mem_cgroup) {
  1056. if (prev)
  1057. goto out_css_put;
  1058. return root;
  1059. }
  1060. rcu_read_lock();
  1061. while (!memcg) {
  1062. struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
  1063. int uninitialized_var(seq);
  1064. if (reclaim) {
  1065. int nid = zone_to_nid(reclaim->zone);
  1066. int zid = zone_idx(reclaim->zone);
  1067. struct mem_cgroup_per_zone *mz;
  1068. mz = mem_cgroup_zoneinfo(root, nid, zid);
  1069. iter = &mz->reclaim_iter[reclaim->priority];
  1070. if (prev && reclaim->generation != iter->generation) {
  1071. iter->last_visited = NULL;
  1072. goto out_unlock;
  1073. }
  1074. last_visited = mem_cgroup_iter_load(iter, root, &seq);
  1075. }
  1076. memcg = __mem_cgroup_iter_next(root, last_visited);
  1077. if (reclaim) {
  1078. mem_cgroup_iter_update(iter, last_visited, memcg, seq);
  1079. if (!memcg)
  1080. iter->generation++;
  1081. else if (!prev && memcg)
  1082. reclaim->generation = iter->generation;
  1083. }
  1084. if (prev && !memcg)
  1085. goto out_unlock;
  1086. }
  1087. out_unlock:
  1088. rcu_read_unlock();
  1089. out_css_put:
  1090. if (prev && prev != root)
  1091. css_put(&prev->css);
  1092. return memcg;
  1093. }
  1094. /**
  1095. * mem_cgroup_iter_break - abort a hierarchy walk prematurely
  1096. * @root: hierarchy root
  1097. * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
  1098. */
  1099. void mem_cgroup_iter_break(struct mem_cgroup *root,
  1100. struct mem_cgroup *prev)
  1101. {
  1102. if (!root)
  1103. root = root_mem_cgroup;
  1104. if (prev && prev != root)
  1105. css_put(&prev->css);
  1106. }
  1107. /*
  1108. * Iteration constructs for visiting all cgroups (under a tree). If
  1109. * loops are exited prematurely (break), mem_cgroup_iter_break() must
  1110. * be used for reference counting.
  1111. */
  1112. #define for_each_mem_cgroup_tree(iter, root) \
  1113. for (iter = mem_cgroup_iter(root, NULL, NULL); \
  1114. iter != NULL; \
  1115. iter = mem_cgroup_iter(root, iter, NULL))
  1116. #define for_each_mem_cgroup(iter) \
  1117. for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
  1118. iter != NULL; \
  1119. iter = mem_cgroup_iter(NULL, iter, NULL))
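/*
 * Illustrative sketch of a hierarchy walk that stops early: when a walk
 * started with mem_cgroup_iter() is abandoned before it returns NULL,
 * mem_cgroup_iter_break() must drop the reference on the last returned memcg:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * some_condition() is a placeholder; the same break handling is used by
 * mem_cgroup_oom_lock() below.
 */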
  1120. void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  1121. {
  1122. struct mem_cgroup *memcg;
  1123. rcu_read_lock();
  1124. memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  1125. if (unlikely(!memcg))
  1126. goto out;
  1127. switch (idx) {
  1128. case PGFAULT:
  1129. this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
  1130. break;
  1131. case PGMAJFAULT:
  1132. this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
  1133. break;
  1134. default:
  1135. BUG();
  1136. }
  1137. out:
  1138. rcu_read_unlock();
  1139. }
  1140. EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
  1141. /**
  1142. * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  1143. * @zone: zone of the wanted lruvec
  1144. * @memcg: memcg of the wanted lruvec
  1145. *
  1146. * Returns the lru list vector holding pages for the given @zone and
1147. * @memcg. This can be the global zone lruvec, if the memory controller
  1148. * is disabled.
  1149. */
  1150. struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  1151. struct mem_cgroup *memcg)
  1152. {
  1153. struct mem_cgroup_per_zone *mz;
  1154. struct lruvec *lruvec;
  1155. if (mem_cgroup_disabled()) {
  1156. lruvec = &zone->lruvec;
  1157. goto out;
  1158. }
  1159. mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
  1160. lruvec = &mz->lruvec;
  1161. out:
  1162. /*
  1163. * Since a node can be onlined after the mem_cgroup was created,
  1164. * we have to be prepared to initialize lruvec->zone here;
  1165. * and if offlined then reonlined, we need to reinitialize it.
  1166. */
  1167. if (unlikely(lruvec->zone != zone))
  1168. lruvec->zone = zone;
  1169. return lruvec;
  1170. }
  1171. /*
  1172. * Following LRU functions are allowed to be used without PCG_LOCK.
1173. * Operations are called by global LRU routines independently of memcg.
1174. * What we have to take care of here is the validity of pc->mem_cgroup.
  1175. *
  1176. * Changes to pc->mem_cgroup happens when
  1177. * 1. charge
  1178. * 2. moving account
1179. * In the typical case, "charge" is done before add-to-lru. The exception is
1180. * SwapCache, which is added to the LRU before charge.
  1181. * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
  1182. * When moving account, the page is not on LRU. It's isolated.
  1183. */
  1184. /**
  1185. * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  1186. * @page: the page
  1187. * @zone: zone of the page
  1188. */
  1189. struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
  1190. {
  1191. struct mem_cgroup_per_zone *mz;
  1192. struct mem_cgroup *memcg;
  1193. struct page_cgroup *pc;
  1194. struct lruvec *lruvec;
  1195. if (mem_cgroup_disabled()) {
  1196. lruvec = &zone->lruvec;
  1197. goto out;
  1198. }
  1199. pc = lookup_page_cgroup(page);
  1200. memcg = pc->mem_cgroup;
  1201. /*
  1202. * Surreptitiously switch any uncharged offlist page to root:
  1203. * an uncharged page off lru does nothing to secure
  1204. * its former mem_cgroup from sudden removal.
  1205. *
  1206. * Our caller holds lru_lock, and PageCgroupUsed is updated
  1207. * under page_cgroup lock: between them, they make all uses
  1208. * of pc->mem_cgroup safe.
  1209. */
  1210. if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
  1211. pc->mem_cgroup = memcg = root_mem_cgroup;
  1212. mz = page_cgroup_zoneinfo(memcg, page);
  1213. lruvec = &mz->lruvec;
  1214. out:
  1215. /*
  1216. * Since a node can be onlined after the mem_cgroup was created,
  1217. * we have to be prepared to initialize lruvec->zone here;
  1218. * and if offlined then reonlined, we need to reinitialize it.
  1219. */
  1220. if (unlikely(lruvec->zone != zone))
  1221. lruvec->zone = zone;
  1222. return lruvec;
  1223. }
  1224. /**
  1225. * mem_cgroup_update_lru_size - account for adding or removing an lru page
  1226. * @lruvec: mem_cgroup per zone lru vector
  1227. * @lru: index of lru list the page is sitting on
  1228. * @nr_pages: positive when adding or negative when removing
  1229. *
  1230. * This function must be called when a page is added to or removed from an
  1231. * lru list.
  1232. */
  1233. void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
  1234. int nr_pages)
  1235. {
  1236. struct mem_cgroup_per_zone *mz;
  1237. unsigned long *lru_size;
  1238. if (mem_cgroup_disabled())
  1239. return;
  1240. mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
  1241. lru_size = mz->lru_size + lru;
  1242. *lru_size += nr_pages;
  1243. VM_BUG_ON((long)(*lru_size) < 0);
  1244. }
  1245. /*
1246. * Checks whether the given memcg is the same as, or within, root_memcg's
1247. * hierarchy subtree
  1248. */
  1249. bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
  1250. struct mem_cgroup *memcg)
  1251. {
  1252. if (root_memcg == memcg)
  1253. return true;
  1254. if (!root_memcg->use_hierarchy || !memcg)
  1255. return false;
  1256. return css_is_ancestor(&memcg->css, &root_memcg->css);
  1257. }
  1258. static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
  1259. struct mem_cgroup *memcg)
  1260. {
  1261. bool ret;
  1262. rcu_read_lock();
  1263. ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
  1264. rcu_read_unlock();
  1265. return ret;
  1266. }
  1267. bool task_in_mem_cgroup(struct task_struct *task,
  1268. const struct mem_cgroup *memcg)
  1269. {
  1270. struct mem_cgroup *curr = NULL;
  1271. struct task_struct *p;
  1272. bool ret;
  1273. p = find_lock_task_mm(task);
  1274. if (p) {
  1275. curr = try_get_mem_cgroup_from_mm(p->mm);
  1276. task_unlock(p);
  1277. } else {
  1278. /*
  1279. * All threads may have already detached their mm's, but the oom
  1280. * killer still needs to detect if they have already been oom
  1281. * killed to prevent needlessly killing additional tasks.
  1282. */
  1283. rcu_read_lock();
  1284. curr = mem_cgroup_from_task(task);
  1285. if (curr)
  1286. css_get(&curr->css);
  1287. rcu_read_unlock();
  1288. }
  1289. if (!curr)
  1290. return false;
  1291. /*
1292. * We should check use_hierarchy of "memcg" not "curr". Checking
1293. * use_hierarchy of "curr" here would make this function return true if
1294. * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1295. * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
  1296. */
  1297. ret = mem_cgroup_same_or_subtree(memcg, curr);
  1298. css_put(&curr->css);
  1299. return ret;
  1300. }
  1301. int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
  1302. {
  1303. unsigned long inactive_ratio;
  1304. unsigned long inactive;
  1305. unsigned long active;
  1306. unsigned long gb;
  1307. inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
  1308. active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
  1309. gb = (inactive + active) >> (30 - PAGE_SHIFT);
  1310. if (gb)
  1311. inactive_ratio = int_sqrt(10 * gb);
  1312. else
  1313. inactive_ratio = 1;
  1314. return inactive * inactive_ratio < active;
  1315. }
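/*
 * Worked example of the check above (illustrative numbers): with 8GB of anon
 * pages on this lruvec, gb = 8 and inactive_ratio = int_sqrt(80) = 8, so the
 * inactive list is reported as low only while inactive * 8 < active, i.e.
 * while less than roughly 1/9 of the anon memory sits on the inactive list.
 */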
  1316. #define mem_cgroup_from_res_counter(counter, member) \
  1317. container_of(counter, struct mem_cgroup, member)
  1318. /**
  1319. * mem_cgroup_margin - calculate chargeable space of a memory cgroup
  1320. * @memcg: the memory cgroup
  1321. *
1322. * Returns the maximum amount of memory @memcg can be charged with, in
  1323. * pages.
  1324. */
  1325. static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
  1326. {
  1327. unsigned long long margin;
  1328. margin = res_counter_margin(&memcg->res);
  1329. if (do_swap_account)
  1330. margin = min(margin, res_counter_margin(&memcg->memsw));
  1331. return margin >> PAGE_SHIFT;
  1332. }
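/*
 * Worked example (illustrative numbers): with a 512MB memory limit at 384MB
 * of usage, and do_swap_account enabled with a 768MB memsw limit at 704MB of
 * usage, res_counter_margin() yields 128MB for memory but only 64MB for
 * memsw; the tighter counter wins and the function returns 64MB worth of
 * pages (16384 with 4KB pages).
 */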
  1333. int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  1334. {
  1335. /* root ? */
  1336. if (!css_parent(&memcg->css))
  1337. return vm_swappiness;
  1338. return memcg->swappiness;
  1339. }
  1340. /*
1341. * memcg->moving_account is used for checking the possibility that some thread is
  1342. * calling move_account(). When a thread on CPU-A starts moving pages under
  1343. * a memcg, other threads should check memcg->moving_account under
  1344. * rcu_read_lock(), like this:
  1345. *
  1346. * CPU-A CPU-B
  1347. * rcu_read_lock()
1348. * memcg->moving_account+1 if (memcg->moving_account)
  1349. * take heavy locks.
  1350. * synchronize_rcu() update something.
  1351. * rcu_read_unlock()
  1352. * start move here.
  1353. */
  1354. /* for quick checking without looking up memcg */
  1355. atomic_t memcg_moving __read_mostly;
  1356. static void mem_cgroup_start_move(struct mem_cgroup *memcg)
  1357. {
  1358. atomic_inc(&memcg_moving);
  1359. atomic_inc(&memcg->moving_account);
  1360. synchronize_rcu();
  1361. }
  1362. static void mem_cgroup_end_move(struct mem_cgroup *memcg)
  1363. {
  1364. /*
  1365. * Now, mem_cgroup_clear_mc() may call this function with NULL.
  1366. * We check NULL in callee rather than caller.
  1367. */
  1368. if (memcg) {
  1369. atomic_dec(&memcg_moving);
  1370. atomic_dec(&memcg->moving_account);
  1371. }
  1372. }
  1373. /*
1374. * Two routines for checking whether "mem" is under move_account() or not.
  1375. *
  1376. * mem_cgroup_stolen() - checking whether a cgroup is mc.from or not. This
  1377. * is used for avoiding races in accounting. If true,
  1378. * pc->mem_cgroup may be overwritten.
  1379. *
1380. * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to
1381. * or under the hierarchy of moving cgroups. This is
1382. * for waiting at high memory pressure caused by "move".
  1383. */
  1384. static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
  1385. {
  1386. VM_BUG_ON(!rcu_read_lock_held());
  1387. return atomic_read(&memcg->moving_account) > 0;
  1388. }
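/*
 * Illustrative sketch of the reader side described in the moving_account
 * comment above: statistics updaters check mem_cgroup_stolen() under RCU and
 * only take the heavy move_lock when a move may be in flight, the pattern
 * used by __mem_cgroup_begin_update_page_stat() later in this file:
 *
 *	rcu_read_lock();
 *	if (mem_cgroup_stolen(memcg))
 *		move_lock_mem_cgroup(memcg, &flags);
 *	... update per-memcg page state ...
 */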
  1389. static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
  1390. {
  1391. struct mem_cgroup *from;
  1392. struct mem_cgroup *to;
  1393. bool ret = false;
  1394. /*
1395. * Unlike the task_move routines, we access mc.to and mc.from without
1396. * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
  1397. */
  1398. spin_lock(&mc.lock);
  1399. from = mc.from;
  1400. to = mc.to;
  1401. if (!from)
  1402. goto unlock;
  1403. ret = mem_cgroup_same_or_subtree(memcg, from)
  1404. || mem_cgroup_same_or_subtree(memcg, to);
  1405. unlock:
  1406. spin_unlock(&mc.lock);
  1407. return ret;
  1408. }
  1409. static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
  1410. {
  1411. if (mc.moving_task && current != mc.moving_task) {
  1412. if (mem_cgroup_under_move(memcg)) {
  1413. DEFINE_WAIT(wait);
  1414. prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
  1415. /* moving charge context might have finished. */
  1416. if (mc.moving_task)
  1417. schedule();
  1418. finish_wait(&mc.waitq, &wait);
  1419. return true;
  1420. }
  1421. }
  1422. return false;
  1423. }
  1424. /*
  1425. * Take this lock when
1426. * - code tries to modify a page's memcg while it's USED.
1427. * - code tries to modify page state accounting in a memcg.
  1428. * see mem_cgroup_stolen(), too.
  1429. */
  1430. static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
  1431. unsigned long *flags)
  1432. {
  1433. spin_lock_irqsave(&memcg->move_lock, *flags);
  1434. }
  1435. static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
  1436. unsigned long *flags)
  1437. {
  1438. spin_unlock_irqrestore(&memcg->move_lock, *flags);
  1439. }
  1440. #define K(x) ((x) << (PAGE_SHIFT-10))
  1441. /**
  1442. * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
  1443. * @memcg: The memory cgroup that went over limit
  1444. * @p: Task that is going to be killed
  1445. *
  1446. * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  1447. * enabled
  1448. */
  1449. void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  1450. {
  1451. struct cgroup *task_cgrp;
  1452. struct cgroup *mem_cgrp;
  1453. /*
  1454. * Need a buffer in BSS, can't rely on allocations. The code relies
  1455. * on the assumption that OOM is serialized for memory controller.
  1456. * If this assumption is broken, revisit this code.
  1457. */
  1458. static char memcg_name[PATH_MAX];
  1459. int ret;
  1460. struct mem_cgroup *iter;
  1461. unsigned int i;
  1462. if (!p)
  1463. return;
  1464. rcu_read_lock();
  1465. mem_cgrp = memcg->css.cgroup;
  1466. task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
  1467. ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
  1468. if (ret < 0) {
  1469. /*
1470. * Unfortunately, we are unable to convert to a useful name,
1471. * but we'll still print out the usage information.
  1472. */
  1473. rcu_read_unlock();
  1474. goto done;
  1475. }
  1476. rcu_read_unlock();
  1477. pr_info("Task in %s killed", memcg_name);
  1478. rcu_read_lock();
  1479. ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
  1480. if (ret < 0) {
  1481. rcu_read_unlock();
  1482. goto done;
  1483. }
  1484. rcu_read_unlock();
  1485. /*
1486. * Continues from above, so we don't need a KERN_ level
  1487. */
  1488. pr_cont(" as a result of limit of %s\n", memcg_name);
  1489. done:
  1490. pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
  1491. res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
  1492. res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
  1493. res_counter_read_u64(&memcg->res, RES_FAILCNT));
  1494. pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
  1495. res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
  1496. res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
  1497. res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
  1498. pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
  1499. res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
  1500. res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
  1501. res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
  1502. for_each_mem_cgroup_tree(iter, memcg) {
  1503. pr_info("Memory cgroup stats");
  1504. rcu_read_lock();
  1505. ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
  1506. if (!ret)
  1507. pr_cont(" for %s", memcg_name);
  1508. rcu_read_unlock();
  1509. pr_cont(":");
  1510. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  1511. if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
  1512. continue;
  1513. pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
  1514. K(mem_cgroup_read_stat(iter, i)));
  1515. }
  1516. for (i = 0; i < NR_LRU_LISTS; i++)
  1517. pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
  1518. K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
  1519. pr_cont("\n");
  1520. }
  1521. }
  1522. /*
1523. * This function returns the number of memcgs under the hierarchy tree.
1524. * Returns 1 (self count) if there are no children.
  1525. */
  1526. static int mem_cgroup_count_children(struct mem_cgroup *memcg)
  1527. {
  1528. int num = 0;
  1529. struct mem_cgroup *iter;
  1530. for_each_mem_cgroup_tree(iter, memcg)
  1531. num++;
  1532. return num;
  1533. }
  1534. /*
  1535. * Return the memory (and swap, if configured) limit for a memcg.
  1536. */
  1537. static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
  1538. {
  1539. u64 limit;
  1540. limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  1541. /*
  1542. * Do not consider swap space if we cannot swap due to swappiness
  1543. */
  1544. if (mem_cgroup_swappiness(memcg)) {
  1545. u64 memsw;
  1546. limit += total_swap_pages << PAGE_SHIFT;
  1547. memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  1548. /*
  1549. * If memsw is finite and limits the amount of swap space
  1550. * available to this memcg, return that limit.
  1551. */
  1552. limit = min(limit, memsw);
  1553. }
  1554. return limit;
  1555. }
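/*
 * Worked example (illustrative numbers): with a 300MB memory limit, 1GB of
 * total swap, a 500MB memsw limit and non-zero swappiness, the candidate
 * limit is 300MB + 1024MB = 1324MB, which is then clamped to the 500MB memsw
 * limit, so OOM badness below is computed against 500MB.
 */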
  1556. static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
  1557. int order)
  1558. {
  1559. struct mem_cgroup *iter;
  1560. unsigned long chosen_points = 0;
  1561. unsigned long totalpages;
  1562. unsigned int points = 0;
  1563. struct task_struct *chosen = NULL;
  1564. /*
  1565. * If current has a pending SIGKILL or is exiting, then automatically
  1566. * select it. The goal is to allow it to allocate so that it may
  1567. * quickly exit and free its memory.
  1568. */
  1569. if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
  1570. set_thread_flag(TIF_MEMDIE);
  1571. return;
  1572. }
  1573. check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
  1574. totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
  1575. for_each_mem_cgroup_tree(iter, memcg) {
  1576. struct cgroup *cgroup = iter->css.cgroup;
  1577. struct cgroup_iter it;
  1578. struct task_struct *task;
  1579. cgroup_iter_start(cgroup, &it);
  1580. while ((task = cgroup_iter_next(cgroup, &it))) {
  1581. switch (oom_scan_process_thread(task, totalpages, NULL,
  1582. false)) {
  1583. case OOM_SCAN_SELECT:
  1584. if (chosen)
  1585. put_task_struct(chosen);
  1586. chosen = task;
  1587. chosen_points = ULONG_MAX;
  1588. get_task_struct(chosen);
  1589. /* fall through */
  1590. case OOM_SCAN_CONTINUE:
  1591. continue;
  1592. case OOM_SCAN_ABORT:
  1593. cgroup_iter_end(cgroup, &it);
  1594. mem_cgroup_iter_break(memcg, iter);
  1595. if (chosen)
  1596. put_task_struct(chosen);
  1597. return;
  1598. case OOM_SCAN_OK:
  1599. break;
  1600. };
  1601. points = oom_badness(task, memcg, NULL, totalpages);
  1602. if (points > chosen_points) {
  1603. if (chosen)
  1604. put_task_struct(chosen);
  1605. chosen = task;
  1606. chosen_points = points;
  1607. get_task_struct(chosen);
  1608. }
  1609. }
  1610. cgroup_iter_end(cgroup, &it);
  1611. }
  1612. if (!chosen)
  1613. return;
  1614. points = chosen_points * 1000 / totalpages;
  1615. oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
  1616. NULL, "Memory cgroup out of memory");
  1617. }
  1618. static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
  1619. gfp_t gfp_mask,
  1620. unsigned long flags)
  1621. {
  1622. unsigned long total = 0;
  1623. bool noswap = false;
  1624. int loop;
  1625. if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
  1626. noswap = true;
  1627. if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
  1628. noswap = true;
  1629. for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
  1630. if (loop)
  1631. drain_all_stock_async(memcg);
  1632. total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
  1633. /*
  1634. * Allow limit shrinkers, which are triggered directly
  1635. * by userspace, to catch signals and stop reclaim
  1636. * after minimal progress, regardless of the margin.
  1637. */
  1638. if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
  1639. break;
  1640. if (mem_cgroup_margin(memcg))
  1641. break;
  1642. /*
  1643. * If nothing was reclaimed after two attempts, there
  1644. * may be no reclaimable pages in this hierarchy.
  1645. */
  1646. if (loop && !total)
  1647. break;
  1648. }
  1649. return total;
  1650. }
  1651. /**
  1652. * test_mem_cgroup_node_reclaimable
  1653. * @memcg: the target memcg
  1654. * @nid: the node ID to be checked.
1655. * @noswap: specify true here if the user wants file-only information.
  1656. *
  1657. * This function returns whether the specified memcg contains any
  1658. * reclaimable pages on a node. Returns true if there are any reclaimable
  1659. * pages in the node.
  1660. */
  1661. static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
  1662. int nid, bool noswap)
  1663. {
  1664. if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
  1665. return true;
  1666. if (noswap || !total_swap_pages)
  1667. return false;
  1668. if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
  1669. return true;
  1670. return false;
  1671. }
  1672. #if MAX_NUMNODES > 1
  1673. /*
  1674. * Always updating the nodemask is not very good - even if we have an empty
  1675. * list or the wrong list here, we can start from some node and traverse all
  1676. * nodes based on the zonelist. So update the list loosely once per 10 secs.
  1677. *
  1678. */
  1679. static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
  1680. {
  1681. int nid;
  1682. /*
1683. * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1684. * pagein/pageout changes since the last update.
  1685. */
  1686. if (!atomic_read(&memcg->numainfo_events))
  1687. return;
  1688. if (atomic_inc_return(&memcg->numainfo_updating) > 1)
  1689. return;
  1690. /* make a nodemask where this memcg uses memory from */
  1691. memcg->scan_nodes = node_states[N_MEMORY];
  1692. for_each_node_mask(nid, node_states[N_MEMORY]) {
  1693. if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
  1694. node_clear(nid, memcg->scan_nodes);
  1695. }
  1696. atomic_set(&memcg->numainfo_events, 0);
  1697. atomic_set(&memcg->numainfo_updating, 0);
  1698. }
  1699. /*
1700. * Selecting a node where we start reclaim from. Because what we need is just
1701. * reducing the usage counter, starting from anywhere is OK. Reclaiming
1702. * memory from the current node has both pros and cons.
1703. *
1704. * Freeing memory from the current node means freeing memory from a node which
1705. * we'll use or have used. So, it may disturb the LRU. And if several threads
1706. * hit limits, they will see contention on a node. But freeing from a remote
1707. * node means higher memory reclaim costs because of memory latency.
1708. *
1709. * For now, we use round-robin. A better algorithm is welcome.
  1710. */
  1711. int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  1712. {
  1713. int node;
  1714. mem_cgroup_may_update_nodemask(memcg);
  1715. node = memcg->last_scanned_node;
  1716. node = next_node(node, memcg->scan_nodes);
  1717. if (node == MAX_NUMNODES)
  1718. node = first_node(memcg->scan_nodes);
  1719. /*
  1720. * We call this when we hit limit, not when pages are added to LRU.
1721. * The LRUs may hold no pages because all pages are UNEVICTABLE, or the
1722. * memcg is too small and all pages are not on the LRU. In that case,
1723. * we use the current node.
  1724. */
  1725. if (unlikely(node == MAX_NUMNODES))
  1726. node = numa_node_id();
  1727. memcg->last_scanned_node = node;
  1728. return node;
  1729. }
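/*
 * Worked example (illustrative): with scan_nodes = {0,2} and
 * last_scanned_node = 2, next_node() runs off the end of the mask and returns
 * MAX_NUMNODES, so we wrap around with first_node() and reclaim starts from
 * node 0 this time. With an empty scan_nodes mask we fall back to the current
 * node via numa_node_id().
 */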
  1730. /*
1731. * Check all nodes whether they contain reclaimable pages or not.
1732. * For quick scan, we make use of scan_nodes. This will allow us to skip
1733. * unused nodes. But scan_nodes is lazily updated and may not contain
1734. * enough new information. We need to do a double check.
  1735. */
  1736. static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  1737. {
  1738. int nid;
  1739. /*
  1740. * quick check...making use of scan_node.
  1741. * We can skip unused nodes.
  1742. */
  1743. if (!nodes_empty(memcg->scan_nodes)) {
  1744. for (nid = first_node(memcg->scan_nodes);
  1745. nid < MAX_NUMNODES;
  1746. nid = next_node(nid, memcg->scan_nodes)) {
  1747. if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
  1748. return true;
  1749. }
  1750. }
  1751. /*
  1752. * Check rest of nodes.
  1753. */
  1754. for_each_node_state(nid, N_MEMORY) {
  1755. if (node_isset(nid, memcg->scan_nodes))
  1756. continue;
  1757. if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
  1758. return true;
  1759. }
  1760. return false;
  1761. }
  1762. #else
  1763. int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  1764. {
  1765. return 0;
  1766. }
  1767. static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  1768. {
  1769. return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
  1770. }
  1771. #endif
  1772. static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
  1773. struct zone *zone,
  1774. gfp_t gfp_mask,
  1775. unsigned long *total_scanned)
  1776. {
  1777. struct mem_cgroup *victim = NULL;
  1778. int total = 0;
  1779. int loop = 0;
  1780. unsigned long excess;
  1781. unsigned long nr_scanned;
  1782. struct mem_cgroup_reclaim_cookie reclaim = {
  1783. .zone = zone,
  1784. .priority = 0,
  1785. };
  1786. excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
  1787. while (1) {
  1788. victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
  1789. if (!victim) {
  1790. loop++;
  1791. if (loop >= 2) {
  1792. /*
  1793. * If we have not been able to reclaim
1794. * anything, it might be because there are
  1795. * no reclaimable pages under this hierarchy
  1796. */
  1797. if (!total)
  1798. break;
  1799. /*
  1800. * We want to do more targeted reclaim.
1801. * excess >> 2 is not too excessive, so that we do not
1802. * reclaim too much, nor too little, so that we do not keep
1803. * coming back to reclaim from this cgroup
  1804. */
  1805. if (total >= (excess >> 2) ||
  1806. (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
  1807. break;
  1808. }
  1809. continue;
  1810. }
  1811. if (!mem_cgroup_reclaimable(victim, false))
  1812. continue;
  1813. total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
  1814. zone, &nr_scanned);
  1815. *total_scanned += nr_scanned;
  1816. if (!res_counter_soft_limit_excess(&root_memcg->res))
  1817. break;
  1818. }
  1819. mem_cgroup_iter_break(root_memcg, victim);
  1820. return total;
  1821. }
  1822. /*
1823. * Check whether the OOM killer is already running under our hierarchy.
1824. * If someone is running, return false.
1825. * Has to be called with memcg_oom_lock held.
  1826. */
  1827. static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
  1828. {
  1829. struct mem_cgroup *iter, *failed = NULL;
  1830. for_each_mem_cgroup_tree(iter, memcg) {
  1831. if (iter->oom_lock) {
  1832. /*
1833. * This subtree of our hierarchy is already locked,
1834. * so we cannot take the lock.
  1835. */
  1836. failed = iter;
  1837. mem_cgroup_iter_break(memcg, iter);
  1838. break;
  1839. } else
  1840. iter->oom_lock = true;
  1841. }
  1842. if (!failed)
  1843. return true;
  1844. /*
1845. * OK, we failed to lock the whole subtree so we have to clean up
1846. * what we set up on the way to the failing subtree.
  1847. */
  1848. for_each_mem_cgroup_tree(iter, memcg) {
  1849. if (iter == failed) {
  1850. mem_cgroup_iter_break(memcg, iter);
  1851. break;
  1852. }
  1853. iter->oom_lock = false;
  1854. }
  1855. return false;
  1856. }
  1857. /*
1858. * Has to be called with memcg_oom_lock held.
  1859. */
  1860. static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
  1861. {
  1862. struct mem_cgroup *iter;
  1863. for_each_mem_cgroup_tree(iter, memcg)
  1864. iter->oom_lock = false;
  1865. return 0;
  1866. }
  1867. static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
  1868. {
  1869. struct mem_cgroup *iter;
  1870. for_each_mem_cgroup_tree(iter, memcg)
  1871. atomic_inc(&iter->under_oom);
  1872. }
  1873. static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
  1874. {
  1875. struct mem_cgroup *iter;
  1876. /*
  1877. * When a new child is created while the hierarchy is under oom,
  1878. * mem_cgroup_oom_lock() may not be called. We have to use
  1879. * atomic_add_unless() here.
  1880. */
  1881. for_each_mem_cgroup_tree(iter, memcg)
  1882. atomic_add_unless(&iter->under_oom, -1, 0);
  1883. }
  1884. static DEFINE_SPINLOCK(memcg_oom_lock);
  1885. static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
  1886. struct oom_wait_info {
  1887. struct mem_cgroup *memcg;
  1888. wait_queue_t wait;
  1889. };
  1890. static int memcg_oom_wake_function(wait_queue_t *wait,
  1891. unsigned mode, int sync, void *arg)
  1892. {
  1893. struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
  1894. struct mem_cgroup *oom_wait_memcg;
  1895. struct oom_wait_info *oom_wait_info;
  1896. oom_wait_info = container_of(wait, struct oom_wait_info, wait);
  1897. oom_wait_memcg = oom_wait_info->memcg;
  1898. /*
1899. * Both oom_wait_info->memcg and wake_memcg are stable under us.
1900. * Then we can use css_is_ancestor() without worrying about RCU.
  1901. */
  1902. if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
  1903. && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
  1904. return 0;
  1905. return autoremove_wake_function(wait, mode, sync, arg);
  1906. }
  1907. static void memcg_wakeup_oom(struct mem_cgroup *memcg)
  1908. {
  1909. /* for filtering, pass "memcg" as argument. */
  1910. __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
  1911. }
  1912. static void memcg_oom_recover(struct mem_cgroup *memcg)
  1913. {
  1914. if (memcg && atomic_read(&memcg->under_oom))
  1915. memcg_wakeup_oom(memcg);
  1916. }
  1917. /*
1918. * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
  1919. */
  1920. static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
  1921. int order)
  1922. {
  1923. struct oom_wait_info owait;
  1924. bool locked, need_to_kill;
  1925. owait.memcg = memcg;
  1926. owait.wait.flags = 0;
  1927. owait.wait.func = memcg_oom_wake_function;
  1928. owait.wait.private = current;
  1929. INIT_LIST_HEAD(&owait.wait.task_list);
  1930. need_to_kill = true;
  1931. mem_cgroup_mark_under_oom(memcg);
1932. /* At first, try to OOM lock the hierarchy under memcg. */
  1933. spin_lock(&memcg_oom_lock);
  1934. locked = mem_cgroup_oom_lock(memcg);
  1935. /*
  1936. * Even if signal_pending(), we can't quit charge() loop without
  1937. * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1938. * under OOM is always welcome, so use TASK_KILLABLE here.
  1939. */
  1940. prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
  1941. if (!locked || memcg->oom_kill_disable)
  1942. need_to_kill = false;
  1943. if (locked)
  1944. mem_cgroup_oom_notify(memcg);
  1945. spin_unlock(&memcg_oom_lock);
  1946. if (need_to_kill) {
  1947. finish_wait(&memcg_oom_waitq, &owait.wait);
  1948. mem_cgroup_out_of_memory(memcg, mask, order);
  1949. } else {
  1950. schedule();
  1951. finish_wait(&memcg_oom_waitq, &owait.wait);
  1952. }
  1953. spin_lock(&memcg_oom_lock);
  1954. if (locked)
  1955. mem_cgroup_oom_unlock(memcg);
  1956. memcg_wakeup_oom(memcg);
  1957. spin_unlock(&memcg_oom_lock);
  1958. mem_cgroup_unmark_under_oom(memcg);
  1959. if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
  1960. return false;
  1961. /* Give chance to dying process */
  1962. schedule_timeout_uninterruptible(1);
  1963. return true;
  1964. }
  1965. /*
  1966. * Currently used to update mapped file statistics, but the routine can be
  1967. * generalized to update other statistics as well.
  1968. *
  1969. * Notes: Race condition
  1970. *
  1971. * We usually use page_cgroup_lock() for accessing page_cgroup member but
1972. * it tends to be costly. But considering some conditions, we don't need
  1973. * to do so _always_.
  1974. *
  1975. * Considering "charge", lock_page_cgroup() is not required because all
  1976. * file-stat operations happen after a page is attached to radix-tree. There
1977. * is no race with "charge".
  1978. *
  1979. * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
  1980. * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
1981. * if there is a race with "uncharge". The statistics themselves are
1982. * properly handled by flags.
  1983. *
  1984. * Considering "move", this is an only case we see a race. To make the race
  1985. * small, we check mm->moving_account and detect there are possibility of race
  1986. * If there is, we take a lock.
  1987. */
  1988. void __mem_cgroup_begin_update_page_stat(struct page *page,
  1989. bool *locked, unsigned long *flags)
  1990. {
  1991. struct mem_cgroup *memcg;
  1992. struct page_cgroup *pc;
  1993. pc = lookup_page_cgroup(page);
  1994. again:
  1995. memcg = pc->mem_cgroup;
  1996. if (unlikely(!memcg || !PageCgroupUsed(pc)))
  1997. return;
  1998. /*
  1999. * If this memory cgroup is not under account moving, we don't
  2000. * need to take move_lock_mem_cgroup(). Because we already hold
  2001. * rcu_read_lock(), any calls to move_account will be delayed until
  2002. * rcu_read_unlock() if mem_cgroup_stolen() == true.
  2003. */
  2004. if (!mem_cgroup_stolen(memcg))
  2005. return;
  2006. move_lock_mem_cgroup(memcg, flags);
  2007. if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
  2008. move_unlock_mem_cgroup(memcg, flags);
  2009. goto again;
  2010. }
  2011. *locked = true;
  2012. }
  2013. void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
  2014. {
  2015. struct page_cgroup *pc = lookup_page_cgroup(page);
  2016. /*
  2017. * It's guaranteed that pc->mem_cgroup never changes while
2018. * the lock is held, because any routine that modifies pc->mem_cgroup
2019. * should take move_lock_mem_cgroup().
  2020. */
  2021. move_unlock_mem_cgroup(pc->mem_cgroup, flags);
  2022. }
  2023. void mem_cgroup_update_page_stat(struct page *page,
  2024. enum mem_cgroup_page_stat_item idx, int val)
  2025. {
  2026. struct mem_cgroup *memcg;
  2027. struct page_cgroup *pc = lookup_page_cgroup(page);
  2028. unsigned long uninitialized_var(flags);
  2029. if (mem_cgroup_disabled())
  2030. return;
  2031. memcg = pc->mem_cgroup;
  2032. if (unlikely(!memcg || !PageCgroupUsed(pc)))
  2033. return;
  2034. switch (idx) {
  2035. case MEMCG_NR_FILE_MAPPED:
  2036. idx = MEM_CGROUP_STAT_FILE_MAPPED;
  2037. break;
  2038. default:
  2039. BUG();
  2040. }
  2041. this_cpu_add(memcg->stat->count[idx], val);
  2042. }
  2043. /*
  2044. * size of first charge trial. "32" comes from vmscan.c's magic value.
2045. * TODO: it may be necessary to use larger values on big iron.
  2046. */
  2047. #define CHARGE_BATCH 32U
  2048. struct memcg_stock_pcp {
2049. struct mem_cgroup *cached; /* this is never the root cgroup */
  2050. unsigned int nr_pages;
  2051. struct work_struct work;
  2052. unsigned long flags;
  2053. #define FLUSHING_CACHED_CHARGE 0
  2054. };
  2055. static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
  2056. static DEFINE_MUTEX(percpu_charge_mutex);
  2057. /**
  2058. * consume_stock: Try to consume stocked charge on this cpu.
  2059. * @memcg: memcg to consume from.
  2060. * @nr_pages: how many pages to charge.
  2061. *
  2062. * The charges will only happen if @memcg matches the current cpu's memcg
  2063. * stock, and at least @nr_pages are available in that stock. Failure to
  2064. * service an allocation will refill the stock.
  2065. *
  2066. * returns true if successful, false otherwise.
  2067. */
  2068. static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  2069. {
  2070. struct memcg_stock_pcp *stock;
  2071. bool ret = true;
  2072. if (nr_pages > CHARGE_BATCH)
  2073. return false;
  2074. stock = &get_cpu_var(memcg_stock);
  2075. if (memcg == stock->cached && stock->nr_pages >= nr_pages)
  2076. stock->nr_pages -= nr_pages;
  2077. else /* need to call res_counter_charge */
  2078. ret = false;
  2079. put_cpu_var(memcg_stock);
  2080. return ret;
  2081. }
  2082. /*
2083. * Returns stock cached in percpu back to res_counter and resets cached information.
  2084. */
  2085. static void drain_stock(struct memcg_stock_pcp *stock)
  2086. {
  2087. struct mem_cgroup *old = stock->cached;
  2088. if (stock->nr_pages) {
  2089. unsigned long bytes = stock->nr_pages * PAGE_SIZE;
  2090. res_counter_uncharge(&old->res, bytes);
  2091. if (do_swap_account)
  2092. res_counter_uncharge(&old->memsw, bytes);
  2093. stock->nr_pages = 0;
  2094. }
  2095. stock->cached = NULL;
  2096. }
  2097. /*
2098. * This must be called with preemption disabled or by
2099. * a thread which is pinned to the local cpu.
  2100. */
  2101. static void drain_local_stock(struct work_struct *dummy)
  2102. {
  2103. struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
  2104. drain_stock(stock);
  2105. clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
  2106. }
  2107. static void __init memcg_stock_init(void)
  2108. {
  2109. int cpu;
  2110. for_each_possible_cpu(cpu) {
  2111. struct memcg_stock_pcp *stock =
  2112. &per_cpu(memcg_stock, cpu);
  2113. INIT_WORK(&stock->work, drain_local_stock);
  2114. }
  2115. }
  2116. /*
2117. * Cache charges (nr_pages) from res_counter in the local per-cpu area.
2118. * They will be consumed by the consume_stock() function later.
  2119. */
  2120. static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  2121. {
  2122. struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
  2123. if (stock->cached != memcg) { /* reset if necessary */
  2124. drain_stock(stock);
  2125. stock->cached = memcg;
  2126. }
  2127. stock->nr_pages += nr_pages;
  2128. put_cpu_var(memcg_stock);
  2129. }
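/*
 * Illustrative sketch of how the per-cpu stock is meant to be used by the
 * charge path (see __mem_cgroup_try_charge() below): consume cached charges
 * first and, when a batched res_counter charge succeeds, park the surplus
 * back in the stock:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;			(stock had enough cached charge)
 *	... charge the res_counter for a CHARGE_BATCH sized batch ...
 *	refill_stock(memcg, batch - nr_pages);	(cache the unused remainder)
 */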
  2130. /*
2131. * Drains all per-CPU charge caches for the given root_memcg and the
2132. * subtree of the hierarchy under it. The sync flag says whether we should
2133. * block until the work is done.
  2134. */
  2135. static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
  2136. {
  2137. int cpu, curcpu;
  2138. /* Notify other cpus that system-wide "drain" is running */
  2139. get_online_cpus();
  2140. curcpu = get_cpu();
  2141. for_each_online_cpu(cpu) {
  2142. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  2143. struct mem_cgroup *memcg;
  2144. memcg = stock->cached;
  2145. if (!memcg || !stock->nr_pages)
  2146. continue;
  2147. if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
  2148. continue;
  2149. if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
  2150. if (cpu == curcpu)
  2151. drain_local_stock(&stock->work);
  2152. else
  2153. schedule_work_on(cpu, &stock->work);
  2154. }
  2155. }
  2156. put_cpu();
  2157. if (!sync)
  2158. goto out;
  2159. for_each_online_cpu(cpu) {
  2160. struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  2161. if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
  2162. flush_work(&stock->work);
  2163. }
  2164. out:
  2165. put_online_cpus();
  2166. }
  2167. /*
2168. * Tries to drain stocked charges on other cpus. This function is asynchronous
2169. * and just queues a work item per cpu to drain locally on each cpu. The caller
2170. * can expect some charges to come back to res_counter later but cannot wait
2171. * for it.
  2172. */
  2173. static void drain_all_stock_async(struct mem_cgroup *root_memcg)
  2174. {
  2175. /*
2176. * If someone is already draining, avoid adding more kworker runs.
  2177. */
  2178. if (!mutex_trylock(&percpu_charge_mutex))
  2179. return;
  2180. drain_all_stock(root_memcg, false);
  2181. mutex_unlock(&percpu_charge_mutex);
  2182. }
  2183. /* This is a synchronous drain interface. */
  2184. static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
  2185. {
  2186. /* called when force_empty is called */
  2187. mutex_lock(&percpu_charge_mutex);
  2188. drain_all_stock(root_memcg, true);
  2189. mutex_unlock(&percpu_charge_mutex);
  2190. }
  2191. /*
2192. * This function drains the percpu counter values from a DEAD cpu and
2193. * moves them to the local cpu. Note that this function can be preempted.
  2194. */
  2195. static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
  2196. {
  2197. int i;
  2198. spin_lock(&memcg->pcp_counter_lock);
  2199. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  2200. long x = per_cpu(memcg->stat->count[i], cpu);
  2201. per_cpu(memcg->stat->count[i], cpu) = 0;
  2202. memcg->nocpu_base.count[i] += x;
  2203. }
  2204. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
  2205. unsigned long x = per_cpu(memcg->stat->events[i], cpu);
  2206. per_cpu(memcg->stat->events[i], cpu) = 0;
  2207. memcg->nocpu_base.events[i] += x;
  2208. }
  2209. spin_unlock(&memcg->pcp_counter_lock);
  2210. }
  2211. static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
  2212. unsigned long action,
  2213. void *hcpu)
  2214. {
  2215. int cpu = (unsigned long)hcpu;
  2216. struct memcg_stock_pcp *stock;
  2217. struct mem_cgroup *iter;
  2218. if (action == CPU_ONLINE)
  2219. return NOTIFY_OK;
  2220. if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
  2221. return NOTIFY_OK;
  2222. for_each_mem_cgroup(iter)
  2223. mem_cgroup_drain_pcp_counter(iter, cpu);
  2224. stock = &per_cpu(memcg_stock, cpu);
  2225. drain_stock(stock);
  2226. return NOTIFY_OK;
  2227. }
  2228. /* See __mem_cgroup_try_charge() for details */
  2229. enum {
  2230. CHARGE_OK, /* success */
  2231. CHARGE_RETRY, /* need to retry but retry is not bad */
  2232. CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2233. CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough res. */
  2234. CHARGE_OOM_DIE, /* the current is killed because of OOM */
  2235. };
  2236. static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
  2237. unsigned int nr_pages, unsigned int min_pages,
  2238. bool oom_check)
  2239. {
  2240. unsigned long csize = nr_pages * PAGE_SIZE;
  2241. struct mem_cgroup *mem_over_limit;
  2242. struct res_counter *fail_res;
  2243. unsigned long flags = 0;
  2244. int ret;
  2245. ret = res_counter_charge(&memcg->res, csize, &fail_res);
  2246. if (likely(!ret)) {
  2247. if (!do_swap_account)
  2248. return CHARGE_OK;
  2249. ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
  2250. if (likely(!ret))
  2251. return CHARGE_OK;
  2252. res_counter_uncharge(&memcg->res, csize);
  2253. mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
  2254. flags |= MEM_CGROUP_RECLAIM_NOSWAP;
  2255. } else
  2256. mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
  2257. /*
  2258. * Never reclaim on behalf of optional batching, retry with a
  2259. * single page instead.
  2260. */
  2261. if (nr_pages > min_pages)
  2262. return CHARGE_RETRY;
  2263. if (!(gfp_mask & __GFP_WAIT))
  2264. return CHARGE_WOULDBLOCK;
  2265. if (gfp_mask & __GFP_NORETRY)
  2266. return CHARGE_NOMEM;
  2267. ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
  2268. if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
  2269. return CHARGE_RETRY;
  2270. /*
  2271. * Even though the limit is exceeded at this point, reclaim
  2272. * may have been able to free some pages. Retry the charge
  2273. * before killing the task.
  2274. *
  2275. * Only for regular pages, though: huge pages are rather
  2276. * unlikely to succeed so close to the limit, and we fall back
  2277. * to regular pages anyway in case of failure.
  2278. */
  2279. if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
  2280. return CHARGE_RETRY;
  2281. /*
  2282. * At task move, charge accounts can be doubly counted. So, it's
  2283. * better to wait until the end of task_move if something is going on.
  2284. */
  2285. if (mem_cgroup_wait_acct_move(mem_over_limit))
  2286. return CHARGE_RETRY;
2287. /* If we don't need to call the oom-killer at all, return immediately */
  2288. if (!oom_check)
  2289. return CHARGE_NOMEM;
  2290. /* check OOM */
  2291. if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
  2292. return CHARGE_OOM_DIE;
  2293. return CHARGE_RETRY;
  2294. }
  2295. /*
  2296. * __mem_cgroup_try_charge() does
  2297. * 1. detect memcg to be charged against from passed *mm and *ptr,
  2298. * 2. update res_counter
  2299. * 3. call memory reclaim if necessary.
  2300. *
2301. * In some special cases, if the task is dying (fatal_signal_pending() or
2302. * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
2303. * to *ptr. There are two reasons for this. 1: dying threads should quit as soon
  2304. * as possible without any hazards. 2: all pages should have a valid
  2305. * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
  2306. * pointer, that is treated as a charge to root_mem_cgroup.
  2307. *
  2308. * So __mem_cgroup_try_charge() will return
  2309. * 0 ... on success, filling *ptr with a valid memcg pointer.
  2310. * -ENOMEM ... charge failure because of resource limits.
2311. * -EINTR ... if the thread is dying. *ptr is filled with root_mem_cgroup.
  2312. *
2313. * Unlike the exported interface, an "oom" parameter is added. If oom==true,
  2314. * the oom-killer can be invoked.
  2315. */
  2316. static int __mem_cgroup_try_charge(struct mm_struct *mm,
  2317. gfp_t gfp_mask,
  2318. unsigned int nr_pages,
  2319. struct mem_cgroup **ptr,
  2320. bool oom)
  2321. {
  2322. unsigned int batch = max(CHARGE_BATCH, nr_pages);
  2323. int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2324. struct mem_cgroup *memcg = NULL;
  2325. int ret;
  2326. /*
2327. * Unlike the global VM's OOM kill, we're not in a system-level memory
2328. * shortage. So, allow dying processes to go ahead in addition to
2329. * MEMDIE processes.
  2330. */
  2331. if (unlikely(test_thread_flag(TIF_MEMDIE)
  2332. || fatal_signal_pending(current)))
  2333. goto bypass;
  2334. /*
  2335. * We always charge the cgroup the mm_struct belongs to.
  2336. * The mm_struct's mem_cgroup changes on task migration if the
  2337. * thread group leader migrates. It's possible that mm is not
  2338. * set, if so charge the root memcg (happens for pagecache usage).
  2339. */
  2340. if (!*ptr && !mm)
  2341. *ptr = root_mem_cgroup;
  2342. again:
  2343. if (*ptr) { /* css should be a valid one */
  2344. memcg = *ptr;
  2345. if (mem_cgroup_is_root(memcg))
  2346. goto done;
  2347. if (consume_stock(memcg, nr_pages))
  2348. goto done;
  2349. css_get(&memcg->css);
  2350. } else {
  2351. struct task_struct *p;
  2352. rcu_read_lock();
  2353. p = rcu_dereference(mm->owner);
/*
* Because we don't have task_lock(), "p" can exit.
* In that case, "memcg" can point to root, or p can be NULL due to
* a race with swapoff. Then we run a small risk of mis-accounting.
* But this kind of mis-accounting by race always happens because
* we don't hold cgroup_mutex(). It would be overkill to prevent, so we
* allow that small race here.
* (*) swapoff and friends charge against the mm_struct, not against
* the task_struct. So mm->owner can be NULL.
*/
  2364. memcg = mem_cgroup_from_task(p);
  2365. if (!memcg)
  2366. memcg = root_mem_cgroup;
  2367. if (mem_cgroup_is_root(memcg)) {
  2368. rcu_read_unlock();
  2369. goto done;
  2370. }
  2371. if (consume_stock(memcg, nr_pages)) {
/*
* It seems dangerous to access memcg without css_get().
* But considering how consume_stock works, it's not
* necessary. If consume_stock succeeds, some charges
* from this memcg are cached on this cpu. So we
* don't need to call css_get()/css_tryget() before
* calling consume_stock().
*/
  2380. rcu_read_unlock();
  2381. goto done;
  2382. }
/* after here, we may block; we need to get a refcount */
  2384. if (!css_tryget(&memcg->css)) {
  2385. rcu_read_unlock();
  2386. goto again;
  2387. }
  2388. rcu_read_unlock();
  2389. }
  2390. do {
  2391. bool oom_check;
  2392. /* If killed, bypass charge */
  2393. if (fatal_signal_pending(current)) {
  2394. css_put(&memcg->css);
  2395. goto bypass;
  2396. }
  2397. oom_check = false;
  2398. if (oom && !nr_oom_retries) {
  2399. oom_check = true;
  2400. nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  2401. }
  2402. ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
  2403. oom_check);
  2404. switch (ret) {
  2405. case CHARGE_OK:
  2406. break;
  2407. case CHARGE_RETRY: /* not in OOM situation but retry */
  2408. batch = nr_pages;
  2409. css_put(&memcg->css);
  2410. memcg = NULL;
  2411. goto again;
  2412. case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
  2413. css_put(&memcg->css);
  2414. goto nomem;
  2415. case CHARGE_NOMEM: /* OOM routine works */
  2416. if (!oom) {
  2417. css_put(&memcg->css);
  2418. goto nomem;
  2419. }
  2420. /* If oom, we never return -ENOMEM */
  2421. nr_oom_retries--;
  2422. break;
  2423. case CHARGE_OOM_DIE: /* Killed by OOM Killer */
  2424. css_put(&memcg->css);
  2425. goto bypass;
  2426. }
  2427. } while (ret != CHARGE_OK);
  2428. if (batch > nr_pages)
  2429. refill_stock(memcg, batch - nr_pages);
  2430. css_put(&memcg->css);
  2431. done:
  2432. *ptr = memcg;
  2433. return 0;
  2434. nomem:
  2435. *ptr = NULL;
  2436. return -ENOMEM;
  2437. bypass:
  2438. *ptr = root_mem_cgroup;
  2439. return -EINTR;
  2440. }
/*
* Sometimes we have to undo a charge we got by try_charge().
* This function is for that: it undoes the res_counter charges
* taken by try_charge().
*/
  2446. static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
  2447. unsigned int nr_pages)
  2448. {
  2449. if (!mem_cgroup_is_root(memcg)) {
  2450. unsigned long bytes = nr_pages * PAGE_SIZE;
  2451. res_counter_uncharge(&memcg->res, bytes);
  2452. if (do_swap_account)
  2453. res_counter_uncharge(&memcg->memsw, bytes);
  2454. }
  2455. }
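/*
* Illustrative pairing (a sketch, not an actual call site): a caller that
* charged speculatively and later cannot use the charge undoes it with
*
*   __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
*   ...
*   __mem_cgroup_cancel_charge(memcg, 1);
*
* which is essentially what mem_cgroup_cancel_charge_swapin() below does
* for the swap-in path.
*/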
/*
* Cancel charges in this cgroup... doesn't propagate to the parent cgroup.
* This is useful when moving usage to the parent cgroup.
*/
  2460. static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
  2461. unsigned int nr_pages)
  2462. {
  2463. unsigned long bytes = nr_pages * PAGE_SIZE;
  2464. if (mem_cgroup_is_root(memcg))
  2465. return;
  2466. res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
  2467. if (do_swap_account)
  2468. res_counter_uncharge_until(&memcg->memsw,
  2469. memcg->memsw.parent, bytes);
  2470. }
  2471. /*
  2472. * A helper function to get mem_cgroup from ID. must be called under
  2473. * rcu_read_lock(). The caller is responsible for calling css_tryget if
  2474. * the mem_cgroup is used for charging. (dropping refcnt from swap can be
  2475. * called against removed memcg.)
  2476. */
  2477. static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
  2478. {
  2479. struct cgroup_subsys_state *css;
  2480. /* ID 0 is unused ID */
  2481. if (!id)
  2482. return NULL;
  2483. css = css_lookup(&mem_cgroup_subsys, id);
  2484. if (!css)
  2485. return NULL;
  2486. return mem_cgroup_from_css(css);
  2487. }
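/*
* Typical lookup pattern (the same one used by try_get_mem_cgroup_from_page()
* below):
*
*   rcu_read_lock();
*   memcg = mem_cgroup_lookup(id);
*   if (memcg && !css_tryget(&memcg->css))
*           memcg = NULL;
*   rcu_read_unlock();
*/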
  2488. struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  2489. {
  2490. struct mem_cgroup *memcg = NULL;
  2491. struct page_cgroup *pc;
  2492. unsigned short id;
  2493. swp_entry_t ent;
  2494. VM_BUG_ON(!PageLocked(page));
  2495. pc = lookup_page_cgroup(page);
  2496. lock_page_cgroup(pc);
  2497. if (PageCgroupUsed(pc)) {
  2498. memcg = pc->mem_cgroup;
  2499. if (memcg && !css_tryget(&memcg->css))
  2500. memcg = NULL;
  2501. } else if (PageSwapCache(page)) {
  2502. ent.val = page_private(page);
  2503. id = lookup_swap_cgroup_id(ent);
  2504. rcu_read_lock();
  2505. memcg = mem_cgroup_lookup(id);
  2506. if (memcg && !css_tryget(&memcg->css))
  2507. memcg = NULL;
  2508. rcu_read_unlock();
  2509. }
  2510. unlock_page_cgroup(pc);
  2511. return memcg;
  2512. }
  2513. static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
  2514. struct page *page,
  2515. unsigned int nr_pages,
  2516. enum charge_type ctype,
  2517. bool lrucare)
  2518. {
  2519. struct page_cgroup *pc = lookup_page_cgroup(page);
  2520. struct zone *uninitialized_var(zone);
  2521. struct lruvec *lruvec;
  2522. bool was_on_lru = false;
  2523. bool anon;
  2524. lock_page_cgroup(pc);
  2525. VM_BUG_ON(PageCgroupUsed(pc));
/*
* we don't need page_cgroup_lock for tail pages, because they are not
* accessed by any other context at this point.
*/
  2530. /*
  2531. * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
  2532. * may already be on some other mem_cgroup's LRU. Take care of it.
  2533. */
  2534. if (lrucare) {
  2535. zone = page_zone(page);
  2536. spin_lock_irq(&zone->lru_lock);
  2537. if (PageLRU(page)) {
  2538. lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
  2539. ClearPageLRU(page);
  2540. del_page_from_lru_list(page, lruvec, page_lru(page));
  2541. was_on_lru = true;
  2542. }
  2543. }
  2544. pc->mem_cgroup = memcg;
  2545. /*
  2546. * We access a page_cgroup asynchronously without lock_page_cgroup().
  2547. * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
  2548. * is accessed after testing USED bit. To make pc->mem_cgroup visible
  2549. * before USED bit, we need memory barrier here.
  2550. * See mem_cgroup_add_lru_list(), etc.
  2551. */
  2552. smp_wmb();
  2553. SetPageCgroupUsed(pc);
  2554. if (lrucare) {
  2555. if (was_on_lru) {
  2556. lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
  2557. VM_BUG_ON(PageLRU(page));
  2558. SetPageLRU(page);
  2559. add_page_to_lru_list(page, lruvec, page_lru(page));
  2560. }
  2561. spin_unlock_irq(&zone->lru_lock);
  2562. }
  2563. if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
  2564. anon = true;
  2565. else
  2566. anon = false;
  2567. mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
  2568. unlock_page_cgroup(pc);
/*
* "charge_statistics" updated the event counter. Then check it.
* Insert the ancestor (and the ancestor's ancestors) into the softlimit
* RB-tree if they exceed the softlimit.
*/
  2574. memcg_check_events(memcg, page);
  2575. }
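/*
* Note: __mem_cgroup_commit_charge() only binds an already-charged page to
* its page_cgroup and updates the statistics; the res_counter charge itself
* must have been obtained beforehand via __mem_cgroup_try_charge(), as in
* mem_cgroup_charge_common() further below.
*/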
  2576. static DEFINE_MUTEX(set_limit_mutex);
  2577. #ifdef CONFIG_MEMCG_KMEM
  2578. static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
  2579. {
  2580. return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
  2581. (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
  2582. }
  2583. /*
  2584. * This is a bit cumbersome, but it is rarely used and avoids a backpointer
  2585. * in the memcg_cache_params struct.
  2586. */
  2587. static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
  2588. {
  2589. struct kmem_cache *cachep;
  2590. VM_BUG_ON(p->is_root_cache);
  2591. cachep = p->root_cache;
  2592. return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
  2593. }
  2594. #ifdef CONFIG_SLABINFO
  2595. static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
  2596. struct seq_file *m)
  2597. {
  2598. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  2599. struct memcg_cache_params *params;
  2600. if (!memcg_can_account_kmem(memcg))
  2601. return -EIO;
  2602. print_slabinfo_header(m);
  2603. mutex_lock(&memcg->slab_caches_mutex);
  2604. list_for_each_entry(params, &memcg->memcg_slab_caches, list)
  2605. cache_show(memcg_params_to_cache(params), m);
  2606. mutex_unlock(&memcg->slab_caches_mutex);
  2607. return 0;
  2608. }
  2609. #endif
  2610. static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
  2611. {
  2612. struct res_counter *fail_res;
  2613. struct mem_cgroup *_memcg;
  2614. int ret = 0;
  2615. bool may_oom;
  2616. ret = res_counter_charge(&memcg->kmem, size, &fail_res);
  2617. if (ret)
  2618. return ret;
  2619. /*
  2620. * Conditions under which we can wait for the oom_killer. Those are
  2621. * the same conditions tested by the core page allocator
  2622. */
  2623. may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
  2624. _memcg = memcg;
  2625. ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
  2626. &_memcg, may_oom);
  2627. if (ret == -EINTR) {
  2628. /*
* __mem_cgroup_try_charge() chose to bypass to root due to
  2630. * OOM kill or fatal signal. Since our only options are to
  2631. * either fail the allocation or charge it to this cgroup, do
  2632. * it as a temporary condition. But we can't fail. From a
  2633. * kmem/slab perspective, the cache has already been selected,
  2634. * by mem_cgroup_kmem_get_cache(), so it is too late to change
  2635. * our minds.
  2636. *
  2637. * This condition will only trigger if the task entered
  2638. * memcg_charge_kmem in a sane state, but was OOM-killed during
  2639. * __mem_cgroup_try_charge() above. Tasks that were already
  2640. * dying when the allocation triggers should have been already
  2641. * directed to the root cgroup in memcontrol.h
  2642. */
  2643. res_counter_charge_nofail(&memcg->res, size, &fail_res);
  2644. if (do_swap_account)
  2645. res_counter_charge_nofail(&memcg->memsw, size,
  2646. &fail_res);
  2647. ret = 0;
  2648. } else if (ret)
  2649. res_counter_uncharge(&memcg->kmem, size);
  2650. return ret;
  2651. }
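/*
* memcg_charge_kmem() charges the kmem counter first and then the regular
* res/memsw counters through __mem_cgroup_try_charge(); memcg_uncharge_kmem()
* below drops all of them, so every successful charge of "size" bytes must
* eventually be matched by one uncharge of the same size.
*/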
  2652. static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
  2653. {
  2654. res_counter_uncharge(&memcg->res, size);
  2655. if (do_swap_account)
  2656. res_counter_uncharge(&memcg->memsw, size);
  2657. /* Not down to 0 */
  2658. if (res_counter_uncharge(&memcg->kmem, size))
  2659. return;
  2660. /*
  2661. * Releases a reference taken in kmem_cgroup_css_offline in case
  2662. * this last uncharge is racing with the offlining code or it is
  2663. * outliving the memcg existence.
  2664. *
  2665. * The memory barrier imposed by test&clear is paired with the
  2666. * explicit one in memcg_kmem_mark_dead().
  2667. */
  2668. if (memcg_kmem_test_and_clear_dead(memcg))
  2669. css_put(&memcg->css);
  2670. }
  2671. void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
  2672. {
  2673. if (!memcg)
  2674. return;
  2675. mutex_lock(&memcg->slab_caches_mutex);
  2676. list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
  2677. mutex_unlock(&memcg->slab_caches_mutex);
  2678. }
/*
* helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
* will return -1 when this is not a kmem-limited memcg.
*/
  2684. int memcg_cache_id(struct mem_cgroup *memcg)
  2685. {
  2686. return memcg ? memcg->kmemcg_id : -1;
  2687. }
  2688. /*
  2689. * This ends up being protected by the set_limit mutex, during normal
  2690. * operation, because that is its main call site.
  2691. *
  2692. * But when we create a new cache, we can call this as well if its parent
  2693. * is kmem-limited. That will have to hold set_limit_mutex as well.
  2694. */
  2695. int memcg_update_cache_sizes(struct mem_cgroup *memcg)
  2696. {
  2697. int num, ret;
  2698. num = ida_simple_get(&kmem_limited_groups,
  2699. 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
  2700. if (num < 0)
  2701. return num;
/*
* After this point, kmem_accounted (which we test atomically at
* the beginning of this conditional) is no longer 0. This
* guarantees only one process will set the following boolean
* to true. We don't need test_and_set because we're protected
* by the set_limit_mutex anyway.
*/
  2709. memcg_kmem_set_activated(memcg);
  2710. ret = memcg_update_all_caches(num+1);
  2711. if (ret) {
  2712. ida_simple_remove(&kmem_limited_groups, num);
  2713. memcg_kmem_clear_activated(memcg);
  2714. return ret;
  2715. }
  2716. memcg->kmemcg_id = num;
  2717. INIT_LIST_HEAD(&memcg->memcg_slab_caches);
  2718. mutex_init(&memcg->slab_caches_mutex);
  2719. return 0;
  2720. }
  2721. static size_t memcg_caches_array_size(int num_groups)
  2722. {
  2723. ssize_t size;
  2724. if (num_groups <= 0)
  2725. return 0;
  2726. size = 2 * num_groups;
  2727. if (size < MEMCG_CACHES_MIN_SIZE)
  2728. size = MEMCG_CACHES_MIN_SIZE;
  2729. else if (size > MEMCG_CACHES_MAX_SIZE)
  2730. size = MEMCG_CACHES_MAX_SIZE;
  2731. return size;
  2732. }
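/*
* Example: num_groups == 3 gives a doubled size of 6, which is then clamped
* into [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE]; a non-positive
* num_groups yields 0.
*/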
/*
* We should update the current array size iff all cache updates succeed. This
* can only be done from the slab side. The slab mutex needs to be held when
* calling this.
*/
  2738. void memcg_update_array_size(int num)
  2739. {
  2740. if (num > memcg_limited_groups_array_size)
  2741. memcg_limited_groups_array_size = memcg_caches_array_size(num);
  2742. }
  2743. static void kmem_cache_destroy_work_func(struct work_struct *w);
  2744. int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
  2745. {
  2746. struct memcg_cache_params *cur_params = s->memcg_params;
  2747. VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
  2748. if (num_groups > memcg_limited_groups_array_size) {
  2749. int i;
  2750. ssize_t size = memcg_caches_array_size(num_groups);
  2751. size *= sizeof(void *);
  2752. size += sizeof(struct memcg_cache_params);
  2753. s->memcg_params = kzalloc(size, GFP_KERNEL);
  2754. if (!s->memcg_params) {
  2755. s->memcg_params = cur_params;
  2756. return -ENOMEM;
  2757. }
  2758. s->memcg_params->is_root_cache = true;
  2759. /*
  2760. * There is the chance it will be bigger than
  2761. * memcg_limited_groups_array_size, if we failed an allocation
  2762. * in a cache, in which case all caches updated before it, will
  2763. * have a bigger array.
  2764. *
  2765. * But if that is the case, the data after
  2766. * memcg_limited_groups_array_size is certainly unused
  2767. */
  2768. for (i = 0; i < memcg_limited_groups_array_size; i++) {
  2769. if (!cur_params->memcg_caches[i])
  2770. continue;
  2771. s->memcg_params->memcg_caches[i] =
  2772. cur_params->memcg_caches[i];
  2773. }
  2774. /*
  2775. * Ideally, we would wait until all caches succeed, and only
  2776. * then free the old one. But this is not worth the extra
  2777. * pointer per-cache we'd have to have for this.
  2778. *
  2779. * It is not a big deal if some caches are left with a size
  2780. * bigger than the others. And all updates will reset this
  2781. * anyway.
  2782. */
  2783. kfree(cur_params);
  2784. }
  2785. return 0;
  2786. }
  2787. int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
  2788. struct kmem_cache *root_cache)
  2789. {
  2790. size_t size = sizeof(struct memcg_cache_params);
  2791. if (!memcg_kmem_enabled())
  2792. return 0;
  2793. if (!memcg)
  2794. size += memcg_limited_groups_array_size * sizeof(void *);
  2795. s->memcg_params = kzalloc(size, GFP_KERNEL);
  2796. if (!s->memcg_params)
  2797. return -ENOMEM;
  2798. INIT_WORK(&s->memcg_params->destroy,
  2799. kmem_cache_destroy_work_func);
  2800. if (memcg) {
  2801. s->memcg_params->memcg = memcg;
  2802. s->memcg_params->root_cache = root_cache;
  2803. } else
  2804. s->memcg_params->is_root_cache = true;
  2805. return 0;
  2806. }
  2807. void memcg_release_cache(struct kmem_cache *s)
  2808. {
  2809. struct kmem_cache *root;
  2810. struct mem_cgroup *memcg;
  2811. int id;
  2812. /*
  2813. * This happens, for instance, when a root cache goes away before we
  2814. * add any memcg.
  2815. */
  2816. if (!s->memcg_params)
  2817. return;
  2818. if (s->memcg_params->is_root_cache)
  2819. goto out;
  2820. memcg = s->memcg_params->memcg;
  2821. id = memcg_cache_id(memcg);
  2822. root = s->memcg_params->root_cache;
  2823. root->memcg_params->memcg_caches[id] = NULL;
  2824. mutex_lock(&memcg->slab_caches_mutex);
  2825. list_del(&s->memcg_params->list);
  2826. mutex_unlock(&memcg->slab_caches_mutex);
  2827. css_put(&memcg->css);
  2828. out:
  2829. kfree(s->memcg_params);
  2830. }
  2831. /*
* During the creation of a new cache, we need to disable our accounting mechanism
* altogether. This is true even if we are not creating, but rather just
* enqueuing new caches to be created.
  2835. *
  2836. * This is because that process will trigger allocations; some visible, like
  2837. * explicit kmallocs to auxiliary data structures, name strings and internal
  2838. * cache structures; some well concealed, like INIT_WORK() that can allocate
  2839. * objects during debug.
  2840. *
  2841. * If any allocation happens during memcg_kmem_get_cache, we will recurse back
  2842. * to it. This may not be a bounded recursion: since the first cache creation
  2843. * failed to complete (waiting on the allocation), we'll just try to create the
  2844. * cache again, failing at the same point.
  2845. *
  2846. * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
  2847. * memcg_kmem_skip_account. So we enclose anything that might allocate memory
  2848. * inside the following two functions.
  2849. */
  2850. static inline void memcg_stop_kmem_account(void)
  2851. {
  2852. VM_BUG_ON(!current->mm);
  2853. current->memcg_kmem_skip_account++;
  2854. }
  2855. static inline void memcg_resume_kmem_account(void)
  2856. {
  2857. VM_BUG_ON(!current->mm);
  2858. current->memcg_kmem_skip_account--;
  2859. }
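/*
* Usage pattern (see memcg_create_cache_enqueue() below for a real user):
*
*   memcg_stop_kmem_account();
*   ... allocations that must not recurse into memcg accounting ...
*   memcg_resume_kmem_account();
*/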
  2860. static void kmem_cache_destroy_work_func(struct work_struct *w)
  2861. {
  2862. struct kmem_cache *cachep;
  2863. struct memcg_cache_params *p;
  2864. p = container_of(w, struct memcg_cache_params, destroy);
  2865. cachep = memcg_params_to_cache(p);
  2866. /*
  2867. * If we get down to 0 after shrink, we could delete right away.
  2868. * However, memcg_release_pages() already puts us back in the workqueue
  2869. * in that case. If we proceed deleting, we'll get a dangling
  2870. * reference, and removing the object from the workqueue in that case
  2871. * is unnecessary complication. We are not a fast path.
  2872. *
  2873. * Note that this case is fundamentally different from racing with
  2874. * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
  2875. * kmem_cache_shrink, not only we would be reinserting a dead cache
  2876. * into the queue, but doing so from inside the worker racing to
  2877. * destroy it.
  2878. *
  2879. * So if we aren't down to zero, we'll just schedule a worker and try
  2880. * again
  2881. */
  2882. if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
  2883. kmem_cache_shrink(cachep);
  2884. if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
  2885. return;
  2886. } else
  2887. kmem_cache_destroy(cachep);
  2888. }
  2889. void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
  2890. {
  2891. if (!cachep->memcg_params->dead)
  2892. return;
  2893. /*
  2894. * There are many ways in which we can get here.
  2895. *
  2896. * We can get to a memory-pressure situation while the delayed work is
  2897. * still pending to run. The vmscan shrinkers can then release all
  2898. * cache memory and get us to destruction. If this is the case, we'll
  2899. * be executed twice, which is a bug (the second time will execute over
  2900. * bogus data). In this case, cancelling the work should be fine.
  2901. *
  2902. * But we can also get here from the worker itself, if
  2903. * kmem_cache_shrink is enough to shake all the remaining objects and
  2904. * get the page count to 0. In this case, we'll deadlock if we try to
  2905. * cancel the work (the worker runs with an internal lock held, which
  2906. * is the same lock we would hold for cancel_work_sync().)
  2907. *
  2908. * Since we can't possibly know who got us here, just refrain from
  2909. * running if there is already work pending
  2910. */
  2911. if (work_pending(&cachep->memcg_params->destroy))
  2912. return;
  2913. /*
  2914. * We have to defer the actual destroying to a workqueue, because
  2915. * we might currently be in a context that cannot sleep.
  2916. */
  2917. schedule_work(&cachep->memcg_params->destroy);
  2918. }
/*
* This lock protects updaters, not readers. We want readers to be as fast as
* they can, and they will either see NULL or a valid cache value. Our model
* allows them to see NULL, in which case the root memcg will be selected.
*
* We need this lock because multiple concurrent allocations to the same
* cache may span more than one worker. Only one of them can create the cache.
*/
  2927. static DEFINE_MUTEX(memcg_cache_mutex);
  2928. /*
  2929. * Called with memcg_cache_mutex held
  2930. */
  2931. static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
  2932. struct kmem_cache *s)
  2933. {
  2934. struct kmem_cache *new;
  2935. static char *tmp_name = NULL;
  2936. lockdep_assert_held(&memcg_cache_mutex);
/*
* kmem_cache_create_memcg duplicates the given name, and
* cgroup_name() for this name requires RCU context.
* This static temporary buffer is used to avoid a
* pointless short-lived allocation.
*/
  2943. if (!tmp_name) {
  2944. tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
  2945. if (!tmp_name)
  2946. return NULL;
  2947. }
  2948. rcu_read_lock();
  2949. snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
  2950. memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
  2951. rcu_read_unlock();
  2952. new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
  2953. (s->flags & ~SLAB_PANIC), s->ctor, s);
  2954. if (new)
  2955. new->allocflags |= __GFP_KMEMCG;
  2956. return new;
  2957. }
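/*
* With the "%s(%d:%s)" format above, a per-memcg copy of a cache named "foo"
* for a memcg with cache id 3 living in a cgroup named "bar" would be called
* "foo(3:bar)" (the names here are made up, only the format comes from the
* code).
*/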
  2958. static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
  2959. struct kmem_cache *cachep)
  2960. {
  2961. struct kmem_cache *new_cachep;
  2962. int idx;
  2963. BUG_ON(!memcg_can_account_kmem(memcg));
  2964. idx = memcg_cache_id(memcg);
  2965. mutex_lock(&memcg_cache_mutex);
  2966. new_cachep = cachep->memcg_params->memcg_caches[idx];
  2967. if (new_cachep) {
  2968. css_put(&memcg->css);
  2969. goto out;
  2970. }
  2971. new_cachep = kmem_cache_dup(memcg, cachep);
  2972. if (new_cachep == NULL) {
  2973. new_cachep = cachep;
  2974. css_put(&memcg->css);
  2975. goto out;
  2976. }
  2977. atomic_set(&new_cachep->memcg_params->nr_pages , 0);
  2978. cachep->memcg_params->memcg_caches[idx] = new_cachep;
/*
* the readers won't take the lock; make sure everybody sees the updated
* value, so they won't put stuff in the queue again for no reason
*/
  2983. wmb();
  2984. out:
  2985. mutex_unlock(&memcg_cache_mutex);
  2986. return new_cachep;
  2987. }
  2988. void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
  2989. {
  2990. struct kmem_cache *c;
  2991. int i;
  2992. if (!s->memcg_params)
  2993. return;
  2994. if (!s->memcg_params->is_root_cache)
  2995. return;
  2996. /*
  2997. * If the cache is being destroyed, we trust that there is no one else
  2998. * requesting objects from it. Even if there are, the sanity checks in
* kmem_cache_destroy should catch this ill case.
  3000. *
  3001. * Still, we don't want anyone else freeing memcg_caches under our
  3002. * noses, which can happen if a new memcg comes to life. As usual,
  3003. * we'll take the set_limit_mutex to protect ourselves against this.
  3004. */
  3005. mutex_lock(&set_limit_mutex);
  3006. for (i = 0; i < memcg_limited_groups_array_size; i++) {
  3007. c = s->memcg_params->memcg_caches[i];
  3008. if (!c)
  3009. continue;
  3010. /*
  3011. * We will now manually delete the caches, so to avoid races
  3012. * we need to cancel all pending destruction workers and
  3013. * proceed with destruction ourselves.
  3014. *
  3015. * kmem_cache_destroy() will call kmem_cache_shrink internally,
  3016. * and that could spawn the workers again: it is likely that
* the cache still has active pages until this very moment.
  3018. * This would lead us back to mem_cgroup_destroy_cache.
  3019. *
  3020. * But that will not execute at all if the "dead" flag is not
  3021. * set, so flip it down to guarantee we are in control.
  3022. */
  3023. c->memcg_params->dead = false;
  3024. cancel_work_sync(&c->memcg_params->destroy);
  3025. kmem_cache_destroy(c);
  3026. }
  3027. mutex_unlock(&set_limit_mutex);
  3028. }
  3029. struct create_work {
  3030. struct mem_cgroup *memcg;
  3031. struct kmem_cache *cachep;
  3032. struct work_struct work;
  3033. };
  3034. static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
  3035. {
  3036. struct kmem_cache *cachep;
  3037. struct memcg_cache_params *params;
  3038. if (!memcg_kmem_is_active(memcg))
  3039. return;
  3040. mutex_lock(&memcg->slab_caches_mutex);
  3041. list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
  3042. cachep = memcg_params_to_cache(params);
  3043. cachep->memcg_params->dead = true;
  3044. schedule_work(&cachep->memcg_params->destroy);
  3045. }
  3046. mutex_unlock(&memcg->slab_caches_mutex);
  3047. }
  3048. static void memcg_create_cache_work_func(struct work_struct *w)
  3049. {
  3050. struct create_work *cw;
  3051. cw = container_of(w, struct create_work, work);
  3052. memcg_create_kmem_cache(cw->memcg, cw->cachep);
  3053. kfree(cw);
  3054. }
  3055. /*
  3056. * Enqueue the creation of a per-memcg kmem_cache.
  3057. */
  3058. static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
  3059. struct kmem_cache *cachep)
  3060. {
  3061. struct create_work *cw;
  3062. cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
  3063. if (cw == NULL) {
  3064. css_put(&memcg->css);
  3065. return;
  3066. }
  3067. cw->memcg = memcg;
  3068. cw->cachep = cachep;
  3069. INIT_WORK(&cw->work, memcg_create_cache_work_func);
  3070. schedule_work(&cw->work);
  3071. }
  3072. static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
  3073. struct kmem_cache *cachep)
  3074. {
  3075. /*
  3076. * We need to stop accounting when we kmalloc, because if the
  3077. * corresponding kmalloc cache is not yet created, the first allocation
  3078. * in __memcg_create_cache_enqueue will recurse.
  3079. *
  3080. * However, it is better to enclose the whole function. Depending on
  3081. * the debugging options enabled, INIT_WORK(), for instance, can
  3082. * trigger an allocation. This too, will make us recurse. Because at
  3083. * this point we can't allow ourselves back into memcg_kmem_get_cache,
  3084. * the safest choice is to do it like this, wrapping the whole function.
  3085. */
  3086. memcg_stop_kmem_account();
  3087. __memcg_create_cache_enqueue(memcg, cachep);
  3088. memcg_resume_kmem_account();
  3089. }
  3090. /*
  3091. * Return the kmem_cache we're supposed to use for a slab allocation.
  3092. * We try to use the current memcg's version of the cache.
  3093. *
  3094. * If the cache does not exist yet, if we are the first user of it,
  3095. * we either create it immediately, if possible, or create it asynchronously
  3096. * in a workqueue.
  3097. * In the latter case, we will let the current allocation go through with
  3098. * the original cache.
  3099. *
  3100. * Can't be called in interrupt context or from kernel threads.
  3101. * This function needs to be called with rcu_read_lock() held.
  3102. */
  3103. struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
  3104. gfp_t gfp)
  3105. {
  3106. struct mem_cgroup *memcg;
  3107. int idx;
  3108. VM_BUG_ON(!cachep->memcg_params);
  3109. VM_BUG_ON(!cachep->memcg_params->is_root_cache);
  3110. if (!current->mm || current->memcg_kmem_skip_account)
  3111. return cachep;
  3112. rcu_read_lock();
  3113. memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
  3114. if (!memcg_can_account_kmem(memcg))
  3115. goto out;
  3116. idx = memcg_cache_id(memcg);
/*
* barrier to make sure we're always seeing the up-to-date value. The
* code updating memcg_caches will issue a write barrier to match this.
*/
  3121. read_barrier_depends();
  3122. if (likely(cachep->memcg_params->memcg_caches[idx])) {
  3123. cachep = cachep->memcg_params->memcg_caches[idx];
  3124. goto out;
  3125. }
  3126. /* The corresponding put will be done in the workqueue. */
  3127. if (!css_tryget(&memcg->css))
  3128. goto out;
  3129. rcu_read_unlock();
  3130. /*
  3131. * If we are in a safe context (can wait, and not in interrupt
* context), we could be predictable and return right away.
  3133. * This would guarantee that the allocation being performed
  3134. * already belongs in the new cache.
  3135. *
  3136. * However, there are some clashes that can arrive from locking.
  3137. * For instance, because we acquire the slab_mutex while doing
  3138. * kmem_cache_dup, this means no further allocation could happen
  3139. * with the slab_mutex held.
  3140. *
* Also, because cache creation issues get_online_cpus(), this
  3142. * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
  3143. * that ends up reversed during cpu hotplug. (cpuset allocates
  3144. * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
  3145. * better to defer everything.
  3146. */
  3147. memcg_create_cache_enqueue(memcg, cachep);
  3148. return cachep;
  3149. out:
  3150. rcu_read_unlock();
  3151. return cachep;
  3152. }
  3153. EXPORT_SYMBOL(__memcg_kmem_get_cache);
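/*
* Callers are expected to reach this through the memcg_kmem_get_cache()
* wrapper declared in memcontrol.h (not shown here); the returned cache is
* either the root cache itself or the current memcg's copy of it, and the
* allocation simply proceeds with whatever cache is returned.
*/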
  3154. /*
  3155. * We need to verify if the allocation against current->mm->owner's memcg is
  3156. * possible for the given order. But the page is not allocated yet, so we'll
  3157. * need a further commit step to do the final arrangements.
  3158. *
* It is possible for the task to switch cgroups in the meantime, so at
  3160. * commit time, we can't rely on task conversion any longer. We'll then use
  3161. * the handle argument to return to the caller which cgroup we should commit
  3162. * against. We could also return the memcg directly and avoid the pointer
  3163. * passing, but a boolean return value gives better semantics considering
  3164. * the compiled-out case as well.
  3165. *
  3166. * Returning true means the allocation is possible.
  3167. */
  3168. bool
  3169. __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
  3170. {
  3171. struct mem_cgroup *memcg;
  3172. int ret;
  3173. *_memcg = NULL;
  3174. /*
  3175. * Disabling accounting is only relevant for some specific memcg
  3176. * internal allocations. Therefore we would initially not have such
  3177. * check here, since direct calls to the page allocator that are marked
  3178. * with GFP_KMEMCG only happen outside memcg core. We are mostly
  3179. * concerned with cache allocations, and by having this test at
  3180. * memcg_kmem_get_cache, we are already able to relay the allocation to
  3181. * the root cache and bypass the memcg cache altogether.
  3182. *
  3183. * There is one exception, though: the SLUB allocator does not create
  3184. * large order caches, but rather service large kmallocs directly from
  3185. * the page allocator. Therefore, the following sequence when backed by
  3186. * the SLUB allocator:
  3187. *
  3188. * memcg_stop_kmem_account();
  3189. * kmalloc(<large_number>)
  3190. * memcg_resume_kmem_account();
  3191. *
  3192. * would effectively ignore the fact that we should skip accounting,
  3193. * since it will drive us directly to this function without passing
  3194. * through the cache selector memcg_kmem_get_cache. Such large
  3195. * allocations are extremely rare but can happen, for instance, for the
  3196. * cache arrays. We bring this test here.
  3197. */
  3198. if (!current->mm || current->memcg_kmem_skip_account)
  3199. return true;
  3200. memcg = try_get_mem_cgroup_from_mm(current->mm);
  3201. /*
  3202. * very rare case described in mem_cgroup_from_task. Unfortunately there
  3203. * isn't much we can do without complicating this too much, and it would
  3204. * be gfp-dependent anyway. Just let it go
  3205. */
  3206. if (unlikely(!memcg))
  3207. return true;
  3208. if (!memcg_can_account_kmem(memcg)) {
  3209. css_put(&memcg->css);
  3210. return true;
  3211. }
  3212. ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
  3213. if (!ret)
  3214. *_memcg = memcg;
  3215. css_put(&memcg->css);
  3216. return (ret == 0);
  3217. }
  3218. void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
  3219. int order)
  3220. {
  3221. struct page_cgroup *pc;
  3222. VM_BUG_ON(mem_cgroup_is_root(memcg));
  3223. /* The page allocation failed. Revert */
  3224. if (!page) {
  3225. memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
  3226. return;
  3227. }
  3228. pc = lookup_page_cgroup(page);
  3229. lock_page_cgroup(pc);
  3230. pc->mem_cgroup = memcg;
  3231. SetPageCgroupUsed(pc);
  3232. unlock_page_cgroup(pc);
  3233. }
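/*
* Sketch of how the three kmem page hooks fit together (ordering only; the
* exact wrapper names live in memcontrol.h and are not shown here):
*
*   if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
*           fail the allocation;
*   page = alloc_pages(gfp, order);
*   __memcg_kmem_commit_charge(page, memcg, order);   (reverts the charge if !page)
*   ...
*   __memcg_kmem_uncharge_pages(page, order);         (when freeing)
*/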
  3234. void __memcg_kmem_uncharge_pages(struct page *page, int order)
  3235. {
  3236. struct mem_cgroup *memcg = NULL;
  3237. struct page_cgroup *pc;
  3238. pc = lookup_page_cgroup(page);
  3239. /*
  3240. * Fast unlocked return. Theoretically might have changed, have to
  3241. * check again after locking.
  3242. */
  3243. if (!PageCgroupUsed(pc))
  3244. return;
  3245. lock_page_cgroup(pc);
  3246. if (PageCgroupUsed(pc)) {
  3247. memcg = pc->mem_cgroup;
  3248. ClearPageCgroupUsed(pc);
  3249. }
  3250. unlock_page_cgroup(pc);
  3251. /*
  3252. * We trust that only if there is a memcg associated with the page, it
  3253. * is a valid allocation
  3254. */
  3255. if (!memcg)
  3256. return;
  3257. VM_BUG_ON(mem_cgroup_is_root(memcg));
  3258. memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
  3259. }
  3260. #else
  3261. static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
  3262. {
  3263. }
  3264. #endif /* CONFIG_MEMCG_KMEM */
  3265. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  3266. #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
  3267. /*
* Because tail pages are not marked as "used", set them. We're under
* zone->lru_lock, 'splitting on pmd' and compound_lock.
* charge/uncharge will never happen and move_account() is done under
  3271. * compound_lock(), so we don't have to take care of races.
  3272. */
  3273. void mem_cgroup_split_huge_fixup(struct page *head)
  3274. {
  3275. struct page_cgroup *head_pc = lookup_page_cgroup(head);
  3276. struct page_cgroup *pc;
  3277. struct mem_cgroup *memcg;
  3278. int i;
  3279. if (mem_cgroup_disabled())
  3280. return;
  3281. memcg = head_pc->mem_cgroup;
  3282. for (i = 1; i < HPAGE_PMD_NR; i++) {
  3283. pc = head_pc + i;
  3284. pc->mem_cgroup = memcg;
  3285. smp_wmb();/* see __commit_charge() */
  3286. pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
  3287. }
  3288. __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
  3289. HPAGE_PMD_NR);
  3290. }
  3291. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  3292. /**
  3293. * mem_cgroup_move_account - move account of the page
  3294. * @page: the page
  3295. * @nr_pages: number of regular pages (>1 for huge pages)
  3296. * @pc: page_cgroup of the page.
  3297. * @from: mem_cgroup which the page is moved from.
  3298. * @to: mem_cgroup which the page is moved to. @from != @to.
  3299. *
  3300. * The caller must confirm following.
  3301. * - page is not on LRU (isolate_page() is useful.)
  3302. * - compound_lock is held when nr_pages > 1
  3303. *
  3304. * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
  3305. * from old cgroup.
  3306. */
  3307. static int mem_cgroup_move_account(struct page *page,
  3308. unsigned int nr_pages,
  3309. struct page_cgroup *pc,
  3310. struct mem_cgroup *from,
  3311. struct mem_cgroup *to)
  3312. {
  3313. unsigned long flags;
  3314. int ret;
  3315. bool anon = PageAnon(page);
  3316. VM_BUG_ON(from == to);
  3317. VM_BUG_ON(PageLRU(page));
  3318. /*
  3319. * The page is isolated from LRU. So, collapse function
  3320. * will not handle this page. But page splitting can happen.
  3321. * Do this check under compound_page_lock(). The caller should
  3322. * hold it.
  3323. */
  3324. ret = -EBUSY;
  3325. if (nr_pages > 1 && !PageTransHuge(page))
  3326. goto out;
  3327. lock_page_cgroup(pc);
  3328. ret = -EINVAL;
  3329. if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
  3330. goto unlock;
  3331. move_lock_mem_cgroup(from, &flags);
  3332. if (!anon && page_mapped(page)) {
  3333. /* Update mapped_file data for mem_cgroup */
  3334. preempt_disable();
  3335. __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  3336. __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  3337. preempt_enable();
  3338. }
  3339. mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
  3340. /* caller should have done css_get */
  3341. pc->mem_cgroup = to;
  3342. mem_cgroup_charge_statistics(to, page, anon, nr_pages);
  3343. move_unlock_mem_cgroup(from, &flags);
  3344. ret = 0;
  3345. unlock:
  3346. unlock_page_cgroup(pc);
  3347. /*
  3348. * check events
  3349. */
  3350. memcg_check_events(to, page);
  3351. memcg_check_events(from, page);
  3352. out:
  3353. return ret;
  3354. }
  3355. /**
  3356. * mem_cgroup_move_parent - moves page to the parent group
  3357. * @page: the page to move
  3358. * @pc: page_cgroup of the page
  3359. * @child: page's cgroup
  3360. *
  3361. * move charges to its parent or the root cgroup if the group has no
  3362. * parent (aka use_hierarchy==0).
  3363. * Although this might fail (get_page_unless_zero, isolate_lru_page or
  3364. * mem_cgroup_move_account fails) the failure is always temporary and
  3365. * it signals a race with a page removal/uncharge or migration. In the
  3366. * first case the page is on the way out and it will vanish from the LRU
  3367. * on the next attempt and the call should be retried later.
  3368. * Isolation from the LRU fails only if page has been isolated from
  3369. * the LRU since we looked at it and that usually means either global
  3370. * reclaim or migration going on. The page will either get back to the
  3371. * LRU or vanish.
* Finally, mem_cgroup_move_account fails only if the page got uncharged
  3373. * (!PageCgroupUsed) or moved to a different group. The page will
  3374. * disappear in the next attempt.
  3375. */
  3376. static int mem_cgroup_move_parent(struct page *page,
  3377. struct page_cgroup *pc,
  3378. struct mem_cgroup *child)
  3379. {
  3380. struct mem_cgroup *parent;
  3381. unsigned int nr_pages;
  3382. unsigned long uninitialized_var(flags);
  3383. int ret;
  3384. VM_BUG_ON(mem_cgroup_is_root(child));
  3385. ret = -EBUSY;
  3386. if (!get_page_unless_zero(page))
  3387. goto out;
  3388. if (isolate_lru_page(page))
  3389. goto put;
  3390. nr_pages = hpage_nr_pages(page);
  3391. parent = parent_mem_cgroup(child);
  3392. /*
  3393. * If no parent, move charges to root cgroup.
  3394. */
  3395. if (!parent)
  3396. parent = root_mem_cgroup;
  3397. if (nr_pages > 1) {
  3398. VM_BUG_ON(!PageTransHuge(page));
  3399. flags = compound_lock_irqsave(page);
  3400. }
  3401. ret = mem_cgroup_move_account(page, nr_pages,
  3402. pc, child, parent);
  3403. if (!ret)
  3404. __mem_cgroup_cancel_local_charge(child, nr_pages);
  3405. if (nr_pages > 1)
  3406. compound_unlock_irqrestore(page, flags);
  3407. putback_lru_page(page);
  3408. put:
  3409. put_page(page);
  3410. out:
  3411. return ret;
  3412. }
  3413. /*
  3414. * Charge the memory controller for page usage.
  3415. * Return
  3416. * 0 if the charge was successful
  3417. * < 0 if the cgroup is over its limit
  3418. */
  3419. static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
  3420. gfp_t gfp_mask, enum charge_type ctype)
  3421. {
  3422. struct mem_cgroup *memcg = NULL;
  3423. unsigned int nr_pages = 1;
  3424. bool oom = true;
  3425. int ret;
  3426. if (PageTransHuge(page)) {
  3427. nr_pages <<= compound_order(page);
  3428. VM_BUG_ON(!PageTransHuge(page));
  3429. /*
  3430. * Never OOM-kill a process for a huge page. The
  3431. * fault handler will fall back to regular pages.
  3432. */
  3433. oom = false;
  3434. }
  3435. ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
  3436. if (ret == -ENOMEM)
  3437. return ret;
  3438. __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
  3439. return 0;
  3440. }
  3441. int mem_cgroup_newpage_charge(struct page *page,
  3442. struct mm_struct *mm, gfp_t gfp_mask)
  3443. {
  3444. if (mem_cgroup_disabled())
  3445. return 0;
  3446. VM_BUG_ON(page_mapped(page));
  3447. VM_BUG_ON(page->mapping && !PageAnon(page));
  3448. VM_BUG_ON(!mm);
  3449. return mem_cgroup_charge_common(page, mm, gfp_mask,
  3450. MEM_CGROUP_CHARGE_TYPE_ANON);
  3451. }
  3452. /*
* During swap-in (try_charge -> commit or cancel), the page is locked.
  3454. * And when try_charge() successfully returns, one refcnt to memcg without
  3455. * struct page_cgroup is acquired. This refcnt will be consumed by
  3456. * "commit()" or removed by "cancel()"
  3457. */
  3458. static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
  3459. struct page *page,
  3460. gfp_t mask,
  3461. struct mem_cgroup **memcgp)
  3462. {
  3463. struct mem_cgroup *memcg;
  3464. struct page_cgroup *pc;
  3465. int ret;
  3466. pc = lookup_page_cgroup(page);
  3467. /*
  3468. * Every swap fault against a single page tries to charge the
  3469. * page, bail as early as possible. shmem_unuse() encounters
  3470. * already charged pages, too. The USED bit is protected by
  3471. * the page lock, which serializes swap cache removal, which
  3472. * in turn serializes uncharging.
  3473. */
  3474. if (PageCgroupUsed(pc))
  3475. return 0;
  3476. if (!do_swap_account)
  3477. goto charge_cur_mm;
  3478. memcg = try_get_mem_cgroup_from_page(page);
  3479. if (!memcg)
  3480. goto charge_cur_mm;
  3481. *memcgp = memcg;
  3482. ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
  3483. css_put(&memcg->css);
  3484. if (ret == -EINTR)
  3485. ret = 0;
  3486. return ret;
  3487. charge_cur_mm:
  3488. ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
  3489. if (ret == -EINTR)
  3490. ret = 0;
  3491. return ret;
  3492. }
  3493. int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
  3494. gfp_t gfp_mask, struct mem_cgroup **memcgp)
  3495. {
  3496. *memcgp = NULL;
  3497. if (mem_cgroup_disabled())
  3498. return 0;
  3499. /*
  3500. * A racing thread's fault, or swapoff, may have already
  3501. * updated the pte, and even removed page from swap cache: in
  3502. * those cases unuse_pte()'s pte_same() test will fail; but
  3503. * there's also a KSM case which does need to charge the page.
  3504. */
  3505. if (!PageSwapCache(page)) {
  3506. int ret;
  3507. ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
  3508. if (ret == -EINTR)
  3509. ret = 0;
  3510. return ret;
  3511. }
  3512. return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
  3513. }
  3514. void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
  3515. {
  3516. if (mem_cgroup_disabled())
  3517. return;
  3518. if (!memcg)
  3519. return;
  3520. __mem_cgroup_cancel_charge(memcg, 1);
  3521. }
  3522. static void
  3523. __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
  3524. enum charge_type ctype)
  3525. {
  3526. if (mem_cgroup_disabled())
  3527. return;
  3528. if (!memcg)
  3529. return;
  3530. __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
  3531. /*
  3532. * Now swap is on-memory. This means this page may be
  3533. * counted both as mem and swap....double count.
  3534. * Fix it by uncharging from memsw. Basically, this SwapCache is stable
  3535. * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
  3536. * may call delete_from_swap_cache() before reach here.
  3537. */
  3538. if (do_swap_account && PageSwapCache(page)) {
  3539. swp_entry_t ent = {.val = page_private(page)};
  3540. mem_cgroup_uncharge_swap(ent);
  3541. }
  3542. }
  3543. void mem_cgroup_commit_charge_swapin(struct page *page,
  3544. struct mem_cgroup *memcg)
  3545. {
  3546. __mem_cgroup_commit_charge_swapin(page, memcg,
  3547. MEM_CGROUP_CHARGE_TYPE_ANON);
  3548. }
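/*
* Sketch of the swap-in protocol described above (roughly what the page
* fault path does, simplified):
*
*   mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
*   ... install the pte ...
*   mem_cgroup_commit_charge_swapin(page, memcg);
*   (or mem_cgroup_cancel_charge_swapin(memcg) if the fault is aborted)
*/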
  3549. int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  3550. gfp_t gfp_mask)
  3551. {
  3552. struct mem_cgroup *memcg = NULL;
  3553. enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
  3554. int ret;
  3555. if (mem_cgroup_disabled())
  3556. return 0;
  3557. if (PageCompound(page))
  3558. return 0;
  3559. if (!PageSwapCache(page))
  3560. ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
  3561. else { /* page is swapcache/shmem */
  3562. ret = __mem_cgroup_try_charge_swapin(mm, page,
  3563. gfp_mask, &memcg);
  3564. if (!ret)
  3565. __mem_cgroup_commit_charge_swapin(page, memcg, type);
  3566. }
  3567. return ret;
  3568. }
  3569. static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
  3570. unsigned int nr_pages,
  3571. const enum charge_type ctype)
  3572. {
  3573. struct memcg_batch_info *batch = NULL;
  3574. bool uncharge_memsw = true;
  3575. /* If swapout, usage of swap doesn't decrease */
  3576. if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
  3577. uncharge_memsw = false;
  3578. batch = &current->memcg_batch;
/*
* Usually, we do css_get() when we remember a memcg pointer.
* But in this case, we keep res->usage until the end of a series of
* uncharges. Then it's ok to ignore memcg's refcnt.
*/
  3584. if (!batch->memcg)
  3585. batch->memcg = memcg;
/*
* do_batch > 0 when unmapping pages or inode invalidate/truncate.
* In those cases, all pages freed continuously can be expected to be in
* the same cgroup and we have a chance to coalesce uncharges.
* But we do uncharge one by one if the task is being killed by OOM (TIF_MEMDIE),
* because we want to do uncharge as soon as possible.
*/
  3593. if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
  3594. goto direct_uncharge;
  3595. if (nr_pages > 1)
  3596. goto direct_uncharge;
/*
* In the typical case, batch->memcg == memcg. This means we can
* merge a series of uncharges into one res_counter uncharge.
* If not, we uncharge the res_counter one by one.
*/
  3602. if (batch->memcg != memcg)
  3603. goto direct_uncharge;
  3604. /* remember freed charge and uncharge it later */
  3605. batch->nr_pages++;
  3606. if (uncharge_memsw)
  3607. batch->memsw_nr_pages++;
  3608. return;
  3609. direct_uncharge:
  3610. res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
  3611. if (uncharge_memsw)
  3612. res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
  3613. if (unlikely(batch->memcg != memcg))
  3614. memcg_oom_recover(memcg);
  3615. }
  3616. /*
  3617. * uncharge if !page_mapped(page)
  3618. */
  3619. static struct mem_cgroup *
  3620. __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
  3621. bool end_migration)
  3622. {
  3623. struct mem_cgroup *memcg = NULL;
  3624. unsigned int nr_pages = 1;
  3625. struct page_cgroup *pc;
  3626. bool anon;
  3627. if (mem_cgroup_disabled())
  3628. return NULL;
  3629. if (PageTransHuge(page)) {
  3630. nr_pages <<= compound_order(page);
  3631. VM_BUG_ON(!PageTransHuge(page));
  3632. }
  3633. /*
  3634. * Check if our page_cgroup is valid
  3635. */
  3636. pc = lookup_page_cgroup(page);
  3637. if (unlikely(!PageCgroupUsed(pc)))
  3638. return NULL;
  3639. lock_page_cgroup(pc);
  3640. memcg = pc->mem_cgroup;
  3641. if (!PageCgroupUsed(pc))
  3642. goto unlock_out;
  3643. anon = PageAnon(page);
  3644. switch (ctype) {
  3645. case MEM_CGROUP_CHARGE_TYPE_ANON:
  3646. /*
  3647. * Generally PageAnon tells if it's the anon statistics to be
  3648. * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
  3649. * used before page reached the stage of being marked PageAnon.
  3650. */
  3651. anon = true;
  3652. /* fallthrough */
  3653. case MEM_CGROUP_CHARGE_TYPE_DROP:
  3654. /* See mem_cgroup_prepare_migration() */
  3655. if (page_mapped(page))
  3656. goto unlock_out;
  3657. /*
  3658. * Pages under migration may not be uncharged. But
  3659. * end_migration() /must/ be the one uncharging the
  3660. * unused post-migration page and so it has to call
  3661. * here with the migration bit still set. See the
  3662. * res_counter handling below.
  3663. */
  3664. if (!end_migration && PageCgroupMigration(pc))
  3665. goto unlock_out;
  3666. break;
  3667. case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
  3668. if (!PageAnon(page)) { /* Shared memory */
  3669. if (page->mapping && !page_is_file_cache(page))
  3670. goto unlock_out;
  3671. } else if (page_mapped(page)) /* Anon */
  3672. goto unlock_out;
  3673. break;
  3674. default:
  3675. break;
  3676. }
  3677. mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
  3678. ClearPageCgroupUsed(pc);
  3679. /*
  3680. * pc->mem_cgroup is not cleared here. It will be accessed when it's
  3681. * freed from LRU. This is safe because uncharged page is expected not
  3682. * to be reused (freed soon). Exception is SwapCache, it's handled by
  3683. * special functions.
  3684. */
  3685. unlock_page_cgroup(pc);
  3686. /*
  3687. * even after unlock, we have memcg->res.usage here and this memcg
  3688. * will never be freed, so it's safe to call css_get().
  3689. */
  3690. memcg_check_events(memcg, page);
  3691. if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
  3692. mem_cgroup_swap_statistics(memcg, true);
  3693. css_get(&memcg->css);
  3694. }
  3695. /*
  3696. * Migration does not charge the res_counter for the
  3697. * replacement page, so leave it alone when phasing out the
  3698. * page that is unused after the migration.
  3699. */
  3700. if (!end_migration && !mem_cgroup_is_root(memcg))
  3701. mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
  3702. return memcg;
  3703. unlock_out:
  3704. unlock_page_cgroup(pc);
  3705. return NULL;
  3706. }
  3707. void mem_cgroup_uncharge_page(struct page *page)
  3708. {
  3709. /* early check. */
  3710. if (page_mapped(page))
  3711. return;
  3712. VM_BUG_ON(page->mapping && !PageAnon(page));
  3713. /*
  3714. * If the page is in swap cache, uncharge should be deferred
  3715. * to the swap path, which also properly accounts swap usage
  3716. * and handles memcg lifetime.
  3717. *
  3718. * Note that this check is not stable and reclaim may add the
  3719. * page to swap cache at any time after this. However, if the
  3720. * page is not in swap cache by the time page->mapcount hits
  3721. * 0, there won't be any page table references to the swap
  3722. * slot, and reclaim will free it and not actually write the
  3723. * page to disk.
  3724. */
  3725. if (PageSwapCache(page))
  3726. return;
  3727. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
  3728. }
  3729. void mem_cgroup_uncharge_cache_page(struct page *page)
  3730. {
  3731. VM_BUG_ON(page_mapped(page));
  3732. VM_BUG_ON(page->mapping);
  3733. __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
  3734. }
/*
* Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
* In those cases, pages are freed continuously and we can expect they
* are in the same memcg. Each of these calls itself limits the number of
* pages freed at once, and then uncharge_start/end() is called properly.
* This may be called multiple times (i.e. nested) in one context.
*/
  3742. void mem_cgroup_uncharge_start(void)
  3743. {
  3744. current->memcg_batch.do_batch++;
  3745. /* We can do nest. */
  3746. if (current->memcg_batch.do_batch == 1) {
  3747. current->memcg_batch.memcg = NULL;
  3748. current->memcg_batch.nr_pages = 0;
  3749. current->memcg_batch.memsw_nr_pages = 0;
  3750. }
  3751. }
  3752. void mem_cgroup_uncharge_end(void)
  3753. {
  3754. struct memcg_batch_info *batch = &current->memcg_batch;
  3755. if (!batch->do_batch)
  3756. return;
  3757. batch->do_batch--;
  3758. if (batch->do_batch) /* If stacked, do nothing. */
  3759. return;
  3760. if (!batch->memcg)
  3761. return;
/*
* This "batch->memcg" is valid without any css_get/put etc...
* because we hide charges behind us.
*/
  3766. if (batch->nr_pages)
  3767. res_counter_uncharge(&batch->memcg->res,
  3768. batch->nr_pages * PAGE_SIZE);
  3769. if (batch->memsw_nr_pages)
  3770. res_counter_uncharge(&batch->memcg->memsw,
  3771. batch->memsw_nr_pages * PAGE_SIZE);
  3772. memcg_oom_recover(batch->memcg);
  3773. /* forget this pointer (for sanity check) */
  3774. batch->memcg = NULL;
  3775. }
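/*
* Sketch of the batching described above (simplified; real users are the
* truncate/invalidate and unmap paths):
*
*   mem_cgroup_uncharge_start();
*   for each page being freed
*           mem_cgroup_uncharge_page(page);   (or _cache_page())
*   mem_cgroup_uncharge_end();
*/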
  3776. #ifdef CONFIG_SWAP
/*
* called after __delete_from_swap_cache(); drops the "page" account.
* memcg information is recorded in the swap_cgroup of "ent"
*/
  3781. void
  3782. mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
  3783. {
  3784. struct mem_cgroup *memcg;
  3785. int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
  3786. if (!swapout) /* this was a swap cache but the swap is unused ! */
  3787. ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
  3788. memcg = __mem_cgroup_uncharge_common(page, ctype, false);
  3789. /*
  3790. * record memcg information, if swapout && memcg != NULL,
  3791. * css_get() was called in uncharge().
  3792. */
  3793. if (do_swap_account && swapout && memcg)
  3794. swap_cgroup_record(ent, css_id(&memcg->css));
  3795. }
  3796. #endif
  3797. #ifdef CONFIG_MEMCG_SWAP
  3798. /*
  3799. * called from swap_entry_free(). remove record in swap_cgroup and
  3800. * uncharge "memsw" account.
  3801. */
  3802. void mem_cgroup_uncharge_swap(swp_entry_t ent)
  3803. {
  3804. struct mem_cgroup *memcg;
  3805. unsigned short id;
  3806. if (!do_swap_account)
  3807. return;
  3808. id = swap_cgroup_record(ent, 0);
  3809. rcu_read_lock();
  3810. memcg = mem_cgroup_lookup(id);
  3811. if (memcg) {
  3812. /*
  3813. * We uncharge this because swap is freed.
* This memcg can be an obsolete one. We avoid calling css_tryget()
  3815. */
  3816. if (!mem_cgroup_is_root(memcg))
  3817. res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
  3818. mem_cgroup_swap_statistics(memcg, false);
  3819. css_put(&memcg->css);
  3820. }
  3821. rcu_read_unlock();
  3822. }
  3823. /**
  3824. * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
  3825. * @entry: swap entry to be moved
  3826. * @from: mem_cgroup which the entry is moved from
  3827. * @to: mem_cgroup which the entry is moved to
  3828. *
  3829. * It succeeds only when the swap_cgroup's record for this entry is the same
  3830. * as the mem_cgroup's id of @from.
  3831. *
  3832. * Returns 0 on success, -EINVAL on failure.
  3833. *
  3834. * The caller must have charged to @to, IOW, called res_counter_charge() about
  3835. * both res and memsw, and called css_get().
  3836. */
  3837. static int mem_cgroup_move_swap_account(swp_entry_t entry,
  3838. struct mem_cgroup *from, struct mem_cgroup *to)
  3839. {
  3840. unsigned short old_id, new_id;
  3841. old_id = css_id(&from->css);
  3842. new_id = css_id(&to->css);
  3843. if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
  3844. mem_cgroup_swap_statistics(from, false);
  3845. mem_cgroup_swap_statistics(to, true);
  3846. /*
  3847. * This function is only called from task migration context now.
  3848. * It postpones res_counter and refcount handling till the end
  3849. * of task migration(mem_cgroup_clear_mc()) for performance
  3850. * improvement. But we cannot postpone css_get(to) because if
  3851. * the process that has been moved to @to does swap-in, the
  3852. * refcount of @to might be decreased to 0.
  3853. *
  3854. * We are in attach() phase, so the cgroup is guaranteed to be
  3855. * alive, so we can just call css_get().
  3856. */
  3857. css_get(&to->css);
  3858. return 0;
  3859. }
  3860. return -EINVAL;
  3861. }
  3862. #else
  3863. static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
  3864. struct mem_cgroup *from, struct mem_cgroup *to)
  3865. {
  3866. return -EINVAL;
  3867. }
  3868. #endif
  3869. /*
  3870. * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  3871. * page belongs to.
  3872. */
  3873. void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
  3874. struct mem_cgroup **memcgp)
  3875. {
  3876. struct mem_cgroup *memcg = NULL;
  3877. unsigned int nr_pages = 1;
  3878. struct page_cgroup *pc;
  3879. enum charge_type ctype;
  3880. *memcgp = NULL;
  3881. if (mem_cgroup_disabled())
  3882. return;
  3883. if (PageTransHuge(page))
  3884. nr_pages <<= compound_order(page);
  3885. pc = lookup_page_cgroup(page);
  3886. lock_page_cgroup(pc);
  3887. if (PageCgroupUsed(pc)) {
  3888. memcg = pc->mem_cgroup;
  3889. css_get(&memcg->css);
  3890. /*
3891. * While migrating an anonymous page, its mapcount goes down
3892. * to 0 and uncharge() will be called. But, even if it's fully
3893. * unmapped, migration may fail and this page has to be
3894. * charged again. We set the MIGRATION flag here and delay uncharge
3895. * until end_migration() is called.
3896. *
3897. * Corner cases to think about:
3898. * A)
3899. * The old page was mapped as Anon and it is unmapped and freed
3900. * while migration is ongoing.
3901. * If unmap finds the old page, its uncharge() will be delayed
3902. * until end_migration(). If unmap finds the new page, it is
3903. * uncharged when its mapcount drops from 1 to 0. If the unmap code
3904. * finds a swap migration entry, the new page will not be mapped
3905. * and end_migration() will find it (mapcount == 0).
3906. *
3907. * B)
3908. * The old page was mapped but migration fails, so the kernel
3909. * remaps it. Its charge is kept by the MIGRATION flag even
3910. * if its mapcount goes down to 0, and the remap can be done
3911. * without charging it again.
3912. *
3913. * C)
3914. * The "old" page is under lock_page() until the end of
3915. * migration, so the old page itself will not be swapped out.
3916. * If the new page is swapped out before end_migration(), our
3917. * hook into the usual swap-out path will catch the event.
  3918. */
  3919. if (PageAnon(page))
  3920. SetPageCgroupMigration(pc);
  3921. }
  3922. unlock_page_cgroup(pc);
  3923. /*
  3924. * If the page is not charged at this point,
  3925. * we return here.
  3926. */
  3927. if (!memcg)
  3928. return;
  3929. *memcgp = memcg;
  3930. /*
3931. * We charge the new page before it's used/mapped. So, even if unlock_page()
3932. * is called before end_migration(), we can catch all events on this new
3933. * page. In case the new page is migrated but not remapped, the new page's
3934. * mapcount will end up 0 and we call uncharge in end_migration().
  3935. */
  3936. if (PageAnon(page))
  3937. ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
  3938. else
  3939. ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
  3940. /*
  3941. * The page is committed to the memcg, but it's not actually
  3942. * charged to the res_counter since we plan on replacing the
  3943. * old one and only one page is going to be left afterwards.
  3944. */
  3945. __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
  3946. }
  3947. /* remove redundant charge if migration failed*/
  3948. void mem_cgroup_end_migration(struct mem_cgroup *memcg,
  3949. struct page *oldpage, struct page *newpage, bool migration_ok)
  3950. {
  3951. struct page *used, *unused;
  3952. struct page_cgroup *pc;
  3953. bool anon;
  3954. if (!memcg)
  3955. return;
  3956. if (!migration_ok) {
  3957. used = oldpage;
  3958. unused = newpage;
  3959. } else {
  3960. used = newpage;
  3961. unused = oldpage;
  3962. }
  3963. anon = PageAnon(used);
  3964. __mem_cgroup_uncharge_common(unused,
  3965. anon ? MEM_CGROUP_CHARGE_TYPE_ANON
  3966. : MEM_CGROUP_CHARGE_TYPE_CACHE,
  3967. true);
  3968. css_put(&memcg->css);
  3969. /*
3970. * We disallowed uncharging pages under migration because the mapcount
3971. * of the page goes down to zero, temporarily.
3972. * Clear the flag and check whether the page should still be charged.
  3973. */
  3974. pc = lookup_page_cgroup(oldpage);
  3975. lock_page_cgroup(pc);
  3976. ClearPageCgroupMigration(pc);
  3977. unlock_page_cgroup(pc);
  3978. /*
3979. * If the page is a file cache, the radix-tree replacement is atomic
3980. * and we can skip this check. When it was an Anon page, its mapcount
3981. * goes down to 0. But because we added the MIGRATION flag, it's not
3982. * uncharged yet. There are several cases, but the page->mapcount check
3983. * and the USED bit check in mem_cgroup_uncharge_page() will do enough
3984. * checking. (see prepare_charge() also)
  3985. */
  3986. if (anon)
  3987. mem_cgroup_uncharge_page(used);
  3988. }
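/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * migration core is expected to pair the two hooks above around the actual
 * page move, passing whether the move succeeded so the redundant charge is
 * dropped from the correct page. "move_succeeded" stands in for the
 * caller's success flag:
 *
 *	struct mem_cgroup *memcg;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	(try to move the contents of "page" to "newpage")
 *	mem_cgroup_end_migration(memcg, page, newpage, move_succeeded);
 */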
  3989. /*
3990. * When replacing page cache, newpage is not under any memcg but it's on
3991. * the LRU. So, this function doesn't touch the res_counter but handles the
3992. * LRU correctly. Both pages are locked, so we cannot race with uncharge.
  3993. */
  3994. void mem_cgroup_replace_page_cache(struct page *oldpage,
  3995. struct page *newpage)
  3996. {
  3997. struct mem_cgroup *memcg = NULL;
  3998. struct page_cgroup *pc;
  3999. enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
  4000. if (mem_cgroup_disabled())
  4001. return;
  4002. pc = lookup_page_cgroup(oldpage);
  4003. /* fix accounting on old pages */
  4004. lock_page_cgroup(pc);
  4005. if (PageCgroupUsed(pc)) {
  4006. memcg = pc->mem_cgroup;
  4007. mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
  4008. ClearPageCgroupUsed(pc);
  4009. }
  4010. unlock_page_cgroup(pc);
  4011. /*
  4012. * When called from shmem_replace_page(), in some cases the
  4013. * oldpage has already been charged, and in some cases not.
  4014. */
  4015. if (!memcg)
  4016. return;
  4017. /*
  4018. * Even if newpage->mapping was NULL before starting replacement,
4019. * the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
4020. * the LRU while we overwrite pc->mem_cgroup.
  4021. */
  4022. __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
  4023. }
  4024. #ifdef CONFIG_DEBUG_VM
  4025. static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
  4026. {
  4027. struct page_cgroup *pc;
  4028. pc = lookup_page_cgroup(page);
  4029. /*
  4030. * Can be NULL while feeding pages into the page allocator for
  4031. * the first time, i.e. during boot or memory hotplug;
  4032. * or when mem_cgroup_disabled().
  4033. */
  4034. if (likely(pc) && PageCgroupUsed(pc))
  4035. return pc;
  4036. return NULL;
  4037. }
  4038. bool mem_cgroup_bad_page_check(struct page *page)
  4039. {
  4040. if (mem_cgroup_disabled())
  4041. return false;
  4042. return lookup_page_cgroup_used(page) != NULL;
  4043. }
  4044. void mem_cgroup_print_bad_page(struct page *page)
  4045. {
  4046. struct page_cgroup *pc;
  4047. pc = lookup_page_cgroup_used(page);
  4048. if (pc) {
  4049. pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
  4050. pc, pc->flags, pc->mem_cgroup);
  4051. }
  4052. }
  4053. #endif
  4054. static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
  4055. unsigned long long val)
  4056. {
  4057. int retry_count;
  4058. u64 memswlimit, memlimit;
  4059. int ret = 0;
  4060. int children = mem_cgroup_count_children(memcg);
  4061. u64 curusage, oldusage;
  4062. int enlarge;
  4063. /*
4064. * To keep hierarchical_reclaim simple, how long we should retry
4065. * depends on the caller. We set our retry count to be a function
4066. * of the number of children we should visit in this loop.
  4067. */
  4068. retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
  4069. oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  4070. enlarge = 0;
  4071. while (retry_count) {
  4072. if (signal_pending(current)) {
  4073. ret = -EINTR;
  4074. break;
  4075. }
  4076. /*
4077. * Rather than hiding all of this in some function, do it in an
4078. * open-coded manner so it is clear what really happens.
  4079. * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
  4080. */
  4081. mutex_lock(&set_limit_mutex);
  4082. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  4083. if (memswlimit < val) {
  4084. ret = -EINVAL;
  4085. mutex_unlock(&set_limit_mutex);
  4086. break;
  4087. }
  4088. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  4089. if (memlimit < val)
  4090. enlarge = 1;
  4091. ret = res_counter_set_limit(&memcg->res, val);
  4092. if (!ret) {
  4093. if (memswlimit == val)
  4094. memcg->memsw_is_minimum = true;
  4095. else
  4096. memcg->memsw_is_minimum = false;
  4097. }
  4098. mutex_unlock(&set_limit_mutex);
  4099. if (!ret)
  4100. break;
  4101. mem_cgroup_reclaim(memcg, GFP_KERNEL,
  4102. MEM_CGROUP_RECLAIM_SHRINK);
  4103. curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  4104. /* Usage is reduced ? */
  4105. if (curusage >= oldusage)
  4106. retry_count--;
  4107. else
  4108. oldusage = curusage;
  4109. }
  4110. if (!ret && enlarge)
  4111. memcg_oom_recover(memcg);
  4112. return ret;
  4113. }
  4114. static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
  4115. unsigned long long val)
  4116. {
  4117. int retry_count;
  4118. u64 memlimit, memswlimit, oldusage, curusage;
  4119. int children = mem_cgroup_count_children(memcg);
  4120. int ret = -EBUSY;
  4121. int enlarge = 0;
  4122. /* see mem_cgroup_resize_res_limit */
  4123. retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
  4124. oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  4125. while (retry_count) {
  4126. if (signal_pending(current)) {
  4127. ret = -EINTR;
  4128. break;
  4129. }
  4130. /*
4131. * Rather than hiding all of this in some function, do it in an
4132. * open-coded manner so it is clear what really happens.
  4133. * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
  4134. */
  4135. mutex_lock(&set_limit_mutex);
  4136. memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  4137. if (memlimit > val) {
  4138. ret = -EINVAL;
  4139. mutex_unlock(&set_limit_mutex);
  4140. break;
  4141. }
  4142. memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  4143. if (memswlimit < val)
  4144. enlarge = 1;
  4145. ret = res_counter_set_limit(&memcg->memsw, val);
  4146. if (!ret) {
  4147. if (memlimit == val)
  4148. memcg->memsw_is_minimum = true;
  4149. else
  4150. memcg->memsw_is_minimum = false;
  4151. }
  4152. mutex_unlock(&set_limit_mutex);
  4153. if (!ret)
  4154. break;
  4155. mem_cgroup_reclaim(memcg, GFP_KERNEL,
  4156. MEM_CGROUP_RECLAIM_NOSWAP |
  4157. MEM_CGROUP_RECLAIM_SHRINK);
  4158. curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
  4159. /* Usage is reduced ? */
  4160. if (curusage >= oldusage)
  4161. retry_count--;
  4162. else
  4163. oldusage = curusage;
  4164. }
  4165. if (!ret && enlarge)
  4166. memcg_oom_recover(memcg);
  4167. return ret;
  4168. }
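/*
 * Illustrative note (an assumption, not part of the original file): because
 * the two resize helpers above enforce memcg->res.limit <= memcg->memsw.limit,
 * a caller raising both limits has to grow the memsw limit first, e.g. to
 * raise both to new_limit:
 *
 *	mem_cgroup_resize_memsw_limit(memcg, new_limit);	(grow memsw first)
 *	mem_cgroup_resize_limit(memcg, new_limit);		(then memory)
 *
 * Doing it in the opposite order would trip the -EINVAL checks above.
 */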
  4169. unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
  4170. gfp_t gfp_mask,
  4171. unsigned long *total_scanned)
  4172. {
  4173. unsigned long nr_reclaimed = 0;
  4174. struct mem_cgroup_per_zone *mz, *next_mz = NULL;
  4175. unsigned long reclaimed;
  4176. int loop = 0;
  4177. struct mem_cgroup_tree_per_zone *mctz;
  4178. unsigned long long excess;
  4179. unsigned long nr_scanned;
  4180. if (order > 0)
  4181. return 0;
  4182. mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
  4183. /*
4184. * This loop can run for a while, especially if mem_cgroups continuously
4185. * keep exceeding their soft limit, putting the system under
4186. * pressure.
  4187. */
  4188. do {
  4189. if (next_mz)
  4190. mz = next_mz;
  4191. else
  4192. mz = mem_cgroup_largest_soft_limit_node(mctz);
  4193. if (!mz)
  4194. break;
  4195. nr_scanned = 0;
  4196. reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
  4197. gfp_mask, &nr_scanned);
  4198. nr_reclaimed += reclaimed;
  4199. *total_scanned += nr_scanned;
  4200. spin_lock(&mctz->lock);
  4201. /*
  4202. * If we failed to reclaim anything from this memory cgroup
  4203. * it is time to move on to the next cgroup
  4204. */
  4205. next_mz = NULL;
  4206. if (!reclaimed) {
  4207. do {
  4208. /*
  4209. * Loop until we find yet another one.
  4210. *
  4211. * By the time we get the soft_limit lock
4212. * again, someone might have added the
4213. * group back on the RB tree. Iterate to
4214. * make sure we get a different memcg.
  4215. * mem_cgroup_largest_soft_limit_node returns
  4216. * NULL if no other cgroup is present on
  4217. * the tree
  4218. */
  4219. next_mz =
  4220. __mem_cgroup_largest_soft_limit_node(mctz);
  4221. if (next_mz == mz)
  4222. css_put(&next_mz->memcg->css);
  4223. else /* next_mz == NULL or other memcg */
  4224. break;
  4225. } while (1);
  4226. }
  4227. __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
  4228. excess = res_counter_soft_limit_excess(&mz->memcg->res);
  4229. /*
  4230. * One school of thought says that we should not add
  4231. * back the node to the tree if reclaim returns 0.
4232. * But our reclaim could return 0 simply because, due
4233. * to priority, we are exposing a smaller subset of
  4234. * memory to reclaim from. Consider this as a longer
  4235. * term TODO.
  4236. */
  4237. /* If excess == 0, no tree ops */
  4238. __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
  4239. spin_unlock(&mctz->lock);
  4240. css_put(&mz->memcg->css);
  4241. loop++;
  4242. /*
  4243. * Could not reclaim anything and there are no more
  4244. * mem cgroups to try or we seem to be looping without
  4245. * reclaiming anything.
  4246. */
  4247. if (!nr_reclaimed &&
  4248. (next_mz == NULL ||
  4249. loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
  4250. break;
  4251. } while (!nr_reclaimed);
  4252. if (next_mz)
  4253. css_put(&next_mz->memcg->css);
  4254. return nr_reclaimed;
  4255. }
  4256. /**
  4257. * mem_cgroup_force_empty_list - clears LRU of a group
  4258. * @memcg: group to clear
  4259. * @node: NUMA node
  4260. * @zid: zone id
4261. * @lru: lru to clear
4262. *
4263. * Traverse a specified page_cgroup list and try to drop them all. This doesn't
4264. * reclaim the pages themselves - pages are moved to the parent (or root)
  4265. * group.
  4266. */
  4267. static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
  4268. int node, int zid, enum lru_list lru)
  4269. {
  4270. struct lruvec *lruvec;
  4271. unsigned long flags;
  4272. struct list_head *list;
  4273. struct page *busy;
  4274. struct zone *zone;
  4275. zone = &NODE_DATA(node)->node_zones[zid];
  4276. lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  4277. list = &lruvec->lists[lru];
  4278. busy = NULL;
  4279. do {
  4280. struct page_cgroup *pc;
  4281. struct page *page;
  4282. spin_lock_irqsave(&zone->lru_lock, flags);
  4283. if (list_empty(list)) {
  4284. spin_unlock_irqrestore(&zone->lru_lock, flags);
  4285. break;
  4286. }
  4287. page = list_entry(list->prev, struct page, lru);
  4288. if (busy == page) {
  4289. list_move(&page->lru, list);
  4290. busy = NULL;
  4291. spin_unlock_irqrestore(&zone->lru_lock, flags);
  4292. continue;
  4293. }
  4294. spin_unlock_irqrestore(&zone->lru_lock, flags);
  4295. pc = lookup_page_cgroup(page);
  4296. if (mem_cgroup_move_parent(page, pc, memcg)) {
  4297. /* found lock contention or "pc" is obsolete. */
  4298. busy = page;
  4299. cond_resched();
  4300. } else
  4301. busy = NULL;
  4302. } while (!list_empty(list));
  4303. }
  4304. /*
4305. * Make the mem_cgroup's charge 0 if there is no task, by moving
4306. * all the charges and pages to the parent.
  4307. * This enables deleting this mem_cgroup.
  4308. *
  4309. * Caller is responsible for holding css reference on the memcg.
  4310. */
  4311. static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
  4312. {
  4313. int node, zid;
  4314. u64 usage;
  4315. do {
  4316. /* This is for making all *used* pages to be on LRU. */
  4317. lru_add_drain_all();
  4318. drain_all_stock_sync(memcg);
  4319. mem_cgroup_start_move(memcg);
  4320. for_each_node_state(node, N_MEMORY) {
  4321. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  4322. enum lru_list lru;
  4323. for_each_lru(lru) {
  4324. mem_cgroup_force_empty_list(memcg,
  4325. node, zid, lru);
  4326. }
  4327. }
  4328. }
  4329. mem_cgroup_end_move(memcg);
  4330. memcg_oom_recover(memcg);
  4331. cond_resched();
  4332. /*
4333. * Kernel memory may not necessarily be trackable to a specific
4334. * process, so such charges are not migrated, and therefore we can't
4335. * expect their value to drop to 0 here.
  4336. * Having res filled up with kmem only is enough.
  4337. *
  4338. * This is a safety check because mem_cgroup_force_empty_list
  4339. * could have raced with mem_cgroup_replace_page_cache callers
  4340. * so the lru seemed empty but the page could have been added
  4341. * right after the check. RES_USAGE should be safe as we always
  4342. * charge before adding to the LRU.
  4343. */
  4344. usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
  4345. res_counter_read_u64(&memcg->kmem, RES_USAGE);
  4346. } while (usage > 0);
  4347. }
  4348. /*
4349. * This mainly exists for tests during the setting of use_hierarchy.
4350. * Since this is the very setting we are changing, the current hierarchy value
4351. * is meaningless.
  4352. */
  4353. static inline bool __memcg_has_children(struct mem_cgroup *memcg)
  4354. {
  4355. struct cgroup *pos;
  4356. /* bounce at first found */
  4357. cgroup_for_each_child(pos, memcg->css.cgroup)
  4358. return true;
  4359. return false;
  4360. }
  4361. /*
  4362. * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
4363. * to be already dead (as in mem_cgroup_force_empty, for instance). This differs
4364. * from mem_cgroup_count_children(), in the sense that we don't really care how
  4365. * many children we have; we only need to know if we have any. It also counts
  4366. * any memcg without hierarchy as infertile.
  4367. */
  4368. static inline bool memcg_has_children(struct mem_cgroup *memcg)
  4369. {
  4370. return memcg->use_hierarchy && __memcg_has_children(memcg);
  4371. }
  4372. /*
  4373. * Reclaims as many pages from the given memcg as possible and moves
  4374. * the rest to the parent.
  4375. *
  4376. * Caller is responsible for holding css reference for memcg.
  4377. */
  4378. static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
  4379. {
  4380. int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
  4381. struct cgroup *cgrp = memcg->css.cgroup;
  4382. /* returns EBUSY if there is a task or if we come here twice. */
  4383. if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
  4384. return -EBUSY;
4385. /* we call try-to-free pages to make this cgroup empty */
  4386. lru_add_drain_all();
  4387. /* try to free all pages in this cgroup */
  4388. while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
  4389. int progress;
  4390. if (signal_pending(current))
  4391. return -EINTR;
  4392. progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
  4393. false);
  4394. if (!progress) {
  4395. nr_retries--;
  4396. /* maybe some writeback is necessary */
  4397. congestion_wait(BLK_RW_ASYNC, HZ/10);
  4398. }
  4399. }
  4400. lru_add_drain();
  4401. mem_cgroup_reparent_charges(memcg);
  4402. return 0;
  4403. }
  4404. static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
  4405. {
  4406. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4407. int ret;
  4408. if (mem_cgroup_is_root(memcg))
  4409. return -EINVAL;
  4410. css_get(&memcg->css);
  4411. ret = mem_cgroup_force_empty(memcg);
  4412. css_put(&memcg->css);
  4413. return ret;
  4414. }
  4415. static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
  4416. {
  4417. return mem_cgroup_from_cont(cont)->use_hierarchy;
  4418. }
  4419. static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
  4420. u64 val)
  4421. {
  4422. int retval = 0;
  4423. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4424. struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
  4425. mutex_lock(&memcg_create_mutex);
  4426. if (memcg->use_hierarchy == val)
  4427. goto out;
  4428. /*
  4429. * If parent's use_hierarchy is set, we can't make any modifications
  4430. * in the child subtrees. If it is unset, then the change can
  4431. * occur, provided the current cgroup has no children.
  4432. *
4433. * For the root cgroup, parent_memcg is NULL; we allow the value to be
4434. * set if there are no children.
  4435. */
  4436. if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
  4437. (val == 1 || val == 0)) {
  4438. if (!__memcg_has_children(memcg))
  4439. memcg->use_hierarchy = val;
  4440. else
  4441. retval = -EBUSY;
  4442. } else
  4443. retval = -EINVAL;
  4444. out:
  4445. mutex_unlock(&memcg_create_mutex);
  4446. return retval;
  4447. }
  4448. static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
  4449. enum mem_cgroup_stat_index idx)
  4450. {
  4451. struct mem_cgroup *iter;
  4452. long val = 0;
  4453. /* Per-cpu values can be negative, use a signed accumulator */
  4454. for_each_mem_cgroup_tree(iter, memcg)
  4455. val += mem_cgroup_read_stat(iter, idx);
  4456. if (val < 0) /* race ? */
  4457. val = 0;
  4458. return val;
  4459. }
  4460. static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
  4461. {
  4462. u64 val;
  4463. if (!mem_cgroup_is_root(memcg)) {
  4464. if (!swap)
  4465. return res_counter_read_u64(&memcg->res, RES_USAGE);
  4466. else
  4467. return res_counter_read_u64(&memcg->memsw, RES_USAGE);
  4468. }
  4469. /*
  4470. * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
  4471. * as well as in MEM_CGROUP_STAT_RSS_HUGE.
  4472. */
  4473. val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
  4474. val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
  4475. if (swap)
  4476. val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
  4477. return val << PAGE_SHIFT;
  4478. }
  4479. static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
  4480. struct file *file, char __user *buf,
  4481. size_t nbytes, loff_t *ppos)
  4482. {
  4483. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4484. char str[64];
  4485. u64 val;
  4486. int name, len;
  4487. enum res_type type;
  4488. type = MEMFILE_TYPE(cft->private);
  4489. name = MEMFILE_ATTR(cft->private);
  4490. switch (type) {
  4491. case _MEM:
  4492. if (name == RES_USAGE)
  4493. val = mem_cgroup_usage(memcg, false);
  4494. else
  4495. val = res_counter_read_u64(&memcg->res, name);
  4496. break;
  4497. case _MEMSWAP:
  4498. if (name == RES_USAGE)
  4499. val = mem_cgroup_usage(memcg, true);
  4500. else
  4501. val = res_counter_read_u64(&memcg->memsw, name);
  4502. break;
  4503. case _KMEM:
  4504. val = res_counter_read_u64(&memcg->kmem, name);
  4505. break;
  4506. default:
  4507. BUG();
  4508. }
  4509. len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
  4510. return simple_read_from_buffer(buf, nbytes, ppos, str, len);
  4511. }
  4512. static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
  4513. {
  4514. int ret = -EINVAL;
  4515. #ifdef CONFIG_MEMCG_KMEM
  4516. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4517. /*
  4518. * For simplicity, we won't allow this to be disabled. It also can't
4519. * be changed if the cgroup already has children, or if tasks have
4520. * already joined.
  4521. *
  4522. * If tasks join before we set the limit, a person looking at
  4523. * kmem.usage_in_bytes will have no way to determine when it took
  4524. * place, which makes the value quite meaningless.
  4525. *
  4526. * After it first became limited, changes in the value of the limit are
  4527. * of course permitted.
  4528. */
  4529. mutex_lock(&memcg_create_mutex);
  4530. mutex_lock(&set_limit_mutex);
  4531. if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
  4532. if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
  4533. ret = -EBUSY;
  4534. goto out;
  4535. }
  4536. ret = res_counter_set_limit(&memcg->kmem, val);
  4537. VM_BUG_ON(ret);
  4538. ret = memcg_update_cache_sizes(memcg);
  4539. if (ret) {
  4540. res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
  4541. goto out;
  4542. }
  4543. static_key_slow_inc(&memcg_kmem_enabled_key);
  4544. /*
  4545. * setting the active bit after the inc will guarantee no one
  4546. * starts accounting before all call sites are patched
  4547. */
  4548. memcg_kmem_set_active(memcg);
  4549. } else
  4550. ret = res_counter_set_limit(&memcg->kmem, val);
  4551. out:
  4552. mutex_unlock(&set_limit_mutex);
  4553. mutex_unlock(&memcg_create_mutex);
  4554. #endif
  4555. return ret;
  4556. }
  4557. #ifdef CONFIG_MEMCG_KMEM
  4558. static int memcg_propagate_kmem(struct mem_cgroup *memcg)
  4559. {
  4560. int ret = 0;
  4561. struct mem_cgroup *parent = parent_mem_cgroup(memcg);
  4562. if (!parent)
  4563. goto out;
  4564. memcg->kmem_account_flags = parent->kmem_account_flags;
  4565. /*
4566. * When that happens, we need to disable the static branch only on those
  4567. * memcgs that enabled it. To achieve this, we would be forced to
  4568. * complicate the code by keeping track of which memcgs were the ones
4569. * that actually enabled limits, and which ones got it from their
  4570. * parents.
  4571. *
  4572. * It is a lot simpler just to do static_key_slow_inc() on every child
  4573. * that is accounted.
  4574. */
  4575. if (!memcg_kmem_is_active(memcg))
  4576. goto out;
  4577. /*
  4578. * __mem_cgroup_free() will issue static_key_slow_dec() because this
  4579. * memcg is active already. If the later initialization fails then the
  4580. * cgroup core triggers the cleanup so we do not have to do it here.
  4581. */
  4582. static_key_slow_inc(&memcg_kmem_enabled_key);
  4583. mutex_lock(&set_limit_mutex);
  4584. memcg_stop_kmem_account();
  4585. ret = memcg_update_cache_sizes(memcg);
  4586. memcg_resume_kmem_account();
  4587. mutex_unlock(&set_limit_mutex);
  4588. out:
  4589. return ret;
  4590. }
  4591. #endif /* CONFIG_MEMCG_KMEM */
  4592. /*
4593. * This is the write handler for the limit files:
4594. * RES_LIMIT and RES_SOFT_LIMIT.
  4595. */
  4596. static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
  4597. const char *buffer)
  4598. {
  4599. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4600. enum res_type type;
  4601. int name;
  4602. unsigned long long val;
  4603. int ret;
  4604. type = MEMFILE_TYPE(cft->private);
  4605. name = MEMFILE_ATTR(cft->private);
  4606. switch (name) {
  4607. case RES_LIMIT:
  4608. if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
  4609. ret = -EINVAL;
  4610. break;
  4611. }
  4612. /* This function does all necessary parse...reuse it */
  4613. ret = res_counter_memparse_write_strategy(buffer, &val);
  4614. if (ret)
  4615. break;
  4616. if (type == _MEM)
  4617. ret = mem_cgroup_resize_limit(memcg, val);
  4618. else if (type == _MEMSWAP)
  4619. ret = mem_cgroup_resize_memsw_limit(memcg, val);
  4620. else if (type == _KMEM)
  4621. ret = memcg_update_kmem_limit(cont, val);
  4622. else
  4623. return -EINVAL;
  4624. break;
  4625. case RES_SOFT_LIMIT:
  4626. ret = res_counter_memparse_write_strategy(buffer, &val);
  4627. if (ret)
  4628. break;
  4629. /*
  4630. * For memsw, soft limits are hard to implement in terms
4631. * of semantics; for now, we only support soft limits for
4632. * memory control without swap.
  4633. */
  4634. if (type == _MEM)
  4635. ret = res_counter_set_soft_limit(&memcg->res, val);
  4636. else
  4637. ret = -EINVAL;
  4638. break;
  4639. default:
  4640. ret = -EINVAL; /* should be BUG() ? */
  4641. break;
  4642. }
  4643. return ret;
  4644. }
  4645. static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
  4646. unsigned long long *mem_limit, unsigned long long *memsw_limit)
  4647. {
  4648. unsigned long long min_limit, min_memsw_limit, tmp;
  4649. min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  4650. min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  4651. if (!memcg->use_hierarchy)
  4652. goto out;
  4653. while (css_parent(&memcg->css)) {
  4654. memcg = mem_cgroup_from_css(css_parent(&memcg->css));
  4655. if (!memcg->use_hierarchy)
  4656. break;
  4657. tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
  4658. min_limit = min(min_limit, tmp);
  4659. tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  4660. min_memsw_limit = min(min_memsw_limit, tmp);
  4661. }
  4662. out:
  4663. *mem_limit = min_limit;
  4664. *memsw_limit = min_memsw_limit;
  4665. }
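/*
 * Worked example for the helper above (hypothetical numbers): with hierarchy
 * enabled along the chain A (limit 1G) -> B (limit 512M) -> C (unlimited),
 * calling it on C yields *mem_limit == 512M, i.e. the minimum limit along
 * the path to the root, since that is what effectively constrains C.
 */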
  4666. static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
  4667. {
  4668. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4669. int name;
  4670. enum res_type type;
  4671. type = MEMFILE_TYPE(event);
  4672. name = MEMFILE_ATTR(event);
  4673. switch (name) {
  4674. case RES_MAX_USAGE:
  4675. if (type == _MEM)
  4676. res_counter_reset_max(&memcg->res);
  4677. else if (type == _MEMSWAP)
  4678. res_counter_reset_max(&memcg->memsw);
  4679. else if (type == _KMEM)
  4680. res_counter_reset_max(&memcg->kmem);
  4681. else
  4682. return -EINVAL;
  4683. break;
  4684. case RES_FAILCNT:
  4685. if (type == _MEM)
  4686. res_counter_reset_failcnt(&memcg->res);
  4687. else if (type == _MEMSWAP)
  4688. res_counter_reset_failcnt(&memcg->memsw);
  4689. else if (type == _KMEM)
  4690. res_counter_reset_failcnt(&memcg->kmem);
  4691. else
  4692. return -EINVAL;
  4693. break;
  4694. }
  4695. return 0;
  4696. }
  4697. static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
  4698. struct cftype *cft)
  4699. {
  4700. return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
  4701. }
  4702. #ifdef CONFIG_MMU
  4703. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  4704. struct cftype *cft, u64 val)
  4705. {
  4706. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4707. if (val >= (1 << NR_MOVE_TYPE))
  4708. return -EINVAL;
  4709. /*
  4710. * No kind of locking is needed in here, because ->can_attach() will
  4711. * check this value once in the beginning of the process, and then carry
  4712. * on with stale data. This means that changes to this value will only
  4713. * affect task migrations starting after the change.
  4714. */
  4715. memcg->move_charge_at_immigrate = val;
  4716. return 0;
  4717. }
  4718. #else
  4719. static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  4720. struct cftype *cft, u64 val)
  4721. {
  4722. return -ENOSYS;
  4723. }
  4724. #endif
  4725. #ifdef CONFIG_NUMA
  4726. static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
  4727. struct seq_file *m)
  4728. {
  4729. int nid;
  4730. unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
  4731. unsigned long node_nr;
  4732. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4733. total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
  4734. seq_printf(m, "total=%lu", total_nr);
  4735. for_each_node_state(nid, N_MEMORY) {
  4736. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
  4737. seq_printf(m, " N%d=%lu", nid, node_nr);
  4738. }
  4739. seq_putc(m, '\n');
  4740. file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
  4741. seq_printf(m, "file=%lu", file_nr);
  4742. for_each_node_state(nid, N_MEMORY) {
  4743. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  4744. LRU_ALL_FILE);
  4745. seq_printf(m, " N%d=%lu", nid, node_nr);
  4746. }
  4747. seq_putc(m, '\n');
  4748. anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
  4749. seq_printf(m, "anon=%lu", anon_nr);
  4750. for_each_node_state(nid, N_MEMORY) {
  4751. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  4752. LRU_ALL_ANON);
  4753. seq_printf(m, " N%d=%lu", nid, node_nr);
  4754. }
  4755. seq_putc(m, '\n');
  4756. unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
  4757. seq_printf(m, "unevictable=%lu", unevictable_nr);
  4758. for_each_node_state(nid, N_MEMORY) {
  4759. node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
  4760. BIT(LRU_UNEVICTABLE));
  4761. seq_printf(m, " N%d=%lu", nid, node_nr);
  4762. }
  4763. seq_putc(m, '\n');
  4764. return 0;
  4765. }
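/*
 * Sample of the numa_stat format produced above (values are hypothetical,
 * shown for a two-node machine):
 *
 *	total=2048 N0=1280 N1=768
 *	file=1536 N0=1024 N1=512
 *	anon=448 N0=192 N1=256
 *	unevictable=64 N0=64 N1=0
 */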
  4766. #endif /* CONFIG_NUMA */
  4767. static inline void mem_cgroup_lru_names_not_uptodate(void)
  4768. {
  4769. BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
  4770. }
  4771. static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
  4772. struct seq_file *m)
  4773. {
  4774. struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  4775. struct mem_cgroup *mi;
  4776. unsigned int i;
  4777. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  4778. if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
  4779. continue;
  4780. seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
  4781. mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
  4782. }
  4783. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
  4784. seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
  4785. mem_cgroup_read_events(memcg, i));
  4786. for (i = 0; i < NR_LRU_LISTS; i++)
  4787. seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
  4788. mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
  4789. /* Hierarchical information */
  4790. {
  4791. unsigned long long limit, memsw_limit;
  4792. memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
  4793. seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
  4794. if (do_swap_account)
  4795. seq_printf(m, "hierarchical_memsw_limit %llu\n",
  4796. memsw_limit);
  4797. }
  4798. for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
  4799. long long val = 0;
  4800. if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
  4801. continue;
  4802. for_each_mem_cgroup_tree(mi, memcg)
  4803. val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
  4804. seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
  4805. }
  4806. for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
  4807. unsigned long long val = 0;
  4808. for_each_mem_cgroup_tree(mi, memcg)
  4809. val += mem_cgroup_read_events(mi, i);
  4810. seq_printf(m, "total_%s %llu\n",
  4811. mem_cgroup_events_names[i], val);
  4812. }
  4813. for (i = 0; i < NR_LRU_LISTS; i++) {
  4814. unsigned long long val = 0;
  4815. for_each_mem_cgroup_tree(mi, memcg)
  4816. val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
  4817. seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
  4818. }
  4819. #ifdef CONFIG_DEBUG_VM
  4820. {
  4821. int nid, zid;
  4822. struct mem_cgroup_per_zone *mz;
  4823. struct zone_reclaim_stat *rstat;
  4824. unsigned long recent_rotated[2] = {0, 0};
  4825. unsigned long recent_scanned[2] = {0, 0};
  4826. for_each_online_node(nid)
  4827. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  4828. mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  4829. rstat = &mz->lruvec.reclaim_stat;
  4830. recent_rotated[0] += rstat->recent_rotated[0];
  4831. recent_rotated[1] += rstat->recent_rotated[1];
  4832. recent_scanned[0] += rstat->recent_scanned[0];
  4833. recent_scanned[1] += rstat->recent_scanned[1];
  4834. }
  4835. seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
  4836. seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
  4837. seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
  4838. seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
  4839. }
  4840. #endif
  4841. return 0;
  4842. }
  4843. static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
  4844. {
  4845. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4846. return mem_cgroup_swappiness(memcg);
  4847. }
  4848. static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
  4849. u64 val)
  4850. {
  4851. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4852. struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
  4853. if (val > 100 || !parent)
  4854. return -EINVAL;
  4855. mutex_lock(&memcg_create_mutex);
  4856. /* If under hierarchy, only empty-root can set this value */
  4857. if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
  4858. mutex_unlock(&memcg_create_mutex);
  4859. return -EINVAL;
  4860. }
  4861. memcg->swappiness = val;
  4862. mutex_unlock(&memcg_create_mutex);
  4863. return 0;
  4864. }
  4865. static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
  4866. {
  4867. struct mem_cgroup_threshold_ary *t;
  4868. u64 usage;
  4869. int i;
  4870. rcu_read_lock();
  4871. if (!swap)
  4872. t = rcu_dereference(memcg->thresholds.primary);
  4873. else
  4874. t = rcu_dereference(memcg->memsw_thresholds.primary);
  4875. if (!t)
  4876. goto unlock;
  4877. usage = mem_cgroup_usage(memcg, swap);
  4878. /*
4879. * current_threshold points to the threshold just below or equal to usage.
4880. * If that is not the case, a threshold was crossed after the last
  4881. * call of __mem_cgroup_threshold().
  4882. */
  4883. i = t->current_threshold;
  4884. /*
  4885. * Iterate backward over array of thresholds starting from
  4886. * current_threshold and check if a threshold is crossed.
4887. * If none of the thresholds below usage is crossed, we read
  4888. * only one element of the array here.
  4889. */
  4890. for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
  4891. eventfd_signal(t->entries[i].eventfd, 1);
  4892. /* i = current_threshold + 1 */
  4893. i++;
  4894. /*
  4895. * Iterate forward over array of thresholds starting from
  4896. * current_threshold+1 and check if a threshold is crossed.
4897. * If none of the thresholds above usage is crossed, we read
  4898. * only one element of the array here.
  4899. */
  4900. for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
  4901. eventfd_signal(t->entries[i].eventfd, 1);
  4902. /* Update current_threshold */
  4903. t->current_threshold = i - 1;
  4904. unlock:
  4905. rcu_read_unlock();
  4906. }
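/*
 * Worked example for the threshold walk above (hypothetical numbers): with
 * sorted thresholds {4M, 8M, 16M} and usage previously between 8M and 16M,
 * current_threshold points at the 8M entry. If usage then drops to 6M, the
 * backward loop signals the 8M eventfd and current_threshold moves to the
 * 4M entry. If usage instead grows to 20M, the forward loop signals the
 * 16M eventfd and current_threshold moves to the 16M entry.
 */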
  4907. static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  4908. {
  4909. while (memcg) {
  4910. __mem_cgroup_threshold(memcg, false);
  4911. if (do_swap_account)
  4912. __mem_cgroup_threshold(memcg, true);
  4913. memcg = parent_mem_cgroup(memcg);
  4914. }
  4915. }
  4916. static int compare_thresholds(const void *a, const void *b)
  4917. {
  4918. const struct mem_cgroup_threshold *_a = a;
  4919. const struct mem_cgroup_threshold *_b = b;
4920. return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
  4921. }
  4922. static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
  4923. {
  4924. struct mem_cgroup_eventfd_list *ev;
  4925. list_for_each_entry(ev, &memcg->oom_notify, list)
  4926. eventfd_signal(ev->eventfd, 1);
  4927. return 0;
  4928. }
  4929. static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
  4930. {
  4931. struct mem_cgroup *iter;
  4932. for_each_mem_cgroup_tree(iter, memcg)
  4933. mem_cgroup_oom_notify_cb(iter);
  4934. }
  4935. static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
  4936. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  4937. {
  4938. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  4939. struct mem_cgroup_thresholds *thresholds;
  4940. struct mem_cgroup_threshold_ary *new;
  4941. enum res_type type = MEMFILE_TYPE(cft->private);
  4942. u64 threshold, usage;
  4943. int i, size, ret;
  4944. ret = res_counter_memparse_write_strategy(args, &threshold);
  4945. if (ret)
  4946. return ret;
  4947. mutex_lock(&memcg->thresholds_lock);
  4948. if (type == _MEM)
  4949. thresholds = &memcg->thresholds;
  4950. else if (type == _MEMSWAP)
  4951. thresholds = &memcg->memsw_thresholds;
  4952. else
  4953. BUG();
  4954. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  4955. /* Check if a threshold crossed before adding a new one */
  4956. if (thresholds->primary)
  4957. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
  4958. size = thresholds->primary ? thresholds->primary->size + 1 : 1;
  4959. /* Allocate memory for new array of thresholds */
  4960. new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
  4961. GFP_KERNEL);
  4962. if (!new) {
  4963. ret = -ENOMEM;
  4964. goto unlock;
  4965. }
  4966. new->size = size;
  4967. /* Copy thresholds (if any) to new array */
  4968. if (thresholds->primary) {
  4969. memcpy(new->entries, thresholds->primary->entries, (size - 1) *
  4970. sizeof(struct mem_cgroup_threshold));
  4971. }
  4972. /* Add new threshold */
  4973. new->entries[size - 1].eventfd = eventfd;
  4974. new->entries[size - 1].threshold = threshold;
  4975. /* Sort thresholds. Registering of new threshold isn't time-critical */
  4976. sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
  4977. compare_thresholds, NULL);
  4978. /* Find current threshold */
  4979. new->current_threshold = -1;
  4980. for (i = 0; i < size; i++) {
  4981. if (new->entries[i].threshold <= usage) {
  4982. /*
  4983. * new->current_threshold will not be used until
  4984. * rcu_assign_pointer(), so it's safe to increment
  4985. * it here.
  4986. */
  4987. ++new->current_threshold;
  4988. } else
  4989. break;
  4990. }
  4991. /* Free old spare buffer and save old primary buffer as spare */
  4992. kfree(thresholds->spare);
  4993. thresholds->spare = thresholds->primary;
  4994. rcu_assign_pointer(thresholds->primary, new);
  4995. /* To be sure that nobody uses thresholds */
  4996. synchronize_rcu();
  4997. unlock:
  4998. mutex_unlock(&memcg->thresholds_lock);
  4999. return ret;
  5000. }
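/*
 * Illustrative sketch (an assumption, not part of the original file) of how
 * userspace reaches the registration above through the cgroup v1 eventfd
 * interface; the "<memcg>" paths are placeholders for a real memcg directory:
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("<memcg>/memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("<memcg>/cgroup.event_control", O_WRONLY);
 *
 *	(write "<event_fd> <usage_fd> <threshold>" to cgroup.event_control)
 *	dprintf(cfd, "%d %d %llu", efd, ufd, (unsigned long long)8 << 20);
 *
 * A read() on efd then blocks until the usage crosses the 8M threshold and
 * eventfd_signal() above fires.
 */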
  5001. static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
  5002. struct cftype *cft, struct eventfd_ctx *eventfd)
  5003. {
  5004. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  5005. struct mem_cgroup_thresholds *thresholds;
  5006. struct mem_cgroup_threshold_ary *new;
  5007. enum res_type type = MEMFILE_TYPE(cft->private);
  5008. u64 usage;
  5009. int i, j, size;
  5010. mutex_lock(&memcg->thresholds_lock);
  5011. if (type == _MEM)
  5012. thresholds = &memcg->thresholds;
  5013. else if (type == _MEMSWAP)
  5014. thresholds = &memcg->memsw_thresholds;
  5015. else
  5016. BUG();
  5017. if (!thresholds->primary)
  5018. goto unlock;
  5019. usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  5020. /* Check if a threshold crossed before removing */
  5021. __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5022. /* Calculate new number of thresholds */
  5023. size = 0;
  5024. for (i = 0; i < thresholds->primary->size; i++) {
  5025. if (thresholds->primary->entries[i].eventfd != eventfd)
  5026. size++;
  5027. }
  5028. new = thresholds->spare;
  5029. /* Set thresholds array to NULL if we don't have thresholds */
  5030. if (!size) {
  5031. kfree(new);
  5032. new = NULL;
  5033. goto swap_buffers;
  5034. }
  5035. new->size = size;
  5036. /* Copy thresholds and find current threshold */
  5037. new->current_threshold = -1;
  5038. for (i = 0, j = 0; i < thresholds->primary->size; i++) {
  5039. if (thresholds->primary->entries[i].eventfd == eventfd)
  5040. continue;
  5041. new->entries[j] = thresholds->primary->entries[i];
  5042. if (new->entries[j].threshold <= usage) {
  5043. /*
  5044. * new->current_threshold will not be used
  5045. * until rcu_assign_pointer(), so it's safe to increment
  5046. * it here.
  5047. */
  5048. ++new->current_threshold;
  5049. }
  5050. j++;
  5051. }
  5052. swap_buffers:
  5053. /* Swap primary and spare array */
  5054. thresholds->spare = thresholds->primary;
  5055. /* If all events are unregistered, free the spare array */
  5056. if (!new) {
  5057. kfree(thresholds->spare);
  5058. thresholds->spare = NULL;
  5059. }
  5060. rcu_assign_pointer(thresholds->primary, new);
  5061. /* To be sure that nobody uses thresholds */
  5062. synchronize_rcu();
  5063. unlock:
  5064. mutex_unlock(&memcg->thresholds_lock);
  5065. }
  5066. static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
  5067. struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  5068. {
  5069. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  5070. struct mem_cgroup_eventfd_list *event;
  5071. enum res_type type = MEMFILE_TYPE(cft->private);
  5072. BUG_ON(type != _OOM_TYPE);
  5073. event = kmalloc(sizeof(*event), GFP_KERNEL);
  5074. if (!event)
  5075. return -ENOMEM;
  5076. spin_lock(&memcg_oom_lock);
  5077. event->eventfd = eventfd;
  5078. list_add(&event->list, &memcg->oom_notify);
  5079. /* already in OOM ? */
  5080. if (atomic_read(&memcg->under_oom))
  5081. eventfd_signal(eventfd, 1);
  5082. spin_unlock(&memcg_oom_lock);
  5083. return 0;
  5084. }
  5085. static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
  5086. struct cftype *cft, struct eventfd_ctx *eventfd)
  5087. {
  5088. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  5089. struct mem_cgroup_eventfd_list *ev, *tmp;
  5090. enum res_type type = MEMFILE_TYPE(cft->private);
  5091. BUG_ON(type != _OOM_TYPE);
  5092. spin_lock(&memcg_oom_lock);
  5093. list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
  5094. if (ev->eventfd == eventfd) {
  5095. list_del(&ev->list);
  5096. kfree(ev);
  5097. }
  5098. }
  5099. spin_unlock(&memcg_oom_lock);
  5100. }
  5101. static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
  5102. struct cftype *cft, struct cgroup_map_cb *cb)
  5103. {
  5104. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  5105. cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
  5106. if (atomic_read(&memcg->under_oom))
  5107. cb->fill(cb, "under_oom", 1);
  5108. else
  5109. cb->fill(cb, "under_oom", 0);
  5110. return 0;
  5111. }
  5112. static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
  5113. struct cftype *cft, u64 val)
  5114. {
  5115. struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  5116. struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5117. /* cannot be set on the root cgroup; only 0 and 1 are allowed */
  5118. if (!parent || !((val == 0) || (val == 1)))
  5119. return -EINVAL;
  5120. mutex_lock(&memcg_create_mutex);
  5121. /* oom-kill-disable is a flag for subhierarchy. */
  5122. if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
  5123. mutex_unlock(&memcg_create_mutex);
  5124. return -EINVAL;
  5125. }
  5126. memcg->oom_kill_disable = val;
  5127. if (!val)
  5128. memcg_oom_recover(memcg);
  5129. mutex_unlock(&memcg_create_mutex);
  5130. return 0;
  5131. }
  5132. #ifdef CONFIG_MEMCG_KMEM
  5133. static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
  5134. {
  5135. int ret;
  5136. memcg->kmemcg_id = -1;
  5137. ret = memcg_propagate_kmem(memcg);
  5138. if (ret)
  5139. return ret;
  5140. return mem_cgroup_sockets_init(memcg, ss);
  5141. }
  5142. static void memcg_destroy_kmem(struct mem_cgroup *memcg)
  5143. {
  5144. mem_cgroup_sockets_destroy(memcg);
  5145. }
  5146. static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
  5147. {
  5148. if (!memcg_kmem_is_active(memcg))
  5149. return;
  5150. /*
5151. * kmem charges can outlive the cgroup. In the case of slab
5152. * pages, for instance, a page may contain objects from various
5153. * processes. As we prevent taking a reference for every
5154. * such allocation, we have to be careful when doing uncharge
5155. * (see memcg_uncharge_kmem) and here during offlining.
5156. *
5157. * The idea is that only the _last_ uncharge which sees
5158. * the dead memcg will drop the last reference. An additional
5159. * reference is taken here before the group is marked dead,
5160. * which is then paired with a css_put during uncharge, resp. here.
5161. *
5162. * Although this might sound strange, as this path is called from
5163. * css_offline() when the reference might have dropped down to 0
5164. * and shouldn't be incremented anymore (css_tryget would fail),
5165. * we do not have other options because of the kmem allocations'
5166. * lifetime.
  5167. */
  5168. css_get(&memcg->css);
  5169. memcg_kmem_mark_dead(memcg);
  5170. if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
  5171. return;
  5172. if (memcg_kmem_test_and_clear_dead(memcg))
  5173. css_put(&memcg->css);
  5174. }
  5175. #else
  5176. static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
  5177. {
  5178. return 0;
  5179. }
  5180. static void memcg_destroy_kmem(struct mem_cgroup *memcg)
  5181. {
  5182. }
  5183. static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
  5184. {
  5185. }
  5186. #endif
  5187. static struct cftype mem_cgroup_files[] = {
  5188. {
  5189. .name = "usage_in_bytes",
  5190. .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
  5191. .read = mem_cgroup_read,
  5192. .register_event = mem_cgroup_usage_register_event,
  5193. .unregister_event = mem_cgroup_usage_unregister_event,
  5194. },
  5195. {
  5196. .name = "max_usage_in_bytes",
  5197. .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
  5198. .trigger = mem_cgroup_reset,
  5199. .read = mem_cgroup_read,
  5200. },
  5201. {
  5202. .name = "limit_in_bytes",
  5203. .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
  5204. .write_string = mem_cgroup_write,
  5205. .read = mem_cgroup_read,
  5206. },
  5207. {
  5208. .name = "soft_limit_in_bytes",
  5209. .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
  5210. .write_string = mem_cgroup_write,
  5211. .read = mem_cgroup_read,
  5212. },
  5213. {
  5214. .name = "failcnt",
  5215. .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
  5216. .trigger = mem_cgroup_reset,
  5217. .read = mem_cgroup_read,
  5218. },
  5219. {
  5220. .name = "stat",
  5221. .read_seq_string = memcg_stat_show,
  5222. },
  5223. {
  5224. .name = "force_empty",
  5225. .trigger = mem_cgroup_force_empty_write,
  5226. },
  5227. {
  5228. .name = "use_hierarchy",
  5229. .flags = CFTYPE_INSANE,
  5230. .write_u64 = mem_cgroup_hierarchy_write,
  5231. .read_u64 = mem_cgroup_hierarchy_read,
  5232. },
  5233. {
  5234. .name = "swappiness",
  5235. .read_u64 = mem_cgroup_swappiness_read,
  5236. .write_u64 = mem_cgroup_swappiness_write,
  5237. },
  5238. {
  5239. .name = "move_charge_at_immigrate",
  5240. .read_u64 = mem_cgroup_move_charge_read,
  5241. .write_u64 = mem_cgroup_move_charge_write,
  5242. },
  5243. {
  5244. .name = "oom_control",
  5245. .read_map = mem_cgroup_oom_control_read,
  5246. .write_u64 = mem_cgroup_oom_control_write,
  5247. .register_event = mem_cgroup_oom_register_event,
  5248. .unregister_event = mem_cgroup_oom_unregister_event,
  5249. .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
  5250. },
  5251. {
  5252. .name = "pressure_level",
  5253. .register_event = vmpressure_register_event,
  5254. .unregister_event = vmpressure_unregister_event,
  5255. },
  5256. #ifdef CONFIG_NUMA
  5257. {
  5258. .name = "numa_stat",
  5259. .read_seq_string = memcg_numa_stat_show,
  5260. },
  5261. #endif
  5262. #ifdef CONFIG_MEMCG_KMEM
  5263. {
  5264. .name = "kmem.limit_in_bytes",
  5265. .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
  5266. .write_string = mem_cgroup_write,
  5267. .read = mem_cgroup_read,
  5268. },
  5269. {
  5270. .name = "kmem.usage_in_bytes",
  5271. .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
  5272. .read = mem_cgroup_read,
  5273. },
  5274. {
  5275. .name = "kmem.failcnt",
  5276. .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
  5277. .trigger = mem_cgroup_reset,
  5278. .read = mem_cgroup_read,
  5279. },
  5280. {
  5281. .name = "kmem.max_usage_in_bytes",
  5282. .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
  5283. .trigger = mem_cgroup_reset,
  5284. .read = mem_cgroup_read,
  5285. },
  5286. #ifdef CONFIG_SLABINFO
  5287. {
  5288. .name = "kmem.slabinfo",
  5289. .read_seq_string = mem_cgroup_slabinfo_read,
  5290. },
  5291. #endif
  5292. #endif
  5293. { }, /* terminate */
  5294. };
  5295. #ifdef CONFIG_MEMCG_SWAP
  5296. static struct cftype memsw_cgroup_files[] = {
  5297. {
  5298. .name = "memsw.usage_in_bytes",
  5299. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
  5300. .read = mem_cgroup_read,
  5301. .register_event = mem_cgroup_usage_register_event,
  5302. .unregister_event = mem_cgroup_usage_unregister_event,
  5303. },
  5304. {
  5305. .name = "memsw.max_usage_in_bytes",
  5306. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
  5307. .trigger = mem_cgroup_reset,
  5308. .read = mem_cgroup_read,
  5309. },
  5310. {
  5311. .name = "memsw.limit_in_bytes",
  5312. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
  5313. .write_string = mem_cgroup_write,
  5314. .read = mem_cgroup_read,
  5315. },
  5316. {
  5317. .name = "memsw.failcnt",
  5318. .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
  5319. .trigger = mem_cgroup_reset,
  5320. .read = mem_cgroup_read,
  5321. },
  5322. { }, /* terminate */
  5323. };
  5324. #endif
  5325. static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
  5326. {
  5327. struct mem_cgroup_per_node *pn;
  5328. struct mem_cgroup_per_zone *mz;
  5329. int zone, tmp = node;
  5330. /*
  5331. * This routine is called against possible nodes.
5332. * But it's a BUG to call kmalloc() against an offline node.
  5333. *
  5334. * TODO: this routine can waste much memory for nodes which will
  5335. * never be onlined. It's better to use memory hotplug callback
  5336. * function.
  5337. */
  5338. if (!node_state(node, N_NORMAL_MEMORY))
  5339. tmp = -1;
  5340. pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
  5341. if (!pn)
  5342. return 1;
  5343. for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  5344. mz = &pn->zoneinfo[zone];
  5345. lruvec_init(&mz->lruvec);
  5346. mz->usage_in_excess = 0;
  5347. mz->on_tree = false;
  5348. mz->memcg = memcg;
  5349. }
  5350. memcg->nodeinfo[node] = pn;
  5351. return 0;
  5352. }
  5353. static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
  5354. {
  5355. kfree(memcg->nodeinfo[node]);
  5356. }
  5357. static struct mem_cgroup *mem_cgroup_alloc(void)
  5358. {
  5359. struct mem_cgroup *memcg;
  5360. size_t size = memcg_size();
  5361. /* Can be very big if nr_node_ids is very big */
  5362. if (size < PAGE_SIZE)
  5363. memcg = kzalloc(size, GFP_KERNEL);
  5364. else
  5365. memcg = vzalloc(size);
  5366. if (!memcg)
  5367. return NULL;
  5368. memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
  5369. if (!memcg->stat)
  5370. goto out_free;
  5371. spin_lock_init(&memcg->pcp_counter_lock);
  5372. return memcg;
  5373. out_free:
  5374. if (size < PAGE_SIZE)
  5375. kfree(memcg);
  5376. else
  5377. vfree(memcg);
  5378. return NULL;
  5379. }
  5380. /*
5381. * When destroying a mem_cgroup, references from swap_cgroup can remain.
5382. * (scanning them all at force_empty is too costly...)
5383. *
5384. * Instead of clearing all references at force_empty, we remember
5385. * the number of references from swap_cgroup and free the mem_cgroup when
5386. * it goes down to 0.
  5387. *
  5388. * Removal of cgroup itself succeeds regardless of refs from swap.
  5389. */
  5390. static void __mem_cgroup_free(struct mem_cgroup *memcg)
  5391. {
  5392. int node;
  5393. size_t size = memcg_size();
  5394. mem_cgroup_remove_from_trees(memcg);
  5395. free_css_id(&mem_cgroup_subsys, &memcg->css);
  5396. for_each_node(node)
  5397. free_mem_cgroup_per_zone_info(memcg, node);
  5398. free_percpu(memcg->stat);
  5399. /*
  5400. * We need to make sure that (at least for now), the jump label
  5401. * destruction code runs outside of the cgroup lock. This is because
  5402. * get_online_cpus(), which is called from the static_branch update,
  5403. * can't be called inside the cgroup_lock. cpusets are the ones
  5404. * enforcing this dependency, so if they ever change, we might as well.
  5405. *
  5406. * schedule_work() will guarantee this happens. Be careful if you need
  5407. * to move this code around, and make sure it is outside
  5408. * the cgroup_lock.
  5409. */
  5410. disarm_static_keys(memcg);
  5411. if (size < PAGE_SIZE)
  5412. kfree(memcg);
  5413. else
  5414. vfree(memcg);
  5415. }

/*
 * Returns the parent mem_cgroup in the memcg hierarchy, provided
 * use_hierarchy is enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
EXPORT_SYMBOL(parent_mem_cgroup);
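
/*
 * The init below builds the global soft-limit machinery: one rb-tree root
 * plus lock per (node, zone) pair, stored in
 * soft_limit_tree.rb_tree_per_node[]. Soft limit reclaim later uses these
 * trees to find memcgs whose usage exceeds their soft limit.
 */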
static void __init mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node(node) {
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		BUG_ON(!rtpn);

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
}
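
/*
 * cgroup core drives the callbacks below in a fixed order:
 * css_alloc() allocates and partially initializes the memcg,
 * css_online() wires it into the parent's hierarchy, css_offline()
 * detaches it and reparents its charges, and css_free() releases it.
 */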
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node(node)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (parent_css == NULL) {
		root_mem_cgroup = memcg;
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
	}

	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);

	return &memcg->css;

free_out:
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static int
mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
	int error = 0;

	if (!parent)
		return 0;

	mutex_lock(&memcg_create_mutex);

	memcg->use_hierarchy = parent->use_hierarchy;
	memcg->oom_kill_disable = parent->oom_kill_disable;
	memcg->swappiness = mem_cgroup_swappiness(parent);

	if (parent->use_hierarchy) {
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
		res_counter_init(&memcg->kmem, &parent->kmem);

		/*
		 * No need to take a reference to the parent because cgroup
		 * core guarantees its existence.
		 */
	} else {
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
		/*
		 * A deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense, so let the cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			mem_cgroup_subsys.broken_hierarchy = true;
	}

	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
	mutex_unlock(&memcg_create_mutex);
	return error;
}

/*
 * Notify all parents that a group from their hierarchy is gone.
 */
static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent = memcg;

	while ((parent = parent_mem_cgroup(parent)))
		mem_cgroup_iter_invalidate(parent);

	/*
	 * if the root memcg is not hierarchical we have to check it
	 * explicitly.
	 */
	if (!root_mem_cgroup->use_hierarchy)
		mem_cgroup_iter_invalidate(root_mem_cgroup);
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	kmem_cgroup_css_offline(memcg);

	mem_cgroup_invalidate_reclaim_iterators(memcg);
	mem_cgroup_reparent_charges(memcg);
	mem_cgroup_destroy_all_caches(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg_destroy_kmem(memcg);
	__mem_cgroup_free(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
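/*
 * Precharge strategy: first try to reserve all @count pages against
 * mc.to's res counter (and the memsw counter when swap accounting is on)
 * in one go; if that fails, fall back to charging one page at a time,
 * calling cond_resched() every PRECHARGE_COUNT_AT_ONCE iterations and
 * bailing out if a signal is pending.
 */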
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *memcg = mc.to;

	if (mem_cgroup_is_root(memcg)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "memcg" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&memcg->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: pointer where the target page or swap entry will be stored
 *          (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in
 *     target->page with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon())
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task (page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(swap_address_space(swap), swap.val);
	}
#endif
	return page;
}
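
/*
 * The three mc_handle_*_pte() helpers above cover the cases dispatched
 * from get_mctgt_type() below: a present pte, a swap pte, and a
 * none/file pte backed by the page cache.
 */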
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only loose check w/o page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
	    css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}
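
/*
 * mem_cgroup_precharge_mc() counts the movable pages of @mm with the
 * page-table walk above and reserves that many charges on mc.to up front;
 * the actual move then consumes mc.precharge and only falls back to
 * charging one page at a time if the precharge runs out.
 */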
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;
	int i;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);

		for (i = 0; i < mc.moved_swap; i++)
			css_put(&mc.from->css);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done css_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}
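
/*
 * Move-charge protocol, roughly: .can_attach below records mc.from/mc.to
 * and precharges mc.to via mem_cgroup_precharge_mc(); .attach
 * (mem_cgroup_move_task) walks the page tables and moves the charges;
 * .cancel_attach and mem_cgroup_clear_mc() unwind everything if the
 * migration is aborted.
 */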
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	unsigned long move_charge_at_immigrate;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_charge_at_immigrate = memcg->move_charge_at_immigrate;
	if (move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			mc.immigrate_flags = move_charge_at_immigrate;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem may be waiting in our waitq.
		 * So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}
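
/*
 * mem_cgroup_move_task() is the .attach callback: it runs after the task
 * has been moved to the new cgroup and, if mc.to was set up in
 * .can_attach, walks the mm and transfers the charges before tearing
 * down the move-charge state.
 */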
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify sane_behavior flag on each mount attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced with sane_behavior. cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_sane_behavior(root_css->cgroup))
		mem_cgroup_from_css(root_css)->use_hierarchy = true;
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.base_cftypes = mem_cgroup_files,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
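/*
 * Example (kernel command line): booting with "swapaccount=0" clears
 * really_do_swap_account, so enable_swap_cgroup() below leaves
 * do_swap_account off and the memsw.* files are never registered;
 * "swapaccount=1" forces swap accounting on.
 */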

static void __init memsw_file_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
}

static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		memsw_file_init();
	}
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	mem_cgroup_soft_limit_tree_init();
	memcg_stock_init();
	return 0;
}
subsys_initcall(mem_cgroup_init);