/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>

#include <asm/atomic.h>

static DEFINE_MUTEX(cgroup_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) &_x ## _subsys,
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};

#define MAX_CGROUP_ROOT_NAMELEN 64

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy.
 */
struct cgroupfs_root {
        struct super_block *sb;

        /*
         * The bitmask of subsystems intended to be attached to this
         * hierarchy
         */
        unsigned long subsys_bits;

        /* Unique id for this hierarchy. */
        int hierarchy_id;

        /* The bitmask of subsystems currently attached to this hierarchy */
        unsigned long actual_subsys_bits;

        /* A list running through the attached subsystems */
        struct list_head subsys_list;

        /* The root cgroup for this hierarchy */
        struct cgroup top_cgroup;

        /* Tracks how many cgroups are currently defined in this hierarchy */
        int number_of_cgroups;

        /* A list running through the active hierarchies */
        struct list_head root_list;

        /* Hierarchy-specific flags */
        unsigned long flags;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

        /* The name for this hierarchy - may be empty */
        char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State (CSS). Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX (65535)
struct css_id {
        /*
         * The css to which this ID points. This pointer is set to a valid
         * value after the cgroup is populated. If the cgroup is removed,
         * this will be NULL. This pointer is expected to be RCU-safe
         * because destroy() is called after synchronize_rcu(). But for
         * safe use, css_is_removed() or css_tryget() should be used to
         * avoid races.
         */
        struct cgroup_subsys_state __rcu *css;
        /*
         * ID of this css.
         */
        unsigned short id;
        /*
         * Depth in the hierarchy to which this ID belongs.
         */
        unsigned short depth;
        /*
         * ID is freed by RCU. (And the lookup routine is RCU-safe.)
         */
        struct rcu_head rcu_head;
        /*
         * IDs of this css's ancestors in its hierarchy: stack[d] holds
         * the ID at depth d, ending with this css's own ID.
         */
        unsigned short stack[0]; /* Array of length (depth+1) */
};
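
/*
 * Because stack[] records the ID of every ancestor (stack[d] is the ID at
 * depth d, with stack[depth] == id), an ancestry test is a single array
 * probe instead of a hierarchy walk. An illustrative sketch of that test
 * (compiled out; not an API defined at this point in the file):
 */
#if 0
static bool css_id_is_ancestor_sketch(struct css_id *ancestor,
                                      struct css_id *child)
{
        /* an ancestor must sit at a shallower (or equal) depth ... */
        if (ancestor->depth > child->depth)
                return false;
        /* ... and the child's recorded ID at that depth must match it */
        return child->stack[ancestor->depth] == ancestor->id;
}
#endif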

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
        /*
         * Cgroup which the event belongs to.
         */
        struct cgroup *cgrp;
        /*
         * Control file with which the event is associated.
         */
        struct cftype *cft;
        /*
         * eventfd to signal userspace about the event.
         */
        struct eventfd_ctx *eventfd;
        /*
         * Each of these is stored in a list by the cgroup.
         */
        struct list_head list;
        /*
         * All fields below are needed to unregister the event when
         * userspace closes the eventfd.
         */
        poll_table pt;
        wait_queue_head_t *wqh;
        wait_queue_t wait;
        struct work_struct remove;
};
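
/*
 * Userspace arms one of these by writing "<event_fd> <control_fd> [args]"
 * to a cgroup's cgroup.event_control file, then waits on the eventfd. A
 * rough userspace-side sketch (the mount path, watched control file and
 * threshold argument are illustrative assumptions):
 */
#if 0
        int efd = eventfd(0, 0);
        int cfd = open("/cgroup/foo/memory.usage_in_bytes", O_RDONLY);
        int ecfd = open("/cgroup/foo/cgroup.event_control", O_WRONLY);
        char buf[64];
        uint64_t ticks;

        snprintf(buf, sizeof(buf), "%d %d 1048576", efd, cfd);
        write(ecfd, buf, strlen(buf));          /* register the event */
        read(efd, &ticks, sizeof(ticks));       /* blocks until it fires */
#endif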

/* The list of hierarchy roots */
static LIST_HEAD(roots);
static int root_count;

static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/*
 * This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

#ifdef CONFIG_PROVE_LOCKING
int cgroup_lock_is_held(void)
{
        return lockdep_is_held(&cgroup_mutex);
}
#else /* #ifdef CONFIG_PROVE_LOCKING */
int cgroup_lock_is_held(void)
{
        return mutex_is_locked(&cgroup_mutex);
}
#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
EXPORT_SYMBOL_GPL(cgroup_lock_is_held);

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
        return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
        ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
        const int bits =
                (1 << CGRP_RELEASABLE) |
                (1 << CGRP_NOTIFY_ON_RELEASE);
        return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
        return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

static int clone_children(const struct cgroup *cgrp)
{
        return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
        list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
        list_for_each_entry(_root, &roots, root_list)
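
/*
 * A minimal sketch of the two iterators working together (illustrative
 * only; assumes cgroup_mutex is held, which protects both lists):
 */
#if 0
static void dump_active_hierarchies(void)
{
        struct cgroupfs_root *root;
        struct cgroup_subsys *ss;

        for_each_active_root(root)
                for_each_subsys(root, ss)
                        printk(KERN_DEBUG "hierarchy %d: %s\n",
                               root->hierarchy_id, ss->name);
}
#endif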

/*
 * The list of cgroups eligible for automatic release. Protected by
 * release_list_lock.
 */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
        /*
         * List running through cg_cgroup_links associated with a
         * cgroup, anchored on cgroup->css_sets
         */
        struct list_head cgrp_link_list;
        struct cgroup *cgrp;
        /*
         * List running through cg_cgroup_links pointing at a
         * single css_set object, anchored on css_set->cg_links
         */
        struct list_head cg_link_list;
        struct css_set *cg;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
                           struct cgroup_subsys_state *css);

/*
 * css_set_lock protects the list of css_set objects, and the chain of
 * tasks off each css_set. Nests outside task->alloc_lock due to
 * cgroup_iter_start().
 */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * Hash table for css_sets. This speeds up finding an existing css_set.
 * This hash doesn't (currently) take into account cgroups in empty
 * hierarchies.
 */
#define CSS_SET_HASH_BITS       7
#define CSS_SET_TABLE_SIZE      (1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];

static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
{
        int i;
        int index;
        unsigned long tmp = 0UL;

        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
                tmp += (unsigned long)css[i];
        tmp = (tmp >> 16) ^ tmp;

        index = hash_long(tmp, CSS_SET_HASH_BITS);

        return &css_set_table[index];
}

static void free_css_set_rcu(struct rcu_head *obj)
{
        struct css_set *cg = container_of(obj, struct css_set, rcu_head);
        kfree(cg);
}

/*
 * We don't maintain the lists running through each css_set to its
 * task until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use.
 */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cg, int taskexit)
{
        struct cg_cgroup_link *link;
        struct cg_cgroup_link *saved_link;
        /*
         * Ensure that the refcount doesn't hit zero while any readers
         * can see it. Similar to atomic_dec_and_lock(), but for an
         * rwlock
         */
        if (atomic_add_unless(&cg->refcount, -1, 1))
                return;
        write_lock(&css_set_lock);
        if (!atomic_dec_and_test(&cg->refcount)) {
                write_unlock(&css_set_lock);
                return;
        }

        /* This css_set is dead. Unlink it and release cgroup refcounts */
        hlist_del(&cg->hlist);
        css_set_count--;

        list_for_each_entry_safe(link, saved_link, &cg->cg_links,
                                 cg_link_list) {
                struct cgroup *cgrp = link->cgrp;
                list_del(&link->cg_link_list);
                list_del(&link->cgrp_link_list);
                if (atomic_dec_and_test(&cgrp->count) &&
                    notify_on_release(cgrp)) {
                        if (taskexit)
                                set_bit(CGRP_RELEASABLE, &cgrp->flags);
                        check_for_release(cgrp);
                }

                kfree(link);
        }

        write_unlock(&css_set_lock);
        call_rcu(&cg->rcu_head, free_css_set_rcu);
}
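
/*
 * The refcount scheme above is the classic atomic_dec_and_lock() idiom
 * adapted to an rwlock: drop locklessly while the count stays above 1, and
 * take the write lock only for the final 1 -> 0 transition so no reader
 * can observe a zero-count object. The skeleton, distilled (a sketch, not
 * a kernel API):
 */
#if 0
static void put_refcounted(atomic_t *count, rwlock_t *lock)
{
        if (atomic_add_unless(count, -1, 1))
                return;                         /* fast path: stays >= 1 */
        write_lock(lock);
        if (!atomic_dec_and_test(count)) {
                write_unlock(lock);             /* re-got concurrently */
                return;
        }
        /* hit zero with the lock held: safe to unlink and free */
        write_unlock(lock);
}
#endif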

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
        atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
        __put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
        __put_css_set(cg, 1);
}

/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
                             struct css_set *old_cg,
                             struct cgroup *new_cgrp,
                             struct cgroup_subsys_state *template[])
{
        struct list_head *l1, *l2;

        if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
                /* Not all subsystems matched */
                return false;
        }

        /*
         * Compare cgroup pointers in order to distinguish between
         * different cgroups in hierarchies with no subsystems. We
         * could get by with just this check alone (and skip the
         * memcmp above) but on most setups the memcmp check will
         * avoid the need for this more expensive check on almost all
         * candidates.
         */
        l1 = &cg->cg_links;
        l2 = &old_cg->cg_links;
        while (1) {
                struct cg_cgroup_link *cgl1, *cgl2;
                struct cgroup *cg1, *cg2;

                l1 = l1->next;
                l2 = l2->next;
                /* See if we reached the end - both lists are equal length. */
                if (l1 == &cg->cg_links) {
                        BUG_ON(l2 != &old_cg->cg_links);
                        break;
                } else {
                        BUG_ON(l2 == &old_cg->cg_links);
                }
                /* Locate the cgroups associated with these links. */
                cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
                cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
                cg1 = cgl1->cgrp;
                cg2 = cgl2->cgrp;
                /* Hierarchies should be linked in the same order. */
                BUG_ON(cg1->root != cg2->root);

                /*
                 * If this hierarchy is the hierarchy of the cgroup
                 * that's changing, then we need to check that this
                 * css_set points to the new cgroup; if it's any other
                 * hierarchy, then this css_set should point to the
                 * same cgroup as the old css_set.
                 */
                if (cg1->root == new_cgrp->root) {
                        if (cg1 != new_cgrp)
                                return false;
                } else {
                        if (cg1 != cg2)
                                return false;
                }
        }
        return true;
}

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the css_set that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new css_set
 */
static struct css_set *find_existing_css_set(
        struct css_set *oldcg,
        struct cgroup *cgrp,
        struct cgroup_subsys_state *template[])
{
        int i;
        struct cgroupfs_root *root = cgrp->root;
        struct hlist_head *hhead;
        struct hlist_node *node;
        struct css_set *cg;

        /*
         * Build the set of subsystem state objects that we want to see in the
         * new css_set. While subsystems can change globally, the entries here
         * won't change, so no need for locking.
         */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                if (root->subsys_bits & (1UL << i)) {
                        /* Subsystem is in this hierarchy. So we want
                         * the subsystem state from the new
                         * cgroup */
                        template[i] = cgrp->subsys[i];
                } else {
                        /* Subsystem is not in this hierarchy, so we
                         * don't want to change the subsystem state */
                        template[i] = oldcg->subsys[i];
                }
        }

        hhead = css_set_hash(template);
        hlist_for_each_entry(cg, node, hhead, hlist) {
                if (!compare_css_sets(cg, oldcg, cgrp, template))
                        continue;

                /* This css_set matches what we need */
                return cg;
        }

        /* No existing css_set matched */
        return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
        struct cg_cgroup_link *link;
        struct cg_cgroup_link *saved_link;

        list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
                list_del(&link->cgrp_link_list);
                kfree(link);
        }
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
        struct cg_cgroup_link *link;
        int i;

        INIT_LIST_HEAD(tmp);
        for (i = 0; i < count; i++) {
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        free_cg_links(tmp);
                        return -ENOMEM;
                }
                list_add(&link->cgrp_link_list, tmp);
        }
        return 0;
}
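
/*
 * Callers preallocate one link per active hierarchy before taking
 * css_set_lock, consume them one at a time with link_css_set() (defined
 * next), and expect the temporary list to be empty afterwards. A condensed
 * sketch of that calling pattern (see find_css_set() below for the real
 * use; "cg" and "cgrp" are assumed):
 */
#if 0
        struct list_head tmp_cg_links;

        if (allocate_cg_links(root_count, &tmp_cg_links) < 0)
                return NULL;                    /* nothing to roll back */
        write_lock(&css_set_lock);
        link_css_set(&tmp_cg_links, cg, cgrp);  /* once per hierarchy */
        write_unlock(&css_set_lock);
        BUG_ON(!list_empty(&tmp_cg_links));
#endif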

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
                         struct css_set *cg, struct cgroup *cgrp)
{
        struct cg_cgroup_link *link;

        BUG_ON(list_empty(tmp_cg_links));
        link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
                                cgrp_link_list);
        link->cg = cg;
        link->cgrp = cgrp;
        atomic_inc(&cgrp->count);
        list_move(&link->cgrp_link_list, &cgrp->css_sets);
        /*
         * Always add links to the tail of the list so that the list
         * is sorted by order of hierarchy creation
         */
        list_add_tail(&link->cg_link_list, &cg->cg_links);
}

/*
 * find_css_set() takes an existing css_set and a cgroup object, and
 * returns a css_set object that's equivalent to the old set, but with
 * the given cgroup substituted into the appropriate hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct css_set *find_css_set(
        struct css_set *oldcg, struct cgroup *cgrp)
{
        struct css_set *res;
        struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
        struct list_head tmp_cg_links;
        struct hlist_head *hhead;
        struct cg_cgroup_link *link;

        /*
         * First see if we already have a css_set that matches the
         * desired set.
         */
        read_lock(&css_set_lock);
        res = find_existing_css_set(oldcg, cgrp, template);
        if (res)
                get_css_set(res);
        read_unlock(&css_set_lock);

        if (res)
                return res;

        res = kmalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        /* Allocate all the cg_cgroup_link objects that we'll need */
        if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
                kfree(res);
                return NULL;
        }

        atomic_set(&res->refcount, 1);
        INIT_LIST_HEAD(&res->cg_links);
        INIT_LIST_HEAD(&res->tasks);
        INIT_HLIST_NODE(&res->hlist);

        /*
         * Copy the set of subsystem state objects generated in
         * find_existing_css_set().
         */
        memcpy(res->subsys, template, sizeof(res->subsys));

        write_lock(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
                struct cgroup *c = link->cgrp;
                if (c->root == cgrp->root)
                        c = cgrp;
                link_css_set(&tmp_cg_links, res, c);
        }

        BUG_ON(!list_empty(&tmp_cg_links));

        css_set_count++;

        /* Add this css_set to the hash table */
        hhead = css_set_hash(res->subsys);
        hlist_add_head(&res->hlist, hhead);

        write_unlock(&css_set_lock);

        return res;
}
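
/*
 * Typical use, e.g. on a task-attach path: trade the task's current
 * css_set for an equivalent one with @cgrp substituted in. A sketch under
 * the documented rules (cgroup_mutex held; "tsk" and the publish step are
 * assumed/elided):
 */
#if 0
        struct css_set *oldcg, *newcg;

        task_lock(tsk);
        oldcg = tsk->cgroups;
        get_css_set(oldcg);
        task_unlock(tsk);

        newcg = find_css_set(oldcg, cgrp);      /* returns with a reference */
        put_css_set(oldcg);
        if (!newcg)
                return -ENOMEM;
        /* ... publish tsk->cgroups = newcg under task_lock(), then drop
         *     the task's reference to its old css_set ... */
#endif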

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
                                            struct cgroupfs_root *root)
{
        struct css_set *css;
        struct cgroup *res = NULL;

        BUG_ON(!mutex_is_locked(&cgroup_mutex));
        read_lock(&css_set_lock);
        /*
         * No need to lock the task - since we hold cgroup_mutex the
         * task can't change groups, so the only thing that can happen
         * is that it exits and its css is set back to init_css_set.
         */
        css = task->cgroups;
        if (css == &init_css_set) {
                res = &root->top_cgroup;
        } else {
                struct cg_cgroup_link *link;
                list_for_each_entry(link, &css->cg_links, cg_link_list) {
                        struct cgroup *c = link->cgrp;
                        if (c->root == root) {
                                res = c;
                                break;
                        }
                }
        }
        read_unlock(&css_set_lock);
        BUG_ON(!res);
        return res;
}
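
/*
 * This is the primitive behind, e.g., the /proc/<pid>/cgroup display path:
 * for each active hierarchy, find the cgroup the task occupies. A minimal
 * sketch of that loop (buffer and error handling elided; "tsk" and "buf"
 * are assumed):
 */
#if 0
        struct cgroupfs_root *root;

        cgroup_lock();
        for_each_active_root(root) {
                struct cgroup *cgrp = task_cgroup_from_root(tsk, root);
                /* ... cgroup_path(cgrp, buf, PATH_MAX) and emit one line ... */
        }
        cgroup_unlock();
#endif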

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of the cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task().
 */

/**
 * cgroup_lock - lock out any changes to cgroup structures
 */
void cgroup_lock(void)
{
        mutex_lock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_lock);

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
        mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unlock);
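
/*
 * The rules above, condensed: take cgroup_mutex (cgroup_lock()) to modify
 * cgroup structures; use task_lock() or RCU to dereference a task's
 * css_set. A minimal read-side sketch under those assumptions (not an API
 * exported by this file):
 */
#if 0
static struct cgroup_subsys_state *peek_css(struct task_struct *tsk,
                                            int subsys_id)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();        /* cgroup_attach_task() updates under RCU */
        css = rcu_dereference(tsk->cgroups)->subsys[subsys_id];
        rcu_read_unlock();
        return css;             /* caller needs its own ref to keep using it */
}
#endif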

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */
static struct dentry *cgroup_lookup(struct inode *dir,
                                    struct dentry *dentry, struct nameidata *nd);
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
        .name           = "cgroup",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
                        struct cgroup *parent, struct cgroup *child);

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
        struct inode *inode = new_inode(sb);

        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
        }
        return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
        struct cgroup_subsys *ss;
        int ret = 0;

        for_each_subsys(cgrp->root, ss)
                if (ss->pre_destroy) {
                        ret = ss->pre_destroy(ss, cgrp);
                        if (ret)
                                break;
                }

        return ret;
}

static void free_cgroup_rcu(struct rcu_head *obj)
{
        struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);

        kfree(cgrp);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
        /* is dentry a directory? if so, kfree() associated cgroup */
        if (S_ISDIR(inode->i_mode)) {
                struct cgroup *cgrp = dentry->d_fsdata;
                struct cgroup_subsys *ss;
                BUG_ON(!(cgroup_is_removed(cgrp)));
                /*
                 * It's possible for external users to be holding css
                 * reference counts on a cgroup; css_put() needs to
                 * be able to access the cgroup after decrementing
                 * the reference count in order to know if it needs to
                 * queue the cgroup to be handled by the release
                 * agent.
                 */
                synchronize_rcu();

                mutex_lock(&cgroup_mutex);
                /*
                 * Release the subsystem state objects.
                 */
                for_each_subsys(cgrp->root, ss)
                        ss->destroy(ss, cgrp);

                cgrp->root->number_of_cgroups--;
                mutex_unlock(&cgroup_mutex);

                /*
                 * Drop the active superblock reference that we took when we
                 * created the cgroup
                 */
                deactivate_super(cgrp->root->sb);

                /*
                 * if we're getting rid of the cgroup, refcount should ensure
                 * that there are no pidlists left.
                 */
                BUG_ON(!list_empty(&cgrp->pidlists));

                call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
        }
        iput(inode);
}

static void remove_dir(struct dentry *d)
{
        struct dentry *parent = dget(d->d_parent);

        d_delete(d);
        simple_rmdir(parent->d_inode, d);
        dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
        struct list_head *node;

        BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
        spin_lock(&dentry->d_lock);
        node = dentry->d_subdirs.next;
        while (node != &dentry->d_subdirs) {
                struct dentry *d = list_entry(node, struct dentry, d_u.d_child);

                spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
                list_del_init(node);
                if (d->d_inode) {
                        /* This should never be called on a cgroup
                         * directory with child cgroups */
                        BUG_ON(d->d_inode->i_mode & S_IFDIR);
                        dget_dlock(d);
                        spin_unlock(&d->d_lock);
                        spin_unlock(&dentry->d_lock);
                        d_delete(d);
                        simple_unlink(dentry->d_inode, d);
                        dput(d);
                        spin_lock(&dentry->d_lock);
                } else
                        spin_unlock(&d->d_lock);
                node = dentry->d_subdirs.next;
        }
        spin_unlock(&dentry->d_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
        struct dentry *parent;

        cgroup_clear_directory(dentry);

        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
        spin_lock(&dentry->d_lock);
        list_del_init(&dentry->d_u.d_child);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
        remove_dir(dentry);
}

/*
 * A queue for waiters to do rmdir() on a cgroup. A task will sleep when
 * cgroup->count == 0 && list_empty(&cgroup->children) && a subsys still
 * holds a reference to css->refcnt. In general, this refcnt is expected
 * to go down to zero soon.
 *
 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
 */
DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);

static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
{
        if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
                wake_up_all(&cgroup_rmdir_waitq);
}

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
{
        css_get(css);
}

void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
{
        cgroup_wakeup_rmdir_waiter(css->cgroup);
        css_put(css);
}
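
/*
 * A subsystem that must finish asynchronous work before its cgroup can go
 * away brackets that work with the pair above, so that a concurrent rmdir
 * sleeps on cgroup_rmdir_waitq instead of failing. A sketch of the calling
 * pattern (my_flush_async_work is a hypothetical subsystem helper):
 */
#if 0
        cgroup_exclude_rmdir(css);              /* pin css; rmdir will wait */
        my_flush_async_work(css);
        cgroup_release_and_wakeup_rmdir(css);   /* unpin and let rmdir retry */
#endif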

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
                             unsigned long final_bits)
{
        unsigned long added_bits, removed_bits;
        struct cgroup *cgrp = &root->top_cgroup;
        int i;

        BUG_ON(!mutex_is_locked(&cgroup_mutex));

        removed_bits = root->actual_subsys_bits & ~final_bits;
        added_bits = final_bits & ~root->actual_subsys_bits;
        /* Check that any added subsystems are currently free */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                unsigned long bit = 1UL << i;
                struct cgroup_subsys *ss = subsys[i];
                if (!(bit & added_bits))
                        continue;
                /*
                 * Nobody should tell us to do a subsys that doesn't exist:
                 * parse_cgroupfs_options should catch that case and refcounts
                 * ensure that subsystems won't disappear once selected.
                 */
                BUG_ON(ss == NULL);
                if (ss->root != &rootnode) {
                        /* Subsystem isn't free */
                        return -EBUSY;
                }
        }

        /*
         * Currently we don't handle adding/removing subsystems when
         * any child cgroups exist. This is theoretically supportable
         * but involves complex error handling, so it's being left until
         * later.
         */
        if (root->number_of_cgroups > 1)
                return -EBUSY;

        /* Process each subsystem */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                unsigned long bit = 1UL << i;
                if (bit & added_bits) {
                        /* We're binding this subsystem to this hierarchy */
                        BUG_ON(ss == NULL);
                        BUG_ON(cgrp->subsys[i]);
                        BUG_ON(!dummytop->subsys[i]);
                        BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
                        mutex_lock(&ss->hierarchy_mutex);
                        cgrp->subsys[i] = dummytop->subsys[i];
                        cgrp->subsys[i]->cgroup = cgrp;
                        list_move(&ss->sibling, &root->subsys_list);
                        ss->root = root;
                        if (ss->bind)
                                ss->bind(ss, cgrp);
                        mutex_unlock(&ss->hierarchy_mutex);
                        /* refcount was already taken, and we're keeping it */
                } else if (bit & removed_bits) {
                        /* We're removing this subsystem */
                        BUG_ON(ss == NULL);
                        BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
                        BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
                        mutex_lock(&ss->hierarchy_mutex);
                        if (ss->bind)
                                ss->bind(ss, dummytop);
                        dummytop->subsys[i]->cgroup = dummytop;
                        cgrp->subsys[i] = NULL;
                        subsys[i]->root = &rootnode;
                        list_move(&ss->sibling, &rootnode.subsys_list);
                        mutex_unlock(&ss->hierarchy_mutex);
                        /* subsystem is now free - drop reference on module */
                        module_put(ss->module);
                } else if (bit & final_bits) {
                        /* Subsystem state should already exist */
                        BUG_ON(ss == NULL);
                        BUG_ON(!cgrp->subsys[i]);
                        /*
                         * a refcount was taken, but we already had one, so
                         * drop the extra reference.
                         */
                        module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
                        BUG_ON(ss->module && !module_refcount(ss->module));
#endif
                } else {
                        /* Subsystem state shouldn't exist */
                        BUG_ON(cgrp->subsys[i]);
                }
        }
        root->subsys_bits = root->actual_subsys_bits = final_bits;
        synchronize_rcu();

        return 0;
}

static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
        struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
        struct cgroup_subsys *ss;

        mutex_lock(&cgroup_mutex);
        for_each_subsys(root, ss)
                seq_printf(seq, ",%s", ss->name);
        if (test_bit(ROOT_NOPREFIX, &root->flags))
                seq_puts(seq, ",noprefix");
        if (strlen(root->release_agent_path))
                seq_printf(seq, ",release_agent=%s", root->release_agent_path);
        if (clone_children(&root->top_cgroup))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_printf(seq, ",name=%s", root->name);
        mutex_unlock(&cgroup_mutex);
        return 0;
}

struct cgroup_sb_opts {
        unsigned long subsys_bits;
        unsigned long flags;
        char *release_agent;
        bool clone_children;
        char *name;
        /* User explicitly requested empty subsystem */
        bool none;

        struct cgroupfs_root *new_root;
};
/*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->clone_children = true;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;
			continue;
		}

		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_bits);
			one_ss = true;
			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}
	/*
	 * If the 'all' option was specified, select all the subsystems.
	 * Otherwise, if none of 'all', 'none' or an explicit subsystem
	 * name was specified, default to 'all' as well.
	 */
	if (all_ss || (!all_ss && !one_ss && !opts->none)) {
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (ss->disabled)
				continue;
			set_bit(i, &opts->subsys_bits);
		}
	}
	/* Consistency checks */

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
	    (opts->subsys_bits & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_bits && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_bits && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_bits))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * oops, one of the modules was going away. this means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
			/* drop refcounts only on the ones we took */
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_bits))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}
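/*
 * Illustrative examples of option strings as parsed above (a sketch, not
 * exhaustive; "blkio" is just a stand-in subsystem name):
 *
 *	"cpu,cpuacct"			-> bits set for cpu and cpuacct
 *	"all"				-> every registered, enabled subsystem
 *	"none,name=systemd"		-> no subsystems, named hierarchy
 *	"blkio,release_agent=/sbin/ra"	-> one subsystem plus a release agent
 *
 * "all" combined with an explicit subsystem name, or "none" combined with
 * any subsystem, fails with -EINVAL per the checks above.
 */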
static void drop_parsed_module_refcounts(unsigned long subsys_bits)
{
	int i;

	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & subsys_bits))
			continue;
		module_put(subsys[i]->module);
	}
}
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);
	if (ret) {
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	/* (re)populate subsystem files */
	cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}
static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	init_cgroup_housekeeping(cgrp);
}
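/*
 * Allocate a hierarchy id for @root. This is the usual two-step IDA
 * pattern: pre-allocate memory with ida_pre_get() outside the spinlock,
 * then retry ida_get_new_above() for as long as it returns -EAGAIN.
 * Returns false only if memory allocation fails.
 */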
static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret);
	return true;
}
static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_bits || opts->none)
	    && (opts->subsys_bits != root->subsys_bits))
		return 0;

	return 1;
}
static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_bits && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	root->subsys_bits = opts->subsys_bits;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->clone_children)
		set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (!root)
		return;

	BUG_ON(!root->hierarchy_id);
	spin_lock(&hierarchy_id_lock);
	ida_remove(&hierarchy_ida, root->hierarchy_id);
	spin_unlock(&hierarchy_id_lock);
	kfree(root);
}
static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_bits && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	return 0;
}
static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
		struct cgroupfs_root *existing_root;
		int i;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		if (strlen(root->name)) {
			/* Check for name clashes with existing mounts */
			for_each_active_root(existing_root) {
				if (!strcmp(existing_root->name, root->name)) {
					ret = -EBUSY;
					mutex_unlock(&cgroup_mutex);
					mutex_unlock(&inode->i_mutex);
					goto drop_new_super;
				}
			}
		}

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			free_cg_links(&tmp_cg_links);
			goto drop_new_super;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
			struct hlist_head *hhead = &css_set_table[i];
			struct hlist_node *node;
			struct css_set *cg;

			hlist_for_each_entry(cg, node, hhead, hlist)
				link_css_set(&tmp_cg_links, cg, root_cgrp);
		}
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->sibling));
		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(root_cgrp);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);
		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_bits);
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 drop_new_super:
	deactivate_locked_super(sb);
 drop_modules:
	drop_parsed_module_refcounts(opts.subsys_bits);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}
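/*
 * Illustrative mount commands and how they land in the code above (a
 * sketch; the mount points are arbitrary):
 *
 *	mount -t cgroup -o cpu,cpuacct none /mnt/cpu
 *		-> new hierarchy bound to the cpu and cpuacct subsystems
 *	mount -t cgroup -o none,name=mygrp none /mnt/mygrp
 *		-> named hierarchy with no subsystems attached
 *
 * A second mount with the same options finds the existing superblock via
 * cgroup_test_super() and takes the "re-used an existing hierarchy" branch.
 */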
static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_mutex);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};
static struct kobject *cgroup_kobj;

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}
/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Called with cgroup_mutex held or else with an RCU-protected cgroup
 * reference. Writes path of cgroup into buf. Returns 0 on success,
 * -errno on error.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	char *start;
	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
						      rcu_read_lock_held() ||
						      cgroup_lock_is_held());

	if (!dentry || cgrp == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = dentry->d_name.len;

		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;

		dentry = rcu_dereference_check(cgrp->dentry,
					       rcu_read_lock_held() ||
					       cgroup_lock_is_held());
		if (!cgrp->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_path);
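/*
 * Usage sketch (illustrative only; the buffer name and pr_info() call are
 * not part of this file): a caller holding cgroup_mutex, or an RCU read
 * lock plus a valid cgroup reference, might do
 *
 *	char name[PATH_MAX];
 *
 *	if (!cgroup_path(cgrp, name, sizeof(name)))
 *		pr_info("task belongs to %s\n", name);
 *
 * The path is built leaf-to-root into the end of the buffer and then
 * memmove()d to the front, so buflen must cover the whole path.
 */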
/**
 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
 * @cgrp: the cgroup the task is attaching to
 * @tsk: the task to be attached
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during call.
 */
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	int retval = 0;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroup *oldcgrp;
	struct css_set *cg;
	struct css_set *newcg;
	struct cgroupfs_root *root = cgrp->root;

	/* Nothing to do if the task is already in that cgroup */
	oldcgrp = task_cgroup_from_root(tsk, root);
	if (cgrp == oldcgrp)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, tsk, false);
			if (retval) {
				/*
				 * Remember on which subsystem the can_attach()
				 * failed, so that we only call cancel_attach()
				 * against the subsystems whose can_attach()
				 * succeeded. (See below)
				 */
				failed_ss = ss;
				goto out;
			}
		}
	}

	task_lock(tsk);
	cg = tsk->cgroups;
	get_css_set(cg);
	task_unlock(tsk);
	/*
	 * Locate or allocate a new css_set for this task,
	 * based on its final set of cgroups
	 */
	newcg = find_css_set(cg, cgrp);
	put_css_set(cg);
	if (!newcg) {
		retval = -ENOMEM;
		goto out;
	}

	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		put_css_set(newcg);
		retval = -ESRCH;
		goto out;
	}
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list)) {
		list_del(&tsk->cg_list);
		list_add(&tsk->cg_list, &newcg->tasks);
	}
	write_unlock(&css_set_lock);

	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk, false);
	}
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	synchronize_rcu();
	put_css_set(cg);

	/*
	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
	 * is no longer empty.
	 */
	cgroup_wakeup_rmdir_waiter(cgrp);
out:
	if (retval) {
		for_each_subsys(root, ss) {
			if (ss == failed_ss)
				/*
				 * This subsystem was the one that failed the
				 * can_attach() check earlier, so we don't need
				 * to call cancel_attach() against it or any
				 * remaining subsystems.
				 */
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(ss, cgrp, tsk, false);
		}
	}
	return retval;
}
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	cgroup_lock();
	for_each_active_root(root) {
		struct cgroup *from_cg = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cg, tsk);
		if (retval)
			break;
	}
	cgroup_unlock();

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
 * held. May take task_lock of task
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			rcu_read_unlock();
			return -ESRCH;
		}

		tcred = __task_cred(tsk);
		if (cred->euid &&
		    cred->euid != tcred->uid &&
		    cred->euid != tcred->suid) {
			rcu_read_unlock();
			return -EACCES;
		}
		get_task_struct(tsk);
		rcu_read_unlock();
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	ret = cgroup_attach_task(cgrp, tsk);
	put_task_struct(tsk);
	return ret;
}

static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
	int ret;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	ret = attach_task_by_pid(cgrp, pid);
	cgroup_unlock();
	return ret;
}
/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the lock should be later released with
 * cgroup_unlock(). On failure returns false with no lock held.
 */
bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_removed(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(cgroup_lock_live_group);

static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	strcpy(cgrp->root->release_agent_path, buffer);
	cgroup_unlock();
	return 0;
}

static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	cgroup_unlock();
	return 0;
}
/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
				struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;	/* nul-terminate */
	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(cgrp, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(cgrp, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}
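/*
 * For example (illustrative; "/cgroup/foo" is an arbitrary mount point,
 * and notify_on_release just happens to be a u64-backed file): a write
 * such as
 *
 *	echo 1 > /cgroup/foo/notify_on_release
 *
 * arrives here as the string "1\n", is stripped and parsed by
 * simple_strtoull(), and is then passed to the file's write_u64() handler.
 * Trailing garbage after the number (e.g. "1x") fails with -EINVAL.
 */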
static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
				   struct file *file,
				   const char __user *userbuf,
				   size_t nbytes, loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;	/* nul-terminate */
	retval = cft->write_string(cgrp, cft, strstrip(buffer));
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}
static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;
	if (cft->write)
		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}

static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(cgrp, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(cgrp, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;

	if (cft->read)
		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}
/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

struct cgroup_seqfile_state {
	struct cftype *cft;
	struct cgroup *cgroup;
};

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cgroup_seqfile_state *state = m->private;
	struct cftype *cft = state->cft;
	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(state->cgroup, cft, &cb);
	}
	return cft->read_seq_string(state->cgroup, cft, m);
}

static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	kfree(seq->private);
	return single_release(inode, file);
}

static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = cgroup_seqfile_release,
};
static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;
	cft = __d_cft(file->f_dentry);

	if (cft->read_map || cft->read_seq_string) {
		struct cgroup_seqfile_state *state =
			kzalloc(sizeof(*state), GFP_USER);
		if (!state)
			return -ENOMEM;
		state->cft = cft;
		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, state);
		if (err < 0)
			kfree(state);
	} else if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = cgroup_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
};
/*
 * Check if a file is a control file
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}

static int cgroup_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static struct dentry *cgroup_lookup(struct inode *dir,
				    struct dentry *dentry, struct nameidata *nd)
{
	static const struct dentry_operations cgroup_dentry_operations = {
		.d_delete = cgroup_delete_dentry,
		.d_iput = cgroup_diput,
	};

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_set_d_op(dentry, &cgroup_dentry_operations);
	d_add(dentry, NULL);
	return NULL;
}
static int cgroup_create_file(struct dentry *dentry, mode_t mode,
			      struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);

		/* start with the directory inode held, so that we can
		 * populate it without racing with another mkdir */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cgroup_create_dir - create a directory for an object.
 * @cgrp: the cgroup we create the directory for. It must have a valid
 *        ->parent field. And we are going to fill its ->dentry field.
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new directory.
 */
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
			     mode_t mode)
{
	struct dentry *parent;
	int error = 0;

	parent = cgrp->parent->dentry;
	error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
	if (!error) {
		dentry->d_fsdata = cgrp;
		inc_nlink(parent->d_inode);
		rcu_assign_pointer(cgrp->dentry, dentry);
		dget(dentry);
	}
	dput(dentry);

	return error;
}
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static mode_t cgroup_file_mode(const struct cftype *cft)
{
	mode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
int cgroup_add_file(struct cgroup *cgrp,
		    struct cgroup_subsys *subsys,
		    const struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct dentry *dentry;
	int error;
	mode_t mode;
	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };

	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);
	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
	dentry = lookup_one_len(name, dir, strlen(name));
	if (!IS_ERR(dentry)) {
		mode = cgroup_file_mode(cft);
		error = cgroup_create_file(dentry, mode | S_IFREG,
					   cgrp->root->sb);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	return error;
}
EXPORT_SYMBOL_GPL(cgroup_add_file);

int cgroup_add_files(struct cgroup *cgrp,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = cgroup_add_file(cgrp, subsys, &cft[i]);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_add_files);
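/*
 * Usage sketch (illustrative; "foo" and its cftype array are hypothetical,
 * not defined in this file): a subsystem typically registers its control
 * files from its populate() callback, and the file names get the subsystem
 * prefix unless the hierarchy was mounted with "noprefix":
 *
 *	static u64 foo_weight_read(struct cgroup *cgrp, struct cftype *cft);
 *
 *	static const struct cftype foo_files[] = {
 *		{
 *			.name = "weight",	// appears as "foo.weight"
 *			.read_u64 = foo_weight_read,
 *		},
 *	};
 *
 *	return cgroup_add_files(cgrp, ss, foo_files, ARRAY_SIZE(foo_files));
 *
 * With no explicit .mode, cgroup_file_mode() above derives S_IRUGO from
 * the read_u64 handler.
 */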
/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
		count += atomic_read(&link->cg->refcount);
	}
	read_unlock(&css_set_lock);
	return count;
}
/*
 * Advance a list_head iterator. The iterator should be positioned at
 * the start of a css_set
 */
static void cgroup_advance_iter(struct cgroup *cgrp,
				struct cgroup_iter *it)
{
	struct list_head *l = it->cg_link;
	struct cg_cgroup_link *link;
	struct css_set *cg;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &cgrp->css_sets) {
			it->cg_link = NULL;
			return;
		}
		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
		cg = link->cg;
	} while (list_empty(&cg->tasks));
	it->cg_link = l;
	it->task = cg->tasks.next;
}
/*
 * To reduce the fork() overhead for systems that are not actually
 * using their cgroups capability, we don't maintain the lists running
 * through each css_set to its tasks until we see the list actually
 * used - in other words after the first call to cgroup_iter_start().
 *
 * The tasklist_lock is not held here, as do_each_thread() and
 * while_each_thread() are protected by RCU.
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &p->cgroups->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	write_unlock(&css_set_lock);
}

void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
{
	/*
	 * The first time anyone tries to iterate across a cgroup,
	 * we need to enable the list linking each css_set to its
	 * tasks, and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);
	it->cg_link = &cgrp->css_sets;
	cgroup_advance_iter(cgrp, it);
}

struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cg_cgroup_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cg_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
	if (l == &link->cg->tasks) {
		/* We reached the end of this task list - move on to
		 * the next cg_cgroup_link */
		cgroup_advance_iter(cgrp, it);
	} else {
		it->task = l;
	}
	return res;
}

void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
{
	read_unlock(&css_set_lock);
}
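/*
 * Usage sketch (illustrative only; matches how cgroupstats_build() and
 * pidlist_array_load() below use the iterator): the triple is always a
 * bracketed loop, and css_set_lock stays read-held from start to end, so
 * the loop body must not sleep:
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it)))
 *		;	// inspect tsk; must not block
 *	cgroup_iter_end(cgrp, &it);
 */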
static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}
/**
 * cgroup_scan_tasks - iterate though all the tasks in a cgroup
 * @scan: struct cgroup_scanner containing arguments for the scan
 *
 * Arguments include pointers to callback functions test_task() and
 * process_task().
 * Iterate through all the tasks in a cgroup, calling test_task() for each,
 * and if it returns true, call process_task() for it also.
 * The test_task pointer may be NULL, meaning always true (select all tasks).
 * Effectively duplicates cgroup_iter_{start,next,end}()
 * but does not lock css_set_lock for the call to process_task().
 * The struct cgroup_scanner may be embedded in any structure of the caller's
 * creation.
 * It is guaranteed that process_task() will act on every task that
 * is a member of the cgroup for the duration of this call. This
 * function may or may not call process_task() for tasks that exit
 * or move to a different cgroup during the call, or are forked or
 * move into the cgroup during the call.
 *
 * Note that test_task() may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should
 * be cheap.
 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
 * pre-allocated and will be used for heap operations (and its "gt" member will
 * be overwritten), else a temporary heap will be used (allocation of which
 * may cause this function to fail).
 */
int cgroup_scan_tasks(struct cgroup_scanner *scan)
{
	int retval, i;
	struct cgroup_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct ptr_heap *heap;
	struct timespec latest_time = { 0, 0 };

	if (scan->heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap = scan->heap;
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

 again:
	/*
	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
	 * to determine which are of interest, and using the scanner's
	 * "process_task" callback to process any of them that need an update.
	 * Since we don't want to hold any locks during the task updates,
	 * gather tasks to be processed in a heap structure.
	 * The heap is sorted by descending task start time.
	 * If the statically-sized heap fills up, we overflow tasks that
	 * started later, and in future iterations only consider tasks that
	 * started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	cgroup_iter_start(scan->cg, &it);
	while ((p = cgroup_iter_next(scan->cg, &it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if he provided one
		 */
		if (scan->test_task && !scan->test_task(p, scan))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	cgroup_iter_end(scan->cg, &it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			scan->process_task(q, scan);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
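/*
 * Usage sketch (illustrative; update_task() is hypothetical): a caller
 * embeds the scanner, fills in the cgroup and callbacks, and lets
 * cgroup_scan_tasks() worry about the heap and the rescan loop:
 *
 *	static void update_task(struct task_struct *p,
 *				struct cgroup_scanner *scan)
 *	{
 *		// runs without css_set_lock held, so it may sleep
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg = cgrp,
 *		.test_task = NULL,	// select every task
 *		.process_task = update_task,
 *		.heap = NULL,		// use a temporary heap
 *	};
 *
 *	return cgroup_scan_tasks(&scan);
 */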
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
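/*
 * Note that pidlist_resize() is only ever used to shrink the array (its
 * sole caller is pidlist_uniq() below), which is why copying newcount
 * entries out of the old vmalloc'd buffer is safe.
 */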
static void *pidlist_resize(void *p, int newcount)
{
	void *newlist;

	/* note: if new alloc fails, old p will still be valid either way */
	if (is_vmalloc_addr(p)) {
		newlist = vmalloc(newcount * sizeof(pid_t));
		if (!newlist)
			return NULL;
		memcpy(newlist, p, newcount * sizeof(pid_t));
		vfree(p);
	} else {
		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
	}
	return newlist;
}
/*
 * pidlist_uniq - given a sorted pid list (kmalloc'd or vmalloc'd), strip
 * out all duplicate entries. If the new stripped list is sufficiently
 * smaller and there's enough memory to allocate a new buffer, will let go
 * of the unneeded memory. Returns the number of unique elements.
 */
/* is the size difference enough that we should re-allocate the array? */
#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
static int pidlist_uniq(pid_t **p, int length)
{
	int src, dest = 1;
	pid_t *list = *p;
	pid_t *newlist;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	/*
	 * if the length difference is large enough, we want to allocate a
	 * smaller buffer to save memory. if this fails due to out of memory,
	 * we'll just stay with what we've got.
	 */
	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
		newlist = pidlist_resize(list, dest);
		if (newlist)
			*p = newlist;
	}
	return dest;
}
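/*
 * sort() comparator for the pid arrays. Plain subtraction is safe here:
 * pids are bounded well below INT_MAX (by PID_MAX_LIMIT), so the
 * difference cannot overflow.
 */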
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = current->nsproxy->pid_ns;

	/*
	 * We can't drop the pidlist_mutex before taking the l->mutex in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->mutex);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->mutex);
	down_write(&l->mutex);
	l->key.type = type;
	l->key.ns = get_pid_ns(ns);
	l->use_count = 0; /* don't increment here */
	l->list = NULL;
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}
/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct cgroup_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	cgroup_iter_end(cgrp, &it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(&array, length);
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->mutex);
	*lp = l;
	return 0;
}
/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct cgroup_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	cgroup_iter_end(cgrp, &it);

err:
	return ret;
}
/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	down_read(&l->mutex);
	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_pidlist *l = s->private;
	up_read(&l->mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}
  2628. /*
  2629. * seq_operations functions for iterating on pidlists through seq_file -
  2630. * independent of whether it's tasks or procs
  2631. */
  2632. static const struct seq_operations cgroup_pidlist_seq_operations = {
  2633. .start = cgroup_pidlist_start,
  2634. .stop = cgroup_pidlist_stop,
  2635. .next = cgroup_pidlist_next,
  2636. .show = cgroup_pidlist_show,
  2637. };

static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * The case where we're the last user of this particular pidlist
	 * will have us remove it from the cgroup's list, which entails
	 * taking the mutex. Since in pidlist_find() the pidlist's lock
	 * depends on cgroup->pidlist_mutex, we have to take pidlist_mutex
	 * first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->mutex);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		up_write(&l->mutex);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->mutex);
}

static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
	struct cgroup_pidlist *l;

	if (!(file->f_mode & FMODE_READ))
		return 0;
	/*
	 * The seq_file will only have been initialized if the file was
	 * opened for reading, so only dereference private_data in that
	 * case.
	 */
	l = ((struct seq_file *)file->private_data)->private;
	cgroup_release_pid_array(l);
	return seq_release(inode, file);
}

static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};

/*
 * The following functions handle opens on a file that displays a pidlist
 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
 * in the cgroup.
 */

/* helper function for the two below it */
static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
	struct cgroup_pidlist *l;
	int retval;

	/* Nothing to do for write-only files */
	if (!(file->f_mode & FMODE_READ))
		return 0;

	/* have the array populated */
	retval = pidlist_array_load(cgrp, type, &l);
	if (retval)
		return retval;
	/* configure file information */
	file->f_op = &cgroup_pidlist_operations;

	retval = seq_open(file, &cgroup_pidlist_seq_operations);
	if (retval) {
		cgroup_release_pid_array(l);
		return retval;
	}
	((struct seq_file *)file->private_data)->private = l;
	return 0;
}

static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
}

static int cgroup_procs_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}

static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
					 struct cftype *cft)
{
	return notify_on_release(cgrp);
}

static int cgroup_write_notify_on_release(struct cgroup *cgrp,
					  struct cftype *cft,
					  u64 val)
{
	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	return 0;
}

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
	struct cgroup_event *event = container_of(work, struct cgroup_event,
						  remove);
	struct cgroup *cgrp = event->cgrp;

	event->cft->unregister_event(cgrp, event->cft, event->eventfd);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	dput(cgrp->dentry);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct cgroup_event *event = container_of(wait,
						  struct cgroup_event, wait);
	struct cgroup *cgrp = event->cgrp;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		__remove_wait_queue(event->wqh, &event->wait);
		spin_lock(&cgrp->event_list_lock);
		list_del(&event->list);
		spin_unlock(&cgrp->event_list_lock);
		/*
		 * We are in atomic context, but cgroup_event_remove()
		 * may sleep, so we have to call it in a workqueue.
		 */
		schedule_work(&event->remove);
	}

	return 0;
}

static void cgroup_event_ptable_queue_proc(struct file *file,
					   wait_queue_head_t *wqh,
					   poll_table *pt)
{
	struct cgroup_event *event = container_of(pt,
						  struct cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * Parse input and register a new cgroup event handler.
 *
 * Input must be in the format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by the control file implementation.
 */
static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	struct cgroup_event *event = NULL;
	unsigned int efd, cfd;
	struct file *efile = NULL;
	struct file *cfile = NULL;
	char *endp;
	int ret;

	efd = simple_strtoul(buffer, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buffer = endp + 1;

	cfd = simple_strtoul(buffer, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buffer = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;
	event->cgrp = cgrp;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
	INIT_WORK(&event->remove, cgroup_event_remove);

	efile = eventfd_fget(efd);
	if (IS_ERR(efile)) {
		ret = PTR_ERR(efile);
		goto fail;
	}

	event->eventfd = eventfd_ctx_fileget(efile);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto fail;
	}

	cfile = fget(cfd);
	if (!cfile) {
		ret = -EBADF;
		goto fail;
	}

	/* the process needs read permission on the control file */
	ret = file_permission(cfile, MAY_READ);
	if (ret < 0)
		goto fail;

	event->cft = __file_cft(cfile);
	if (IS_ERR(event->cft)) {
		ret = PTR_ERR(event->cft);
		goto fail;
	}

	if (!event->cft->register_event || !event->cft->unregister_event) {
		ret = -EINVAL;
		goto fail;
	}

	ret = event->cft->register_event(cgrp, event->cft,
					 event->eventfd, buffer);
	if (ret)
		goto fail;

	if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
		event->cft->unregister_event(cgrp, event->cft, event->eventfd);
		ret = 0;
		goto fail;
	}

	/*
	 * Events should be removed after rmdir of cgroup directory, but before
	 * destroying subsystem state objects. Let's take a reference to the
	 * cgroup directory dentry to do that.
	 */
	dget(cgrp->dentry);

	spin_lock(&cgrp->event_list_lock);
	list_add(&event->list, &cgrp->event_list);
	spin_unlock(&cgrp->event_list_lock);

	fput(cfile);
	fput(efile);

	return 0;

fail:
	if (cfile)
		fput(cfile);

	if (event && event->eventfd && !IS_ERR(event->eventfd))
		eventfd_ctx_put(event->eventfd);

	if (!IS_ERR_OR_NULL(efile))
		fput(efile);

	kfree(event);

	return ret;
}
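
/*
 * Usage sketch (fd numbers and the control file are assumed, not taken
 * from this source): a process creates an eventfd, opens a control file
 * that implements register_event()/unregister_event(), and writes
 *
 *     echo "<eventfd-fd> <control-file-fd> <args>" > cgroup.event_control
 *
 * after which notifications are delivered through the eventfd.
 */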

static u64 cgroup_clone_children_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	return clone_children(cgrp);
}

static int cgroup_clone_children_write(struct cgroup *cgrp,
				       struct cftype *cft,
				       u64 val)
{
	if (val)
		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
	else
		clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
	return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */
/* for hysterical raisins, we can't put this on the older files */
#define CGROUP_FILE_GENERIC_PREFIX "cgroup."

static struct cftype files[] = {
	{
		.name = "tasks",
		.open = cgroup_tasks_open,
		.write_u64 = cgroup_tasks_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "procs",
		.open = cgroup_procs_open,
		/* .write_u64 = cgroup_procs_write, TODO */
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "event_control",
		.write_string = cgroup_write_event_control,
		.mode = S_IWUGO,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
};
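
/*
 * The array above (plus cft_release_agent below, root cgroup only) is what
 * every cgroup directory gets populated with: tasks, cgroup.procs,
 * notify_on_release, cgroup.event_control and cgroup.clone_children.
 */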

static struct cftype cft_release_agent = {
	.name = "release_agent",
	.read_seq_string = cgroup_release_agent_show,
	.write_string = cgroup_release_agent_write,
	.max_write_len = PATH_MAX,
};

static int cgroup_populate_dir(struct cgroup *cgrp)
{
	int err;
	struct cgroup_subsys *ss;

	/* First clear out any existing files */
	cgroup_clear_directory(cgrp->dentry);

	err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
	if (err < 0)
		return err;

	if (cgrp == cgrp->top_cgroup) {
		if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
			return err;
	}

	for_each_subsys(cgrp->root, ss) {
		if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
			return err;
	}
	/* This cgroup is ready now */
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		/*
		 * Update id->css pointer and make this css visible from
		 * CSS ID functions. This pointer will be dereferenced
		 * from the RCU read side without locks.
		 */
		if (css->id)
			rcu_assign_pointer(css->id->css, css);
	}

	return 0;
}

static void init_cgroup_css(struct cgroup_subsys_state *css,
			    struct cgroup_subsys *ss,
			    struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	atomic_set(&css->refcnt, 1);
	css->flags = 0;
	css->id = NULL;
	if (cgrp == dummytop)
		set_bit(CSS_ROOT, &css->flags);
	BUG_ON(cgrp->subsys[ss->subsys_id]);
	cgrp->subsys[ss->subsys_id] = css;
}

static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
{
	/* We need to take each hierarchy_mutex in a consistent order */
	int i;

	/*
	 * No worry about a race with rebind_subsystems that might mess up the
	 * locking order, since both parties are under cgroup_mutex.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		if (ss->root == root)
			mutex_lock(&ss->hierarchy_mutex);
	}
}

static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
{
	int i;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		if (ss->root == root)
			mutex_unlock(&ss->hierarchy_mutex);
	}
}

/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			  mode_t mode)
{
	struct cgroup *cgrp;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups. This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	mutex_lock(&cgroup_mutex);

	init_cgroup_housekeeping(cgrp);

	cgrp->parent = parent;
	cgrp->root = parent->root;
	cgrp->top_cgroup = parent->top_cgroup;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (clone_children(parent))
		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);

	for_each_subsys(root, ss) {
		struct cgroup_subsys_state *css = ss->create(ss, cgrp);

		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_destroy;
		}
		init_cgroup_css(css, ss, cgrp);
		if (ss->use_id) {
			err = alloc_css_id(ss, parent, cgrp);
			if (err)
				goto err_destroy;
		}
		/* At error, ->destroy() callback has to free assigned ID. */
		if (clone_children(parent) && ss->post_clone)
			ss->post_clone(ss, cgrp);
	}

	cgroup_lock_hierarchy(root);
	list_add(&cgrp->sibling, &cgrp->parent->children);
	cgroup_unlock_hierarchy(root);
	root->number_of_cgroups++;

	err = cgroup_create_dir(cgrp, dentry, mode);
	if (err < 0)
		goto err_remove;

	/* The cgroup directory was pre-locked for us */
	BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));

	err = cgroup_populate_dir(cgrp);
	/* If err < 0, we have a half-filled directory - oh well ;) */

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

err_remove:
	cgroup_lock_hierarchy(root);
	list_del(&cgrp->sibling);
	cgroup_unlock_hierarchy(root);
	root->number_of_cgroups--;

err_destroy:
	for_each_subsys(root, ss) {
		if (cgrp->subsys[ss->subsys_id])
			ss->destroy(ss, cgrp);
	}

	mutex_unlock(&cgroup_mutex);

	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);

	kfree(cgrp);
	return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

static int cgroup_has_css_refs(struct cgroup *cgrp)
{
	/* Check the reference count on each subsystem. Since we
	 * already established that there are no tasks in the
	 * cgroup, if the css refcount is also 1, then there should
	 * be no outstanding references, so the subsystem is safe to
	 * destroy. We scan across all subsystems rather than using
	 * the per-hierarchy linked list of mounted subsystems since
	 * we can be called via check_for_release() with no
	 * synchronization other than RCU, and the subsystem linked
	 * list isn't RCU-safe */
	int i;
	/*
	 * We won't need to lock the subsys array, because the subsystems
	 * we're concerned about aren't going anywhere since our cgroup root
	 * has a reference on them.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		struct cgroup_subsys_state *css;

		/* Skip subsystems not present or not in this hierarchy */
		if (ss == NULL || ss->root != cgrp->root)
			continue;

		css = cgrp->subsys[ss->subsys_id];
		/* When called from check_for_release() it's possible
		 * that by this point the cgroup has been removed
		 * and the css deleted. But a false-positive doesn't
		 * matter, since it can only happen if the cgroup
		 * has been deleted and hence no longer needs the
		 * release agent to be called anyway. */
		if (css && (atomic_read(&css->refcnt) > 1))
			return 1;
	}
	return 0;
}

/*
 * Atomically mark all (or else none) of the cgroup's CSS objects as
 * CSS_REMOVED. Return true on success, or false if the cgroup has
 * busy subsystems. Call with cgroup_mutex held
 */
static int cgroup_clear_css_refs(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	unsigned long flags;
	bool failed = false;

	local_irq_save(flags);
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		int refcnt;

		while (1) {
			/* We can only remove a CSS with a refcnt==1 */
			refcnt = atomic_read(&css->refcnt);
			if (refcnt > 1) {
				failed = true;
				goto done;
			}
			BUG_ON(!refcnt);
			/*
			 * Drop the refcnt to 0 while we check other
			 * subsystems. This will cause any racing
			 * css_tryget() to spin until we set the
			 * CSS_REMOVED bits or abort
			 */
			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
				break;
			cpu_relax();
		}
	}
done:
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		if (failed) {
			/*
			 * Restore old refcnt if we previously managed
			 * to clear it from 1 to 0
			 */
			if (!atomic_read(&css->refcnt))
				atomic_set(&css->refcnt, 1);
		} else {
			/* Commit the fact that the CSS is removed */
			set_bit(CSS_REMOVED, &css->flags);
		}
	}
	local_irq_restore(flags);
	return !failed;
}
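
/*
 * Note on the protocol above: while a css's refcnt is parked at 0,
 * concurrent css_tryget() callers spin; they fail once CSS_REMOVED is set,
 * or succeed once the old count is restored on the failure path.
 * (css_tryget() itself is presumably defined in the cgroup header for this
 * kernel, not in this file.)
 */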

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	struct cgroup *cgrp = dentry->d_fsdata;
	struct dentry *d;
	struct cgroup *parent;
	DEFINE_WAIT(wait);
	struct cgroup_event *event, *tmp;
	int ret;

	/* the VFS already holds i_mutex on both the parent and the target */
again:
	mutex_lock(&cgroup_mutex);
	if (atomic_read(&cgrp->count) != 0) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	if (!list_empty(&cgrp->children)) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	mutex_unlock(&cgroup_mutex);

	/*
	 * In general, a subsystem holds no css->refcnt after pre_destroy().
	 * But in racy cases, a subsystem may need to take css->refcnt after
	 * pre_destroy(), which would make this rmdir return -EBUSY more
	 * often than necessary. To avoid that, we use a waitqueue for
	 * cgroup's rmdir. CGRP_WAIT_ON_RMDIR synchronizes rmdir with the
	 * subsystem's reference count handling. Please see css_get/put,
	 * css_tryget() and the cgroup_wakeup_rmdir_waiter() implementation.
	 */
	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

	/*
	 * Call pre_destroy handlers of subsys. Notify subsystems
	 * that an rmdir() request has come in.
	 */
	ret = cgroup_call_pre_destroy(cgrp);
	if (ret) {
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		return ret;
	}

	mutex_lock(&cgroup_mutex);
	parent = cgrp->parent;
	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
	if (!cgroup_clear_css_refs(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		/*
		 * Because someone may call cgroup_wakeup_rmdir_waiter()
		 * before prepare_to_wait(), we need to check this flag.
		 */
		if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
			schedule();
		finish_wait(&cgroup_rmdir_waitq, &wait);
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		if (signal_pending(current))
			return -EINTR;
		goto again;
	}
	/* No css_tryget() can succeed after this point. */
	finish_wait(&cgroup_rmdir_waitq, &wait);
	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

	spin_lock(&release_list_lock);
	set_bit(CGRP_REMOVED, &cgrp->flags);
	if (!list_empty(&cgrp->release_list))
		list_del(&cgrp->release_list);
	spin_unlock(&release_list_lock);

	cgroup_lock_hierarchy(cgrp->root);
	/* delete this cgroup from parent->children */
	list_del(&cgrp->sibling);
	cgroup_unlock_hierarchy(cgrp->root);

	d = dget(cgrp->dentry);

	cgroup_d_remove_dir(d);
	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removal only after the rmdir of the
	 * cgroup directory, to avoid a race between userspace and
	 * kernelspace.
	 */
	spin_lock(&cgrp->event_list_lock);
	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
		list_del(&event->list);
		remove_wait_queue(event->wqh, &event->wait);
		eventfd_signal(event->eventfd, 1);
		schedule_work(&event->remove);
	}
	spin_unlock(&cgrp->event_list_lock);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	/* Create the top cgroup state for this subsystem */
	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;
	css = ss->create(ss, dummytop);
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_cgroup_css(css, ss, dummytop);

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's top cgroup. */
	init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	mutex_init(&ss->hierarchy_mutex);
	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
	ss->active = 1;

	/* this function shouldn't be used with modular subsystems, since they
	 * need to register a subsys_id, among other things */
	BUG_ON(ss->module);
}

/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 */
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
	int i;
	struct cgroup_subsys_state *css;

	/* check name and function validity */
	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
	    ss->create == NULL || ss->destroy == NULL)
		return -EINVAL;

	/*
	 * we don't support callbacks in modular subsystems. this check is
	 * before the ss->module check for consistency; a subsystem that could
	 * be a module should still have no callbacks even if the user isn't
	 * compiling it as one.
	 */
	if (ss->fork || ss->exit)
		return -EINVAL;

	/*
	 * an optionally modular subsystem is built-in: we want to do nothing,
	 * since cgroup_init_subsys will have already taken care of it.
	 */
	if (ss->module == NULL) {
		/* a few sanity checks */
		BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
		BUG_ON(subsys[ss->subsys_id] != ss);
		return 0;
	}

	/*
	 * need to register a subsys id before anything else - for example,
	 * init_cgroup_css needs it.
	 */
	mutex_lock(&cgroup_mutex);
	/* find the first empty slot in the array */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		if (subsys[i] == NULL)
			break;
	}
	if (i == CGROUP_SUBSYS_COUNT) {
		/* maximum number of subsystems already registered! */
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	/* assign ourselves the subsys_id */
	ss->subsys_id = i;
	subsys[i] = ss;

	/*
	 * no ss->create seems to need anything important in the ss struct, so
	 * this can happen first (i.e. before the rootnode attachment).
	 */
	css = ss->create(ss, dummytop);
	if (IS_ERR(css)) {
		/* failure case - need to deassign the subsys[] slot. */
		subsys[i] = NULL;
		mutex_unlock(&cgroup_mutex);
		return PTR_ERR(css);
	}

	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;

	/* our new subsystem will be attached to the dummy hierarchy. */
	init_cgroup_css(css, ss, dummytop);
	/* init_idr must be after init_cgroup_css because it sets css->id. */
	if (ss->use_id) {
		int ret = cgroup_init_idr(ss, css);
		if (ret) {
			dummytop->subsys[ss->subsys_id] = NULL;
			ss->destroy(ss, dummytop);
			subsys[i] = NULL;
			mutex_unlock(&cgroup_mutex);
			return ret;
		}
	}

	/*
	 * Now we need to entangle the css into the existing css_sets. unlike
	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
	 * will need a new pointer to it; done by iterating the css_set_table.
	 * furthermore, modifying the existing css_sets will corrupt the hash
	 * table state, so each changed css_set will need its hash recomputed.
	 * this is all done under the css_set_lock.
	 */
	write_lock(&css_set_lock);
	for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
		struct css_set *cg;
		struct hlist_node *node, *tmp;
		struct hlist_head *bucket = &css_set_table[i], *new_bucket;

		hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) {
			/* skip entries that we already rehashed */
			if (cg->subsys[ss->subsys_id])
				continue;
			/* remove existing entry */
			hlist_del(&cg->hlist);
			/* set new value */
			cg->subsys[ss->subsys_id] = css;
			/* recompute hash and restore entry */
			new_bucket = css_set_hash(cg->subsys);
			hlist_add_head(&cg->hlist, new_bucket);
		}
	}
	write_unlock(&css_set_lock);

	mutex_init(&ss->hierarchy_mutex);
	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
	ss->active = 1;

	/* success! */
	mutex_unlock(&cgroup_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);

/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
	struct cg_cgroup_link *link;
	struct hlist_head *hhead;

	BUG_ON(ss->module == NULL);

	/*
	 * we shouldn't be called if the subsystem is in use, and the use of
	 * try_module_get in parse_cgroupfs_options should ensure that it
	 * doesn't start being used while we're killing it off.
	 */
	BUG_ON(ss->root != &rootnode);

	mutex_lock(&cgroup_mutex);
	/* deassign the subsys_id */
	BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
	subsys[ss->subsys_id] = NULL;

	/* remove subsystem from rootnode's list of subsystems */
	list_del(&ss->sibling);

	/*
	 * disentangle the css from all css_sets attached to the dummytop. as
	 * in loading, we need to pay our respects to the hashtable gods.
	 */
	write_lock(&css_set_lock);
	list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;

		hlist_del(&cg->hlist);
		BUG_ON(!cg->subsys[ss->subsys_id]);
		cg->subsys[ss->subsys_id] = NULL;
		hhead = css_set_hash(cg->subsys);
		hlist_add_head(&cg->hlist, hhead);
	}
	write_unlock(&css_set_lock);

	/*
	 * remove the subsystem's css from the dummytop and free it - need to
	 * free before marking as NULL because ss->destroy needs the
	 * cgrp->subsys pointer to find its state. note that this also takes
	 * care of freeing the css_id.
	 */
	ss->destroy(ss, dummytop);
	dummytop->subsys[ss->subsys_id] = NULL;

	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	int i;
	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cg_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	css_set_count = 1;
	init_cgroup_root(&rootnode);
	root_count = 1;
	init_task.cgroups = &init_css_set;

	init_css_set_link.cg = &init_css_set;
	init_css_set_link.cgrp = dummytop;
	list_add(&init_css_set_link.cgrp_link_list,
		 &rootnode.top_cgroup.css_sets);
	list_add(&init_css_set_link.cg_link_list,
		 &init_css_set.cg_links);

	for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&css_set_table[i]);

	/* at bootup time, we don't worry about modular subsystems */
	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
		BUG_ON(!ss->create);
		BUG_ON(!ss->destroy);
		if (ss->subsys_id != i) {
			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	int err;
	int i;
	struct hlist_head *hhead;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	/* at bootup time, we don't worry about modular subsystems */
	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (!ss->early_init)
			cgroup_init_subsys(ss);
		if (ss->use_id)
			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
	}

	/* Add init_css_set to the hash table */
	hhead = css_set_hash(init_css_set.subsys);
	hlist_add_head(&init_css_set.hlist, hhead);
	BUG_ON(!init_root_id(&rootnode));

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing
 *    it anyway. No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
static int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(root, ss)
			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
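
/*
 * Each line emitted above has the form
 *
 *     <hierarchy-id>:<comma-separated subsystems>[,name=<name>]:<path>
 *
 * e.g. "3:cpu,cpuacct:/system" (values illustrative, not from this file).
 */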

static int cgroup_open(struct inode *inode, struct file *file)
{
	struct pid *pid = PROC_I(inode)->pid;
	return single_open(file, proc_cgroup_show, pid);
}

const struct file_operations proc_cgroup_operations = {
	.open = cgroup_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);
	}
	mutex_unlock(&cgroup_mutex);
	return 0;
}
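
/*
 * Sample /proc/cgroups output (numbers illustrative):
 *
 *     #subsys_name    hierarchy    num_cgroups    enabled
 *     cpuset          1            4              1
 *     cpu             2            1              1
 */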

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - attach newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of the newly forked child process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct(). However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer. cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced css_set to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	task_lock(current);
	child->cgroups = current->cgroups;
	get_css_set(child->cgroups);
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_fork_callbacks - run fork callbacks
 * @child: the new task
 *
 * Called on a new task very soon before adding it to the
 * tasklist. No need to take any locks since no-one can
 * be operating on this task.
 */
void cgroup_fork_callbacks(struct task_struct *child)
{
	if (need_forkexit_callback) {
		int i;
		/*
		 * forkexit callbacks are only supported for builtin
		 * subsystems, and the builtin section of the subsys array is
		 * immutable, so we don't need to lock the subsys array here.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss->fork)
				ss->fork(ss, child);
		}
	}
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary.
 * Has to be after the task is visible on the task list in case we race
 * with the first call to cgroup_iter_start() - to guarantee that the
 * new task ends up on its list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &child->cgroups->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems. Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference. This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe. The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented. But in this case,
 *    top_cgroup isn't going away, and either the task has PF_EXITING
 *    set, which wards off any cgroup_attach_task() attempts, or the
 *    task is a failed fork, never visible to cgroup_attach_task().
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	int i;
	struct css_set *cg;

	if (run_callbacks && need_forkexit_callback) {
		/*
		 * modular subsystems can't use callbacks, so no need to lock
		 * the subsys array
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss->exit)
				ss->exit(ss, tsk);
		}
	}

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cg = tsk->cgroups;
	tsk->cgroups = &init_css_set;
	task_unlock(tsk);
	if (cg)
		put_css_set_taskexit(cg);
}

/**
 * cgroup_clone - clone the cgroup the given subsystem is attached to
 * @tsk: the task to be moved
 * @subsys: the given subsystem
 * @nodename: the name for the new cgroup
 *
 * Duplicate the current cgroup in the hierarchy that the given
 * subsystem is attached to, and move this task into the new
 * child.
 */
int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
		 char *nodename)
{
	struct dentry *dentry;
	int ret = 0;
	struct cgroup *parent, *child;
	struct inode *inode;
	struct css_set *cg;
	struct cgroupfs_root *root;
	struct cgroup_subsys *ss;

	/* We shouldn't be called by an unregistered subsystem */
	BUG_ON(!subsys->active);

	/* First figure out what hierarchy and cgroup we're dealing
	 * with, and pin them so we can drop cgroup_mutex */
	mutex_lock(&cgroup_mutex);
again:
	root = subsys->root;
	if (root == &rootnode) {
		mutex_unlock(&cgroup_mutex);
		return 0;
	}

	/* Pin the hierarchy */
	if (!atomic_inc_not_zero(&root->sb->s_active)) {
		/* We race with the final deactivate_super() */
		mutex_unlock(&cgroup_mutex);
		return 0;
	}

	/* Keep the cgroup alive */
	task_lock(tsk);
	parent = task_cgroup(tsk, subsys->subsys_id);
	cg = tsk->cgroups;
	get_css_set(cg);
	task_unlock(tsk);

	mutex_unlock(&cgroup_mutex);

	/* Now do the VFS work to create a cgroup */
	inode = parent->dentry->d_inode;

	/* Hold the parent directory mutex across this operation to
	 * stop anyone else deleting the new cgroup */
	mutex_lock(&inode->i_mutex);
	dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
	if (IS_ERR(dentry)) {
		printk(KERN_INFO
		       "cgroup: Couldn't allocate dentry for %s: %ld\n",
		       nodename, PTR_ERR(dentry));
		ret = PTR_ERR(dentry);
		goto out_release;
	}

	/* Create the cgroup directory, which also creates the cgroup */
	ret = vfs_mkdir(inode, dentry, 0755);
	child = __d_cgrp(dentry);
	dput(dentry);
	if (ret) {
		printk(KERN_INFO
		       "Failed to create cgroup %s: %d\n", nodename,
		       ret);
		goto out_release;
	}

	/* The cgroup now exists. Retake cgroup_mutex and check
	 * that we're still in the same state that we thought we
	 * were. */
	mutex_lock(&cgroup_mutex);
	if ((root != subsys->root) ||
	    (parent != task_cgroup(tsk, subsys->subsys_id))) {
		/* Aargh, we raced ... */
		mutex_unlock(&inode->i_mutex);
		put_css_set(cg);

		deactivate_super(root->sb);
		/* The cgroup is still accessible in the VFS, but
		 * we're not going to try to rmdir() it at this
		 * point. */
		printk(KERN_INFO
		       "Race in cgroup_clone() - leaking cgroup %s\n",
		       nodename);
		goto again;
	}

	/* do any required auto-setup */
	for_each_subsys(root, ss) {
		if (ss->post_clone)
			ss->post_clone(ss, child);
	}

	/* All seems fine. Finish by moving the task into the new cgroup */
	ret = cgroup_attach_task(child, tsk);
	mutex_unlock(&cgroup_mutex);

out_release:
	mutex_unlock(&inode->i_mutex);

	mutex_lock(&cgroup_mutex);
	put_css_set(cg);
	mutex_unlock(&cgroup_mutex);
	deactivate_super(root->sb);
	return ret;
}

/**
 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
 * @cgrp: the cgroup in question
 * @task: the task in question
 *
 * See if @cgrp is a descendant of @task's cgroup in the appropriate
 * hierarchy.
 *
 * If we are sending in dummytop, then presumably we are creating
 * the top cgroup in the subsystem.
 *
 * Called only by the ns (nsproxy) cgroup.
 */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
	int ret;
	struct cgroup *target;

	if (cgrp == dummytop)
		return 1;

	target = task_cgroup_from_root(task, cgrp->root);
	while (cgrp != target && cgrp != cgrp->top_cgroup)
		cgrp = cgrp->parent;
	ret = (cgrp == target);
	return ret;
}

static void check_for_release(struct cgroup *cgrp)
{
	/* All of these checks rely on RCU to keep the cgroup
	 * structure alive */
	if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
	    && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
		/* Control Group is currently removable. If it's not
		 * already queued for a userspace notification, queue
		 * it now */
		int need_schedule_work = 0;

		spin_lock(&release_list_lock);
		if (!cgroup_is_removed(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css, int count)
{
	struct cgroup *cgrp = css->cgroup;
	int val;

	rcu_read_lock();
	val = atomic_sub_return(count, &css->refcnt);
	if (val == 1) {
		if (notify_on_release(cgrp)) {
			set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
		cgroup_wakeup_rmdir_waiter(cgrp);
	}
	rcu_read_unlock();
	WARN_ON_ONCE(val < 1);
}
EXPORT_SYMBOL_GPL(__css_put);

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence. Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d. The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task. We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		spin_lock(&release_list_lock);
	}
	spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
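
/*
 * The resulting helper invocation looks like (paths illustrative):
 *
 *     argv = { "<release_agent path>", "/<cgroup path>", NULL }
 *     envp = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }
 */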

static int __init cgroup_disable(char *str)
{
	int i;
	char *token;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;
		/*
		 * cgroup_disable, being at boot time, can't know about module
		 * subsystems, so we don't worry about them.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
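
/*
 * Usage: boot the kernel with e.g. "cgroup_disable=memory,cpuset"
 * (subsystem names illustrative) to keep those built-in subsystems
 * from being used.
 */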

/*
 * Functions for CSS ID.
 */

/*
 * To get an ID other than 0, this should be called when
 * !cgroup_is_removed().
 */
unsigned short css_id(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	/*
	 * This css_id() can return a correct value when someone holds a
	 * refcount on this css or this is under rcu_read_lock(). Once
	 * css->id is allocated, it's unchanged until freed.
	 */
	cssid = rcu_dereference_check(css->id,
			rcu_read_lock_held() || atomic_read(&css->refcnt));

	if (cssid)
		return cssid->id;
	return 0;
}
EXPORT_SYMBOL_GPL(css_id);

unsigned short css_depth(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	cssid = rcu_dereference_check(css->id,
			rcu_read_lock_held() || atomic_read(&css->refcnt));

	if (cssid)
		return cssid->depth;
	return 0;
}
EXPORT_SYMBOL_GPL(css_depth);

/**
 * css_is_ancestor - test whether "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, it uses rcu_dereference() and rcu_read_lock().
 * But, considering usual usage, the csses should be valid objects after the
 * test. Assuming that the caller will do some action on the child if this
 * returns true, the caller must hold a reference count on "child". If
 * "child" is a valid object and this returns true, "root" is valid, too.
 */
bool css_is_ancestor(struct cgroup_subsys_state *child,
		     const struct cgroup_subsys_state *root)
{
	struct css_id *child_id;
	struct css_id *root_id;
	bool ret = true;

	rcu_read_lock();
	child_id = rcu_dereference(child->id);
	root_id = rcu_dereference(root->id);
	if (!child_id
	    || !root_id
	    || (child_id->depth < root_id->depth)
	    || (child_id->stack[root_id->depth] != root_id->id))
		ret = false;
	rcu_read_unlock();
	return ret;
}
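
/*
 * Worked example (ids assumed, not from this file): if root has id 4 at
 * depth 1 with stack {1, 4} and child has id 9 at depth 3 with stack
 * {1, 4, 7, 9}, then child_id->stack[root_id->depth] == 4 == root_id->id,
 * so root is an ancestor of child.
 */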

static void __free_css_id_cb(struct rcu_head *head)
{
	struct css_id *id;

	id = container_of(head, struct css_id, rcu_head);
	kfree(id);
}

void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
	struct css_id *id = css->id;

	/* When this is called before css_id initialization, id can be NULL */
	if (!id)
		return;

	BUG_ON(!ss->use_id);

	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
	spin_unlock(&ss->id_lock);
	call_rcu(&id->rcu_head, __free_css_id_cb);
}
EXPORT_SYMBOL_GPL(free_css_id);

/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (by cgroup_mutex at create()).
 */
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
	struct css_id *newid;
	int myid, error, size;

	BUG_ON(!ss->use_id);

	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
	newid = kzalloc(size, GFP_KERNEL);
	if (!newid)
		return ERR_PTR(-ENOMEM);
	/* get id */
	if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
		error = -ENOMEM;
		goto err_out;
	}
	spin_lock(&ss->id_lock);
	/* Don't use 0; allocate an ID in the range 1-65535 */
	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
	spin_unlock(&ss->id_lock);

	/* Returns an error when there is no free space for a new ID. */
	if (error) {
		error = -ENOSPC;
		goto err_out;
	}
	if (myid > CSS_ID_MAX)
		goto remove_idr;

	newid->id = myid;
	newid->depth = depth;
	return newid;
remove_idr:
	error = -ENOSPC;
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, myid);
	spin_unlock(&ss->id_lock);
err_out:
	kfree(newid);
	return ERR_PTR(error);
}
static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
					    struct cgroup_subsys_state *rootcss)
{
	struct css_id *newid;

	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
	if (IS_ERR(newid))
		return PTR_ERR(newid);

	newid->stack[0] = newid->id;
	newid->css = rootcss;
	rootcss->id = newid;
	return 0;
}

static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
			struct cgroup *child)
{
	int subsys_id, i, depth = 0;
	struct cgroup_subsys_state *parent_css, *child_css;
	struct css_id *child_id, *parent_id;

	subsys_id = ss->subsys_id;
	parent_css = parent->subsys[subsys_id];
	child_css = child->subsys[subsys_id];
	parent_id = parent_css->id;
	depth = parent_id->depth + 1;

	child_id = get_new_cssid(ss, depth);
	if (IS_ERR(child_id))
		return PTR_ERR(child_id);

	for (i = 0; i < depth; i++)
		child_id->stack[i] = parent_id->stack[i];
	child_id->stack[depth] = child_id->id;
	/*
	 * child_id->css pointer will be set after this cgroup is available;
	 * see cgroup_populate_dir().
	 */
	rcu_assign_pointer(child_css->id, child_id);

	return 0;
}
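
/*
 * Worked example of the id stack built above: for a css at depth 2 whose
 * ancestors have ids 1 (root) and 7 (parent) and whose own id is 42,
 * alloc_css_id() produces
 *
 *	child_id->depth    = 2
 *	child_id->stack[0] = 1	(root's id)
 *	child_id->stack[1] = 7	(parent's id)
 *	child_id->stack[2] = 42	(its own id)
 *
 * which is exactly the layout css_is_ancestor() tests via
 * child_id->stack[root_id->depth] == root_id->id.
 */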
/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns the pointer to a cgroup_subsys_state if there is a valid one
 * with the given id, or NULL if there is not. Must be called under
 * rcu_read_lock().
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
	struct css_id *cssid = NULL;

	BUG_ON(!ss->use_id);
	cssid = idr_find(&ss->idr, id);

	if (unlikely(!cssid))
		return NULL;

	return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);
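
/*
 * Example usage (illustrative sketch): the returned pointer is only
 * stable inside the RCU read-side section, so the lookup and the use of
 * the result must share one rcu_read_lock() span. "my_subsys" is a
 * hypothetical subsystem and inspect() a placeholder for whatever the
 * caller does with the css:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = css_lookup(&my_subsys, id);
 *	if (css)
 *		inspect(css);
 *	rcu_read_unlock();
 */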
/**
 * css_get_next - lookup next cgroup under specified hierarchy.
 * @ss: pointer to subsystem
 * @id: current position of iteration.
 * @root: pointer to css. Search the tree under this css.
 * @foundid: position of the found object.
 *
 * Search for the next css under the hierarchy rooted at @root, starting
 * from position @id. Must be called under rcu_read_lock(). Returns NULL
 * when the end of the tree is reached.
 */
struct cgroup_subsys_state *
css_get_next(struct cgroup_subsys *ss, int id,
	     struct cgroup_subsys_state *root, int *foundid)
{
	struct cgroup_subsys_state *ret = NULL;
	struct css_id *tmp;
	int tmpid;
	int rootid = css_id(root);
	int depth = css_depth(root);

	if (!rootid)
		return NULL;

	BUG_ON(!ss->use_id);
	/* fill start point for scan */
	tmpid = id;
	while (1) {
		/*
		 * scan next entry from bitmap(tree), tmpid is updated after
		 * idr_get_next().
		 */
		spin_lock(&ss->id_lock);
		tmp = idr_get_next(&ss->idr, &tmpid);
		spin_unlock(&ss->id_lock);

		if (!tmp)
			break;
		if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
			ret = rcu_dereference(tmp->css);
			if (ret) {
				*foundid = tmpid;
				break;
			}
		}
		/* continue to scan from next id */
		tmpid = tmpid + 1;
	}
	return ret;
}
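
/*
 * Example iteration (illustrative sketch): callers walk the whole tree
 * under "root" by restarting the scan one id past the last hit; visit()
 * is a placeholder for the per-css work:
 *
 *	struct cgroup_subsys_state *css;
 *	int nextid = 1, found;
 *
 *	rcu_read_lock();
 *	while ((css = css_get_next(ss, nextid, root, &found)) != NULL) {
 *		visit(css);
 *		nextid = found + 1;
 *	}
 *	rcu_read_unlock();
 */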
#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
						struct cgroup *cont)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	kfree(cont->subsys[debug_subsys_id]);
}

static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
{
	return atomic_read(&cont->count);
}

static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
{
	return cgroup_task_count(cont);
}

static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup *cont,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&current->cgroups->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct cgroup *cont,
					 struct cftype *cft,
					 struct seq_file *seq)
{
	struct cg_cgroup_link *link;
	struct css_set *cg;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cg = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		const char *name;

		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct cgroup *cont,
				 struct cftype *cft,
				 struct seq_file *seq)
{
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cg);
		list_for_each_entry(task, &cg->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			} else {
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
			}
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
}

static struct cftype debug_files[] = {
	{
		.name = "cgroup_refcount",
		.read_u64 = cgroup_refcount_read,
	},
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},
	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},
	{
		.name = "current_css_set_cg_links",
		.read_seq_string = current_css_set_cg_links_read,
	},
	{
		.name = "cgroup_css_links",
		.read_seq_string = cgroup_css_links_read,
	},
	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},
};

static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, debug_files,
				ARRAY_SIZE(debug_files));
}

struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.create = debug_create,
	.destroy = debug_destroy,
	.populate = debug_populate,
	.subsys_id = debug_subsys_id,
};
#endif /* CONFIG_CGROUP_DEBUG */