cgroup.c

/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>

#include <asm/atomic.h>

static DEFINE_MUTEX(cgroup_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) &_x ## _subsys,
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};
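/*
 * Illustrative note (not in the original source): each SUBSYS(name)
 * entry listed in <linux/cgroup_subsys.h> expands via the macro above
 * to "&name_subsys,", so e.g. SUBSYS(cpuset) contributes
 * &cpuset_subsys to the array.
 */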
#define MAX_CGROUP_ROOT_NAMELEN 64

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy.
 */
struct cgroupfs_root {
        struct super_block *sb;

        /*
         * The bitmask of subsystems intended to be attached to this
         * hierarchy.
         */
        unsigned long subsys_bits;

        /* Unique id for this hierarchy. */
        int hierarchy_id;

        /* The bitmask of subsystems currently attached to this hierarchy */
        unsigned long actual_subsys_bits;

        /* A list running through the attached subsystems */
        struct list_head subsys_list;

        /* The root cgroup for this hierarchy */
        struct cgroup top_cgroup;

        /* Tracks how many cgroups are currently defined in hierarchy. */
        int number_of_cgroups;

        /* A list running through the active hierarchies */
        struct list_head root_list;

        /* Hierarchy-specific flags */
        unsigned long flags;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

        /* The name for this hierarchy - may be empty */
        char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State (CSS). Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX (65535)
struct css_id {
        /*
         * The css to which this ID points. This pointer is set to a valid
         * value after the cgroup is populated. If the cgroup is removed,
         * this will be NULL. This pointer is expected to be RCU-safe
         * because destroy() is called after synchronize_rcu(). But for
         * safe use, css_is_removed() or css_tryget() should be used to
         * avoid races.
         */
        struct cgroup_subsys_state __rcu *css;
        /*
         * ID of this css.
         */
        unsigned short id;
        /*
         * Depth in the hierarchy which this ID belongs to.
         */
        unsigned short depth;
        /*
         * ID is freed by RCU. (and the lookup routine is RCU safe.)
         */
        struct rcu_head rcu_head;
        /*
         * Hierarchy this CSS ID belongs to.
         */
        unsigned short stack[0]; /* Array of length (depth+1) */
};
/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
        /*
         * Cgroup which the event belongs to.
         */
        struct cgroup *cgrp;
        /*
         * Control file which the event is associated with.
         */
        struct cftype *cft;
        /*
         * eventfd to signal userspace about the event.
         */
        struct eventfd_ctx *eventfd;
        /*
         * Each of these is stored in a list by the cgroup.
         */
        struct list_head list;
        /*
         * All fields below are needed to unregister the event when
         * userspace closes the eventfd.
         */
        poll_table pt;
        wait_queue_head_t *wqh;
        wait_queue_t wait;
        struct work_struct remove;
};
/* The list of hierarchy roots */
static LIST_HEAD(roots);
static int root_count;

static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/*
 * This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

#ifdef CONFIG_PROVE_LOCKING
int cgroup_lock_is_held(void)
{
        return lockdep_is_held(&cgroup_mutex);
}
#else /* #ifdef CONFIG_PROVE_LOCKING */
int cgroup_lock_is_held(void)
{
        return mutex_is_locked(&cgroup_mutex);
}
#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
EXPORT_SYMBOL_GPL(cgroup_lock_is_held);

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
        return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
        ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
        const int bits =
                (1 << CGRP_RELEASABLE) |
                (1 << CGRP_NOTIFY_ON_RELEASE);
        return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
        return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

static int clone_children(const struct cgroup *cgrp)
{
        return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy.
 */
#define for_each_subsys(_root, _ss) \
        list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
        list_for_each_entry(_root, &roots, root_list)
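
/*
 * Illustrative usage (sketch, not in the original source): both macros
 * are plain list iterations, e.g.
 *
 *      struct cgroupfs_root *root;
 *      struct cgroup_subsys *ss;
 *      for_each_active_root(root)
 *              for_each_subsys(root, ss)
 *                      printk(KERN_DEBUG "%s on hierarchy %d\n",
 *                             ss->name, root->hierarchy_id);
 *
 * Callers must hold cgroup_mutex to keep both lists stable.
 */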
/*
 * The list of cgroups eligible for automatic release. Protected by
 * release_list_lock.
 */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
        /*
         * List running through cg_cgroup_links associated with a
         * cgroup, anchored on cgroup->css_sets
         */
        struct list_head cgrp_link_list;
        struct cgroup *cgrp;
        /*
         * List running through cg_cgroup_links pointing at a
         * single css_set object, anchored on css_set->cg_links
         */
        struct list_head cg_link_list;
        struct css_set *cg;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
                           struct cgroup_subsys_state *css);

/*
 * css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set. Nests outside task->alloc_lock
 * due to cgroup_iter_start().
 */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This speeds up finding an existing
 * css_set. This hash doesn't (currently) take into account cgroups
 * in empty hierarchies.
 */
#define CSS_SET_HASH_BITS 7
#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
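
/*
 * Descriptive note (added): css_set_hash() folds the css pointers of a
 * candidate set into a bucket index - the pointers are summed, the sum
 * is mixed by XORing its high bits down, and hash_long() reduces the
 * result to CSS_SET_HASH_BITS bits.
 */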
static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
{
        int i;
        int index;
        unsigned long tmp = 0UL;

        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
                tmp += (unsigned long)css[i];
        tmp = (tmp >> 16) ^ tmp;

        index = hash_long(tmp, CSS_SET_HASH_BITS);

        return &css_set_table[index];
}
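
/*
 * Descriptive note (added): RCU callback that actually frees a css_set
 * once the grace period following the final put in __put_css_set() has
 * elapsed.
 */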
static void free_css_set_rcu(struct rcu_head *obj)
{
        struct css_set *cg = container_of(obj, struct css_set, rcu_head);
        kfree(cg);
}

/*
 * We don't maintain the lists running through each css_set to its
 * task until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use.
 */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cg, int taskexit)
{
        struct cg_cgroup_link *link;
        struct cg_cgroup_link *saved_link;

        /*
         * Ensure that the refcount doesn't hit zero while any readers
         * can see it. Similar to atomic_dec_and_lock(), but for an
         * rwlock
         */
        if (atomic_add_unless(&cg->refcount, -1, 1))
                return;
        write_lock(&css_set_lock);
        if (!atomic_dec_and_test(&cg->refcount)) {
                write_unlock(&css_set_lock);
                return;
        }

        /* This css_set is dead. Unlink it and release cgroup refcounts */
        hlist_del(&cg->hlist);
        css_set_count--;

        list_for_each_entry_safe(link, saved_link, &cg->cg_links,
                                 cg_link_list) {
                struct cgroup *cgrp = link->cgrp;
                list_del(&link->cg_link_list);
                list_del(&link->cgrp_link_list);
                if (atomic_dec_and_test(&cgrp->count) &&
                    notify_on_release(cgrp)) {
                        if (taskexit)
                                set_bit(CGRP_RELEASABLE, &cgrp->flags);
                        check_for_release(cgrp);
                }
                kfree(link);
        }

        write_unlock(&css_set_lock);
        call_rcu(&cg->rcu_head, free_css_set_rcu);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
        atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
        __put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
        __put_css_set(cg, 1);
}
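
/*
 * Note (added for clarity): every get_css_set() is paired with a
 * put_css_set(); the _taskexit variant is used from the task-exit
 * path so that, per __put_css_set() above, the cgroup is also marked
 * CGRP_RELEASABLE when its last task leaves.
 */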
/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
                             struct css_set *old_cg,
                             struct cgroup *new_cgrp,
                             struct cgroup_subsys_state *template[])
{
        struct list_head *l1, *l2;

        if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
                /* Not all subsystems matched */
                return false;
        }

        /*
         * Compare cgroup pointers in order to distinguish between
         * different cgroups in hierarchies with no subsystems. We
         * could get by with just this check alone (and skip the
         * memcmp above) but on most setups the memcmp check will
         * avoid the need for this more expensive check on almost all
         * candidates.
         */
        l1 = &cg->cg_links;
        l2 = &old_cg->cg_links;
        while (1) {
                struct cg_cgroup_link *cgl1, *cgl2;
                struct cgroup *cg1, *cg2;

                l1 = l1->next;
                l2 = l2->next;
                /* See if we reached the end - both lists are equal length. */
                if (l1 == &cg->cg_links) {
                        BUG_ON(l2 != &old_cg->cg_links);
                        break;
                } else {
                        BUG_ON(l2 == &old_cg->cg_links);
                }
                /* Locate the cgroups associated with these links. */
                cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
                cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
                cg1 = cgl1->cgrp;
                cg2 = cgl2->cgrp;
                /* Hierarchies should be linked in the same order. */
                BUG_ON(cg1->root != cg2->root);

                /*
                 * If this hierarchy is the hierarchy of the cgroup
                 * that's changing, then we need to check that this
                 * css_set points to the new cgroup; if it's any other
                 * hierarchy, then this css_set should point to the
                 * same cgroup as the old css_set.
                 */
                if (cg1->root == new_cgrp->root) {
                        if (cg1 != new_cgrp)
                                return false;
                } else {
                        if (cg1 != cg2)
                                return false;
                }
        }
        return true;
}
/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the cgroup group that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new cgroup group
 */
static struct css_set *find_existing_css_set(
        struct css_set *oldcg,
        struct cgroup *cgrp,
        struct cgroup_subsys_state *template[])
{
        int i;
        struct cgroupfs_root *root = cgrp->root;
        struct hlist_head *hhead;
        struct hlist_node *node;
        struct css_set *cg;

        /*
         * Build the set of subsystem state objects that we want to see in the
         * new css_set. While subsystems can change globally, the entries here
         * won't change, so no need for locking.
         */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                if (root->subsys_bits & (1UL << i)) {
                        /*
                         * Subsystem is in this hierarchy. So we want
                         * the subsystem state from the new cgroup.
                         */
                        template[i] = cgrp->subsys[i];
                } else {
                        /*
                         * Subsystem is not in this hierarchy, so we
                         * don't want to change the subsystem state.
                         */
                        template[i] = oldcg->subsys[i];
                }
        }

        hhead = css_set_hash(template);
        hlist_for_each_entry(cg, node, hhead, hlist) {
                if (!compare_css_sets(cg, oldcg, cgrp, template))
                        continue;

                /* This css_set matches what we need */
                return cg;
        }

        /* No existing cgroup group matched */
        return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
        struct cg_cgroup_link *link;
        struct cg_cgroup_link *saved_link;

        list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
                list_del(&link->cgrp_link_list);
                kfree(link);
        }
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error.
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
        struct cg_cgroup_link *link;
        int i;

        INIT_LIST_HEAD(tmp);
        for (i = 0; i < count; i++) {
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        free_cg_links(tmp);
                        return -ENOMEM;
                }
                list_add(&link->cgrp_link_list, tmp);
        }
        return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
                         struct css_set *cg, struct cgroup *cgrp)
{
        struct cg_cgroup_link *link;

        BUG_ON(list_empty(tmp_cg_links));
        link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
                                cgrp_link_list);
        link->cg = cg;
        link->cgrp = cgrp;
        atomic_inc(&cgrp->count);
        list_move(&link->cgrp_link_list, &cgrp->css_sets);
        /*
         * Always add links to the tail of the list so that the list
         * is sorted by order of hierarchy creation
         */
        list_add_tail(&link->cg_link_list, &cg->cg_links);
}

/*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old group, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held.
 */
static struct css_set *find_css_set(
        struct css_set *oldcg, struct cgroup *cgrp)
{
        struct css_set *res;
        struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
        struct list_head tmp_cg_links;
        struct hlist_head *hhead;
        struct cg_cgroup_link *link;

        /*
         * First see if we already have a cgroup group that matches
         * the desired set.
         */
        read_lock(&css_set_lock);
        res = find_existing_css_set(oldcg, cgrp, template);
        if (res)
                get_css_set(res);
        read_unlock(&css_set_lock);

        if (res)
                return res;

        res = kmalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        /* Allocate all the cg_cgroup_link objects that we'll need */
        if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
                kfree(res);
                return NULL;
        }

        atomic_set(&res->refcount, 1);
        INIT_LIST_HEAD(&res->cg_links);
        INIT_LIST_HEAD(&res->tasks);
        INIT_HLIST_NODE(&res->hlist);

        /*
         * Copy the set of subsystem state objects generated in
         * find_existing_css_set().
         */
        memcpy(res->subsys, template, sizeof(res->subsys));

        write_lock(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
                struct cgroup *c = link->cgrp;
                if (c->root == cgrp->root)
                        c = cgrp;
                link_css_set(&tmp_cg_links, res, c);
        }

        BUG_ON(!list_empty(&tmp_cg_links));

        css_set_count++;

        /* Add this cgroup group to the hash table */
        hhead = css_set_hash(res->subsys);
        hlist_add_head(&res->hlist, hhead);

        write_unlock(&css_set_lock);

        return res;
}
/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
                                            struct cgroupfs_root *root)
{
        struct css_set *css;
        struct cgroup *res = NULL;

        BUG_ON(!mutex_is_locked(&cgroup_mutex));
        read_lock(&css_set_lock);
        /*
         * No need to lock the task - since we hold cgroup_mutex the
         * task can't change groups, so the only thing that can happen
         * is that it exits and its css is set back to init_css_set.
         */
        css = task->cgroups;
        if (css == &init_css_set) {
                res = &root->top_cgroup;
        } else {
                struct cg_cgroup_link *link;
                list_for_each_entry(link, &css->cg_links, cg_link_list) {
                        struct cgroup *c = link->cgrp;
                        if (c->root == root) {
                                res = c;
                                break;
                        }
                }
        }
        read_unlock(&css_set_lock);
        BUG_ON(!res);
        return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit() don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either child cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task().
 */

/**
 * cgroup_lock - lock out any changes to cgroup structures
 */
void cgroup_lock(void)
{
        mutex_lock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_lock);

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
        mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unlock);
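
/*
 * Illustrative usage (sketch, not in the original source): external
 * callers bracket hierarchy-wide reads or updates with this pair:
 *
 *      cgroup_lock();
 *      ... inspect or modify cgroup state under cgroup_mutex ...
 *      cgroup_unlock();
 */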
/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
        .name           = "cgroup",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
                        struct cgroup *parent, struct cgroup *child);
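
/*
 * Descriptive note (added): allocate and minimally initialise an inode
 * for a cgroupfs file or directory on superblock @sb; ownership and
 * timestamps come from the current task's credentials and the current
 * time.
 */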
static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
        struct inode *inode = new_inode(sb);

        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
        }
        return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
        struct cgroup_subsys *ss;
        int ret = 0;

        for_each_subsys(cgrp->root, ss)
                if (ss->pre_destroy) {
                        ret = ss->pre_destroy(ss, cgrp);
                        if (ret)
                                break;
                }

        return ret;
}

static void free_cgroup_rcu(struct rcu_head *obj)
{
        struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);

        kfree(cgrp);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
        /* is dentry a directory? If so, kfree() associated cgroup */
        if (S_ISDIR(inode->i_mode)) {
                struct cgroup *cgrp = dentry->d_fsdata;
                struct cgroup_subsys *ss;
                BUG_ON(!(cgroup_is_removed(cgrp)));
                /*
                 * It's possible for external users to be holding css
                 * reference counts on a cgroup; css_put() needs to
                 * be able to access the cgroup after decrementing
                 * the reference count in order to know if it needs to
                 * queue the cgroup to be handled by the release
                 * agent.
                 */
                synchronize_rcu();
                mutex_lock(&cgroup_mutex);
                /*
                 * Release the subsystem state objects.
                 */
                for_each_subsys(cgrp->root, ss)
                        ss->destroy(ss, cgrp);

                cgrp->root->number_of_cgroups--;
                mutex_unlock(&cgroup_mutex);

                /*
                 * Drop the active superblock reference that we took when we
                 * created the cgroup.
                 */
                deactivate_super(cgrp->root->sb);

                /*
                 * if we're getting rid of the cgroup, refcount should ensure
                 * that there are no pidlists left.
                 */
                BUG_ON(!list_empty(&cgrp->pidlists));

                call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
        }
        iput(inode);
}
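
/*
 * Descriptive note (added): d_delete() callback - always returning 1
 * tells the VFS to delete cgroup dentries as soon as their refcount
 * drops, rather than keeping unused ones cached.
 */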
static int cgroup_delete(const struct dentry *d)
{
        return 1;
}
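
/* Descriptive note (added): unhash @d and remove it from its parent
 * directory. */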
static void remove_dir(struct dentry *d)
{
        struct dentry *parent = dget(d->d_parent);

        d_delete(d);
        simple_rmdir(parent->d_inode, d);
        dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
        struct list_head *node;

        BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
        spin_lock(&dentry->d_lock);
        node = dentry->d_subdirs.next;
        while (node != &dentry->d_subdirs) {
                struct dentry *d = list_entry(node, struct dentry, d_u.d_child);

                spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
                list_del_init(node);
                if (d->d_inode) {
                        /*
                         * This should never be called on a cgroup
                         * directory with child cgroups.
                         */
                        BUG_ON(d->d_inode->i_mode & S_IFDIR);
                        dget_dlock(d);
                        spin_unlock(&d->d_lock);
                        spin_unlock(&dentry->d_lock);
                        d_delete(d);
                        simple_unlink(dentry->d_inode, d);
                        dput(d);
                        spin_lock(&dentry->d_lock);
                } else
                        spin_unlock(&d->d_lock);
                node = dentry->d_subdirs.next;
        }
        spin_unlock(&dentry->d_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
        struct dentry *parent;

        cgroup_clear_directory(dentry);

        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
        list_del_init(&dentry->d_u.d_child);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
        remove_dir(dentry);
}

/*
 * A queue for waiters to do rmdir() of a cgroup. A task will sleep
 * when cgroup->count == 0 && list_empty(&cgroup->children) && some
 * subsystem still holds a reference to css->refcnt. In general, this
 * refcnt is expected to go down to zero soon.
 *
 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
 */
DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);

static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
{
        if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
                wake_up_all(&cgroup_rmdir_waitq);
}

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
{
        css_get(css);
}

void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
{
        cgroup_wakeup_rmdir_waiter(css->cgroup);
        css_put(css);
}

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
                             unsigned long final_bits)
{
        unsigned long added_bits, removed_bits;
        struct cgroup *cgrp = &root->top_cgroup;
        int i;

        BUG_ON(!mutex_is_locked(&cgroup_mutex));

        removed_bits = root->actual_subsys_bits & ~final_bits;
        added_bits = final_bits & ~root->actual_subsys_bits;
        /* Check that any added subsystems are currently free */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                unsigned long bit = 1UL << i;
                struct cgroup_subsys *ss = subsys[i];

                if (!(bit & added_bits))
                        continue;
                /*
                 * Nobody should tell us to do a subsys that doesn't exist:
                 * parse_cgroupfs_options should catch that case and refcounts
                 * ensure that subsystems won't disappear once selected.
                 */
                BUG_ON(ss == NULL);
                if (ss->root != &rootnode) {
                        /* Subsystem isn't free */
                        return -EBUSY;
                }
        }

        /*
         * Currently we don't handle adding/removing subsystems when
         * any child cgroups exist. This is theoretically supportable
         * but involves complex error handling, so it's being left until
         * later.
         */
        if (root->number_of_cgroups > 1)
                return -EBUSY;

        /* Process each subsystem */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                unsigned long bit = 1UL << i;

                if (bit & added_bits) {
                        /* We're binding this subsystem to this hierarchy */
                        BUG_ON(ss == NULL);
                        BUG_ON(cgrp->subsys[i]);
                        BUG_ON(!dummytop->subsys[i]);
                        BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
                        mutex_lock(&ss->hierarchy_mutex);
                        cgrp->subsys[i] = dummytop->subsys[i];
                        cgrp->subsys[i]->cgroup = cgrp;
                        list_move(&ss->sibling, &root->subsys_list);
                        ss->root = root;
                        if (ss->bind)
                                ss->bind(ss, cgrp);
                        mutex_unlock(&ss->hierarchy_mutex);
                        /* refcount was already taken, and we're keeping it */
                } else if (bit & removed_bits) {
                        /* We're removing this subsystem */
                        BUG_ON(ss == NULL);
                        BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
                        BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
                        mutex_lock(&ss->hierarchy_mutex);
                        if (ss->bind)
                                ss->bind(ss, dummytop);
                        dummytop->subsys[i]->cgroup = dummytop;
                        cgrp->subsys[i] = NULL;
                        subsys[i]->root = &rootnode;
                        list_move(&ss->sibling, &rootnode.subsys_list);
                        mutex_unlock(&ss->hierarchy_mutex);
                        /* subsystem is now free - drop reference on module */
                        module_put(ss->module);
                } else if (bit & final_bits) {
                        /* Subsystem state should already exist */
                        BUG_ON(ss == NULL);
                        BUG_ON(!cgrp->subsys[i]);
                        /*
                         * a refcount was taken, but we already had one, so
                         * drop the extra reference.
                         */
                        module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
                        BUG_ON(ss->module && !module_refcount(ss->module));
#endif
                } else {
                        /* Subsystem state shouldn't exist */
                        BUG_ON(cgrp->subsys[i]);
                }
        }
        root->subsys_bits = root->actual_subsys_bits = final_bits;
        synchronize_rcu();

        return 0;
}
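
/*
 * Descriptive note (added): show mount options for a cgroup hierarchy;
 * this is what appears after the device name for a cgroupfs entry in
 * /proc/mounts.
 */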
  924. static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
  925. {
  926. struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
  927. struct cgroup_subsys *ss;
  928. mutex_lock(&cgroup_mutex);
  929. for_each_subsys(root, ss)
  930. seq_printf(seq, ",%s", ss->name);
  931. if (test_bit(ROOT_NOPREFIX, &root->flags))
  932. seq_puts(seq, ",noprefix");
  933. if (strlen(root->release_agent_path))
  934. seq_printf(seq, ",release_agent=%s", root->release_agent_path);
  935. if (clone_children(&root->top_cgroup))
  936. seq_puts(seq, ",clone_children");
  937. if (strlen(root->name))
  938. seq_printf(seq, ",name=%s", root->name);
  939. mutex_unlock(&cgroup_mutex);
  940. return 0;
  941. }
  942. struct cgroup_sb_opts {
  943. unsigned long subsys_bits;
  944. unsigned long flags;
  945. char *release_agent;
  946. bool clone_children;
  947. char *name;
  948. /* User explicitly requested empty subsystem */
  949. bool none;
  950. struct cgroupfs_root *new_root;
  951. };
/*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->clone_children = true;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_bits);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems;
	 * otherwise, if neither 'all', 'none' nor any subsystem name was
	 * specified, default to 'all' as well.
	 */
	if (all_ss || (!all_ss && !one_ss && !opts->none)) {
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (ss->disabled)
				continue;
			set_bit(i, &opts->subsys_bits);
		}
	}

	/* Consistency checks */

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
	    (opts->subsys_bits & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_bits && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_bits && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_bits))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * oops, one of the modules was going away. this means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
			/* drop refcounts only on the ones we took */
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_bits))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}
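/*
 * Illustrative sketch of the parsing above (the option strings are
 * hypothetical examples, not taken from this file): the mount data
 * "cpuset,noprefix,release_agent=/sbin/agent" sets the cpuset bit in
 * opts->subsys_bits, sets ROOT_NOPREFIX in opts->flags and kstrndup()s
 * "/sbin/agent" into opts->release_agent; "none,name=mygrp" instead
 * yields empty subsys_bits with opts->none = true and
 * opts->name = "mygrp".
 */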
static void drop_parsed_module_refcounts(unsigned long subsys_bits)
{
	int i;
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & subsys_bits))
			continue;
		module_put(subsys[i]->module);
	}
}
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);
	if (ret) {
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	/* (re)populate subsystem files */
	cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}
static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;
	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	init_cgroup_housekeeping(cgrp);
}
static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret);
	return true;
}
static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_bits || opts->none)
	    && (opts->subsys_bits != root->subsys_bits))
		return 0;

	return 1;
}
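/*
 * Example (illustrative option strings): mounting with "-o cpu,cpuacct"
 * twice returns the same superblock from sget(), since the second
 * mount's subsys_bits match the existing root's; mounting
 * "-o none,name=a" and then "-o none,name=b" creates two distinct
 * hierarchies, because the name comparison above fails.
 */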
static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_bits && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	root->subsys_bits = opts->subsys_bits;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->clone_children)
		set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (!root)
		return;

	BUG_ON(!root->hierarchy_id);
	spin_lock(&hierarchy_id_lock);
	ida_remove(&hierarchy_ida, root->hierarchy_id);
	spin_unlock(&hierarchy_id_lock);
	kfree(root);
}
static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_bits && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}
static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}
static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
		struct cgroupfs_root *existing_root;
		int i;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		if (strlen(root->name)) {
			/* Check for name clashes with existing mounts */
			for_each_active_root(existing_root) {
				if (!strcmp(existing_root->name, root->name)) {
					ret = -EBUSY;
					mutex_unlock(&cgroup_mutex);
					mutex_unlock(&inode->i_mutex);
					goto drop_new_super;
				}
			}
		}

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			free_cg_links(&tmp_cg_links);
			goto drop_new_super;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
			struct hlist_head *hhead = &css_set_table[i];
			struct hlist_node *node;
			struct css_set *cg;

			hlist_for_each_entry(cg, node, hhead, hlist)
				link_css_set(&tmp_cg_links, cg, root_cgrp);
		}
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->sibling));
		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(root_cgrp);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);
		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_bits);
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 drop_new_super:
	deactivate_locked_super(sb);
 drop_modules:
	drop_parsed_module_refcounts(opts.subsys_bits);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}
static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_mutex);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};
static struct kobject *cgroup_kobj;

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}
/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Called with cgroup_mutex held or else with an RCU-protected cgroup
 * reference.  Writes path of cgroup into buf.  Returns 0 on success,
 * -errno on error.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	char *start;
	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
						      rcu_read_lock_held() ||
						      cgroup_lock_is_held());

	if (!dentry || cgrp == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = dentry->d_name.len;

		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;

		dentry = rcu_dereference_check(cgrp->dentry,
					       rcu_read_lock_held() ||
					       cgroup_lock_is_held());
		if (!cgrp->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_path);
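/*
 * Example usage (hypothetical caller, shown for illustration):
 *
 *	char buf[PATH_MAX];
 *
 *	rcu_read_lock();
 *	if (!cgroup_path(cgrp, buf, PATH_MAX))
 *		pr_debug("task belongs to cgroup %s\n", buf);
 *	rcu_read_unlock();
 *
 * For the root cgroup this yields "/"; for a nested cgroup the path is
 * assembled right-to-left, e.g. "/parent/child".
 */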
/**
 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
 * @cgrp: the cgroup the task is attaching to
 * @tsk: the task to be attached
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during call.
 */
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	int retval = 0;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroup *oldcgrp;
	struct css_set *cg;
	struct css_set *newcg;
	struct cgroupfs_root *root = cgrp->root;

	/* Nothing to do if the task is already in that cgroup */
	oldcgrp = task_cgroup_from_root(tsk, root);
	if (cgrp == oldcgrp)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, tsk, false);
			if (retval) {
				/*
				 * Remember on which subsystem the can_attach()
				 * failed, so that we only call cancel_attach()
				 * against the subsystems whose can_attach()
				 * succeeded. (See below)
				 */
				failed_ss = ss;
				goto out;
			}
		}
	}

	task_lock(tsk);
	cg = tsk->cgroups;
	get_css_set(cg);
	task_unlock(tsk);
	/*
	 * Locate or allocate a new css_set for this task,
	 * based on its final set of cgroups
	 */
	newcg = find_css_set(cg, cgrp);
	put_css_set(cg);
	if (!newcg) {
		retval = -ENOMEM;
		goto out;
	}

	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		put_css_set(newcg);
		retval = -ESRCH;
		goto out;
	}
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list)) {
		list_del(&tsk->cg_list);
		list_add(&tsk->cg_list, &newcg->tasks);
	}
	write_unlock(&css_set_lock);

	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk, false);
	}
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	synchronize_rcu();
	put_css_set(cg);

	/*
	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
	 * is no longer empty.
	 */
	cgroup_wakeup_rmdir_waiter(cgrp);
out:
	if (retval) {
		for_each_subsys(root, ss) {
			if (ss == failed_ss)
				/*
				 * This subsystem was the one that failed the
				 * can_attach() check earlier, so we don't need
				 * to call cancel_attach() against it or any
				 * remaining subsystems.
				 */
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(ss, cgrp, tsk, false);
		}
	}
	return retval;
}
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	cgroup_lock();
	for_each_active_root(root) {
		struct cgroup *from_cg = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cg, tsk);
		if (retval)
			break;
	}
	cgroup_unlock();

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
 * held. May take task_lock of task
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			rcu_read_unlock();
			return -ESRCH;
		}

		tcred = __task_cred(tsk);
		if (cred->euid &&
		    cred->euid != tcred->uid &&
		    cred->euid != tcred->suid) {
			rcu_read_unlock();
			return -EACCES;
		}
		get_task_struct(tsk);
		rcu_read_unlock();
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	ret = cgroup_attach_task(cgrp, tsk);
	put_task_struct(tsk);
	return ret;
}

static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
	int ret;
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	ret = attach_task_by_pid(cgrp, pid);
	cgroup_unlock();
	return ret;
}
/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the lock should be later released with
 * cgroup_unlock(). On failure returns false with no lock held.
 */
bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_removed(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(cgroup_lock_live_group);
static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	strcpy(cgrp->root->release_agent_path, buffer);
	cgroup_unlock();
	return 0;
}

static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	cgroup_unlock();
	return 0;
}
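/*
 * From userspace, these two handlers back the "release_agent" control
 * file in the hierarchy root; an illustrative shell session (the mount
 * point and agent path are hypothetical):
 *
 *	echo /sbin/cgroup_release > /cgroup/release_agent
 *	cat /cgroup/release_agent
 */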
/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
				struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;	/* nul-terminate */
	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(cgrp, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(cgrp, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}
static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
				   struct file *file,
				   const char __user *userbuf,
				   size_t nbytes, loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;	/* nul-terminate */
	retval = cft->write_string(cgrp, cft, strstrip(buffer));
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}
static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;
	if (cft->write)
		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}
static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(cgrp, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(cgrp, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;

	if (cft->read)
		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}
/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

struct cgroup_seqfile_state {
	struct cftype *cft;
	struct cgroup *cgroup;
};

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cgroup_seqfile_state *state = m->private;
	struct cftype *cft = state->cft;
	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(state->cgroup, cft, &cb);
	}
	return cft->read_seq_string(state->cgroup, cft, m);
}

static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	kfree(seq->private);
	return single_release(inode, file);
}

static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = cgroup_seqfile_release,
};
static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;
	cft = __d_cft(file->f_dentry);

	if (cft->read_map || cft->read_seq_string) {
		struct cgroup_seqfile_state *state =
			kzalloc(sizeof(*state), GFP_USER);
		if (!state)
			return -ENOMEM;
		state->cft = cft;
		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, state);
		if (err < 0)
			kfree(state);
	} else if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};
static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = cgroup_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
};

static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry,
				    struct nameidata *nd)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_add(dentry, NULL);
	return NULL;
}
/*
 * Check if a file is a control file
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}
static int cgroup_create_file(struct dentry *dentry, mode_t mode,
			      struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);

		/* start with the directory inode held, so that we can
		 * populate it without racing with another mkdir */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}
/*
 * cgroup_create_dir - create a directory for an object.
 * @cgrp: the cgroup we create the directory for. It must have a valid
 *        ->parent field. And we are going to fill its ->dentry field.
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new directory.
 */
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
			     mode_t mode)
{
	struct dentry *parent;
	int error = 0;

	parent = cgrp->parent->dentry;
	error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
	if (!error) {
		dentry->d_fsdata = cgrp;
		inc_nlink(parent->d_inode);
		rcu_assign_pointer(cgrp->dentry, dentry);
		dget(dentry);
	}
	dput(dentry);

	return error;
}
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static mode_t cgroup_file_mode(const struct cftype *cft)
{
	mode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
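/*
 * Example (illustrative): a cftype defining only .read_u64 gets mode
 * 0444 (S_IRUGO); one with both .read_u64 and .write_u64 gets 0644
 * (S_IRUGO | S_IWUSR); a write-only .trigger file gets 0200 (S_IWUSR).
 */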
int cgroup_add_file(struct cgroup *cgrp,
		    struct cgroup_subsys *subsys,
		    const struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct dentry *dentry;
	int error;
	mode_t mode;

	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);
	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
	dentry = lookup_one_len(name, dir, strlen(name));
	if (!IS_ERR(dentry)) {
		mode = cgroup_file_mode(cft);
		error = cgroup_create_file(dentry, mode | S_IFREG,
					   cgrp->root->sb);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	return error;
}
EXPORT_SYMBOL_GPL(cgroup_add_file);
int cgroup_add_files(struct cgroup *cgrp,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count)
{
	int i, err;
	for (i = 0; i < count; i++) {
		err = cgroup_add_file(cgrp, subsys, &cft[i]);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_add_files);
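/*
 * Sketch of a typical caller (a hypothetical subsystem "foo"; the names
 * and handlers below are illustrative, not part of this file):
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "limit",
 *			.read_u64 = foo_read_limit,
 *			.write_u64 = foo_write_limit,
 *		},
 *	};
 *
 *	static int foo_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 *
 * With ROOT_NOPREFIX clear, cgroup_add_file() above would create the
 * control file as "foo.limit".
 */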
/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
		count += atomic_read(&link->cg->refcount);
	}
	read_unlock(&css_set_lock);
	return count;
}
/*
 * Advance a list_head iterator.  The iterator should be positioned at
 * the start of a css_set
 */
static void cgroup_advance_iter(struct cgroup *cgrp,
				struct cgroup_iter *it)
{
	struct list_head *l = it->cg_link;
	struct cg_cgroup_link *link;
	struct css_set *cg;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &cgrp->css_sets) {
			it->cg_link = NULL;
			return;
		}
		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
		cg = link->cg;
	} while (list_empty(&cg->tasks));
	it->cg_link = l;
	it->task = cg->tasks.next;
}
/*
 * To reduce the fork() overhead for systems that are not actually
 * using their cgroups capability, we don't maintain the lists running
 * through each css_set to its tasks until we see the list actually
 * used - in other words after the first call to cgroup_iter_start().
 *
 * The tasklist_lock is not held here, as do_each_thread() and
 * while_each_thread() are protected by RCU.
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;
	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &p->cgroups->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	write_unlock(&css_set_lock);
}
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
{
	/*
	 * The first time anyone tries to iterate across a cgroup,
	 * we need to enable the list linking each css_set to its
	 * tasks, and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);
	it->cg_link = &cgrp->css_sets;
	cgroup_advance_iter(cgrp, it);
}

struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cg_cgroup_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cg_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
	if (l == &link->cg->tasks) {
		/* We reached the end of this task list - move on to
		 * the next cg_cgroup_link */
		cgroup_advance_iter(cgrp, it);
	} else {
		it->task = l;
	}
	return res;
}

void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
{
	read_unlock(&css_set_lock);
}
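/*
 * The three functions above form a simple iteration protocol. A
 * hypothetical caller walks every task in a cgroup like this (the loop
 * body is illustrative):
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it)))
 *		do_something(tsk);
 *	cgroup_iter_end(cgrp, &it);
 *
 * css_set_lock is read-held between _start and _end, so the loop body
 * must not sleep.
 */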
static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}
/**
 * cgroup_scan_tasks - iterate though all the tasks in a cgroup
 * @scan: struct cgroup_scanner containing arguments for the scan
 *
 * Arguments include pointers to callback functions test_task() and
 * process_task().
 * Iterate through all the tasks in a cgroup, calling test_task() for each,
 * and if it returns true, call process_task() for it also.
 * The test_task pointer may be NULL, meaning always true (select all tasks).
 * Effectively duplicates cgroup_iter_{start,next,end}()
 * but does not lock css_set_lock for the call to process_task().
 * The struct cgroup_scanner may be embedded in any structure of the caller's
 * creation.
 * It is guaranteed that process_task() will act on every task that
 * is a member of the cgroup for the duration of this call. This
 * function may or may not call process_task() for tasks that exit
 * or move to a different cgroup during the call, or are forked or
 * move into the cgroup during the call.
 *
 * Note that test_task() may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should
 * be cheap.
 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
 * pre-allocated and will be used for heap operations (and its "gt" member will
 * be overwritten), else a temporary heap will be used (allocation of which
 * may cause this function to fail).
 */
int cgroup_scan_tasks(struct cgroup_scanner *scan)
{
	int retval, i;
	struct cgroup_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct ptr_heap *heap;
	struct timespec latest_time = { 0, 0 };

	if (scan->heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap = scan->heap;
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

 again:
	/*
	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
	 * to determine which are of interest, and using the scanner's
	 * "process_task" callback to process any of them that need an update.
	 * Since we don't want to hold any locks during the task updates,
	 * gather tasks to be processed in a heap structure.
	 * The heap is sorted by descending task start time.
	 * If the statically-sized heap fills up, we overflow tasks that
	 * started later, and in future iterations only consider tasks that
	 * started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	cgroup_iter_start(scan->cg, &it);
	while ((p = cgroup_iter_next(scan->cg, &it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if he provided one
		 */
		if (scan->test_task && !scan->test_task(p, scan))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	cgroup_iter_end(scan->cg, &it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			scan->process_task(q, scan);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
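/*
 * Sketch of a caller (hypothetical; the callback name is illustrative):
 *
 *	static void foo_process(struct task_struct *tsk,
 *				struct cgroup_scanner *scan)
 *	{
 *		... runs without css_set_lock held ...
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg = cgrp,
 *		.test_task = NULL,	(select all tasks)
 *		.process_task = foo_process,
 *		.heap = NULL,		(use a temporary heap)
 *	};
 *	retval = cgroup_scan_tasks(&scan);
 */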
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
static void *pidlist_resize(void *p, int newcount)
{
	void *newlist;
	/* note: if new alloc fails, old p will still be valid either way */
	if (is_vmalloc_addr(p)) {
		newlist = vmalloc(newcount * sizeof(pid_t));
		if (!newlist)
			return NULL;
		memcpy(newlist, p, newcount * sizeof(pid_t));
		vfree(p);
	} else {
		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
	}
	return newlist;
}
/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * If the new stripped list is sufficiently smaller and there's enough memory
 * to allocate a new buffer, will let go of the unneeded memory. Returns the
 * number of unique elements.
 */
/* is the size difference enough that we should re-allocate the array? */
#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
static int pidlist_uniq(pid_t **p, int length)
{
	int src, dest = 1;
	pid_t *list = *p;
	pid_t *newlist;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	/*
	 * if the length difference is large enough, we want to allocate a
	 * smaller buffer to save memory. if this fails due to out of memory,
	 * we'll just stay with what we've got.
	 */
	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
		newlist = pidlist_resize(list, dest);
		if (newlist)
			*p = newlist;
	}
	return dest;
}
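/*
 * Example (illustrative): on the sorted input {1, 1, 4, 4, 4, 9} the
 * loop above compacts the array in place to {1, 4, 9, ...} and returns
 * 3; the trailing slots are left as-is unless the count shrinks by at
 * least PAGE_SIZE entries, in which case pidlist_resize() reallocates
 * a smaller buffer.
 */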
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = current->nsproxy->pid_ns;

	/*
	 * We can't drop the pidlist_mutex before taking the l->mutex in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->mutex);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->mutex);
	down_write(&l->mutex);
	l->key.type = type;
	l->key.ns = get_pid_ns(ns);
	l->use_count = 0; /* don't increment here */
	l->list = NULL;
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}
/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct cgroup_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	cgroup_iter_end(cgrp, &it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(&array, length);
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->mutex);
	*lp = l;
	return 0;
}
/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct cgroup_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	cgroup_iter_end(cgrp, &it);

err:
	return ret;
}
/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */
static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	down_read(&l->mutex);
	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
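/*
 * Example (illustrative): with l->list = {3, 7, 9}, restarting at
 * *pos == 7 binary-searches straight to the entry for pid 7; if pid 7
 * vanished between reads, leaving {3, 9}, the same *pos lands on 9,
 * the next pid still present, and *pos is rewritten accordingly.
 */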
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_pidlist *l = s->private;
	up_read(&l->mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};
static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * the case where we're the last user of this particular pidlist will
	 * have us remove it from the cgroup's list, which entails taking the
	 * mutex. since in pidlist_find the pidlist->lock depends on cgroup->
	 * pidlist_mutex, we have to take pidlist_mutex first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->mutex);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		up_write(&l->mutex);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->mutex);
}

static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
	struct cgroup_pidlist *l;
	if (!(file->f_mode & FMODE_READ))
		return 0;
	/*
	 * the seq_file will only be initialized if the file was opened for
	 * reading; hence we check if it's not null only in that case.
	 */
	l = ((struct seq_file *)file->private_data)->private;
	cgroup_release_pid_array(l);
	return seq_release(inode, file);
}

static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};
  2680. /*
  2681. * The following functions handle opens on a file that displays a pidlist
  2682. * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
  2683. * in the cgroup.
  2684. */
  2685. /* helper function for the two below it */
  2686. static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
  2687. {
  2688. struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
  2689. struct cgroup_pidlist *l;
  2690. int retval;
  2691. /* Nothing to do for write-only files */
  2692. if (!(file->f_mode & FMODE_READ))
  2693. return 0;
  2694. /* have the array populated */
  2695. retval = pidlist_array_load(cgrp, type, &l);
  2696. if (retval)
  2697. return retval;
  2698. /* configure file information */
  2699. file->f_op = &cgroup_pidlist_operations;
  2700. retval = seq_open(file, &cgroup_pidlist_seq_operations);
  2701. if (retval) {
  2702. cgroup_release_pid_array(l);
  2703. return retval;
  2704. }
  2705. ((struct seq_file *)file->private_data)->private = l;
  2706. return 0;
  2707. }
  2708. static int cgroup_tasks_open(struct inode *unused, struct file *file)
  2709. {
  2710. return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
  2711. }
  2712. static int cgroup_procs_open(struct inode *unused, struct file *file)
  2713. {
  2714. return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
  2715. }
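
/*
 * Illustrative usage from userspace (the mount point and cgroup name
 * are hypothetical):
 *
 *        cat /dev/cgroup/foo/tasks          # read back member thread IDs
 *        echo $$ > /dev/cgroup/foo/tasks    # attach the current shell
 */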

static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
                                         struct cftype *cft)
{
        return notify_on_release(cgrp);
}

static int cgroup_write_notify_on_release(struct cgroup *cgrp,
                                          struct cftype *cft,
                                          u64 val)
{
        clear_bit(CGRP_RELEASABLE, &cgrp->flags);
        if (val)
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
        else
                clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
        return 0;
}

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
        struct cgroup_event *event = container_of(work, struct cgroup_event,
                        remove);
        struct cgroup *cgrp = event->cgrp;

        event->cft->unregister_event(cgrp, event->cft, event->eventfd);

        eventfd_ctx_put(event->eventfd);
        kfree(event);
        dput(cgrp->dentry);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
                int sync, void *key)
{
        struct cgroup_event *event = container_of(wait,
                        struct cgroup_event, wait);
        struct cgroup *cgrp = event->cgrp;
        unsigned long flags = (unsigned long)key;

        if (flags & POLLHUP) {
                __remove_wait_queue(event->wqh, &event->wait);
                spin_lock(&cgrp->event_list_lock);
                list_del(&event->list);
                spin_unlock(&cgrp->event_list_lock);
                /*
                 * We are in atomic context, but cgroup_event_remove() may
                 * sleep, so we have to call it in workqueue.
                 */
                schedule_work(&event->remove);
        }

        return 0;
}

static void cgroup_event_ptable_queue_proc(struct file *file,
                wait_queue_head_t *wqh, poll_table *pt)
{
        struct cgroup_event *event = container_of(pt,
                        struct cgroup_event, pt);

        event->wqh = wqh;
        add_wait_queue(wqh, &event->wait);
}

/*
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
                                      const char *buffer)
{
        struct cgroup_event *event = NULL;
        unsigned int efd, cfd;
        struct file *efile = NULL;
        struct file *cfile = NULL;
        char *endp;
        int ret;

        efd = simple_strtoul(buffer, &endp, 10);
        if (*endp != ' ')
                return -EINVAL;
        buffer = endp + 1;

        cfd = simple_strtoul(buffer, &endp, 10);
        if ((*endp != ' ') && (*endp != '\0'))
                return -EINVAL;
        buffer = endp + 1;

        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return -ENOMEM;
        event->cgrp = cgrp;
        INIT_LIST_HEAD(&event->list);
        init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
        init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
        INIT_WORK(&event->remove, cgroup_event_remove);

        efile = eventfd_fget(efd);
        if (IS_ERR(efile)) {
                ret = PTR_ERR(efile);
                goto fail;
        }

        event->eventfd = eventfd_ctx_fileget(efile);
        if (IS_ERR(event->eventfd)) {
                ret = PTR_ERR(event->eventfd);
                goto fail;
        }

        cfile = fget(cfd);
        if (!cfile) {
                ret = -EBADF;
                goto fail;
        }

        /* the process needs read permission on the control file */
        ret = file_permission(cfile, MAY_READ);
        if (ret < 0)
                goto fail;

        event->cft = __file_cft(cfile);
        if (IS_ERR(event->cft)) {
                ret = PTR_ERR(event->cft);
                goto fail;
        }

        if (!event->cft->register_event || !event->cft->unregister_event) {
                ret = -EINVAL;
                goto fail;
        }

        ret = event->cft->register_event(cgrp, event->cft,
                        event->eventfd, buffer);
        if (ret)
                goto fail;

        if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
                event->cft->unregister_event(cgrp, event->cft, event->eventfd);
                ret = 0;
                goto fail;
        }

        /*
         * Events should be removed after rmdir of cgroup directory, but before
         * destroying subsystem state objects. Let's take reference to cgroup
         * directory dentry to do that.
         */
        dget(cgrp->dentry);

        spin_lock(&cgrp->event_list_lock);
        list_add(&event->list, &cgrp->event_list);
        spin_unlock(&cgrp->event_list_lock);

        fput(cfile);
        fput(efile);

        return 0;

fail:
        if (cfile)
                fput(cfile);

        if (event && event->eventfd && !IS_ERR(event->eventfd))
                eventfd_ctx_put(event->eventfd);

        if (!IS_ERR_OR_NULL(efile))
                fput(efile);

        kfree(event);

        return ret;
}
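
/*
 * Illustrative registration sequence from a userspace program. The
 * control file must implement register_event/unregister_event; the
 * "memory.usage_in_bytes" file and the "50M" threshold argument are
 * assumptions borrowed from memcg's threshold events:
 *
 *        int efd = eventfd(0, 0);
 *        int cfd = open(".../memory.usage_in_bytes", O_RDONLY);
 *        int wfd = open(".../cgroup.event_control", O_WRONLY);
 *        dprintf(wfd, "%d %d 50M", efd, cfd);
 *        read(efd, &counter, 8);        // blocks until the event fires
 */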

static u64 cgroup_clone_children_read(struct cgroup *cgrp,
                                      struct cftype *cft)
{
        return clone_children(cgrp);
}

static int cgroup_clone_children_write(struct cgroup *cgrp,
                                       struct cftype *cft,
                                       u64 val)
{
        if (val)
                set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
        else
                clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
        return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */
/* for hysterical raisins, we can't put this on the older files */
#define CGROUP_FILE_GENERIC_PREFIX "cgroup."
static struct cftype files[] = {
        {
                .name = "tasks",
                .open = cgroup_tasks_open,
                .write_u64 = cgroup_tasks_write,
                .release = cgroup_pidlist_release,
                .mode = S_IRUGO | S_IWUSR,
        },
        {
                .name = CGROUP_FILE_GENERIC_PREFIX "procs",
                .open = cgroup_procs_open,
                /* .write_u64 = cgroup_procs_write, TODO */
                .release = cgroup_pidlist_release,
                .mode = S_IRUGO,
        },
        {
                .name = "notify_on_release",
                .read_u64 = cgroup_read_notify_on_release,
                .write_u64 = cgroup_write_notify_on_release,
        },
        {
                .name = CGROUP_FILE_GENERIC_PREFIX "event_control",
                .write_string = cgroup_write_event_control,
                .mode = S_IWUGO,
        },
        {
                .name = "cgroup.clone_children",
                .read_u64 = cgroup_clone_children_read,
                .write_u64 = cgroup_clone_children_write,
        },
};

static struct cftype cft_release_agent = {
        .name = "release_agent",
        .read_seq_string = cgroup_release_agent_show,
        .write_string = cgroup_release_agent_write,
        .max_write_len = PATH_MAX,
};
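
/*
 * Taken together with cgroup_populate_dir() below: every cgroup
 * directory carries tasks, cgroup.procs, notify_on_release,
 * cgroup.event_control and cgroup.clone_children, and the root
 * directory of each hierarchy additionally carries release_agent.
 */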

static int cgroup_populate_dir(struct cgroup *cgrp)
{
        int err;
        struct cgroup_subsys *ss;

        /* First clear out any existing files */
        cgroup_clear_directory(cgrp->dentry);

        err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
        if (err < 0)
                return err;

        if (cgrp == cgrp->top_cgroup) {
                if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
                        return err;
        }

        for_each_subsys(cgrp->root, ss) {
                if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
                        return err;
        }
        /* This cgroup is ready now */
        for_each_subsys(cgrp->root, ss) {
                struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
                /*
                 * Update id->css pointer and make this css visible from
                 * CSS ID functions. This pointer will be dereferenced
                 * from RCU-read-side without locks.
                 */
                if (css->id)
                        rcu_assign_pointer(css->id->css, css);
        }

        return 0;
}

static void init_cgroup_css(struct cgroup_subsys_state *css,
                            struct cgroup_subsys *ss,
                            struct cgroup *cgrp)
{
        css->cgroup = cgrp;
        atomic_set(&css->refcnt, 1);
        css->flags = 0;
        css->id = NULL;
        if (cgrp == dummytop)
                set_bit(CSS_ROOT, &css->flags);
        BUG_ON(cgrp->subsys[ss->subsys_id]);
        cgrp->subsys[ss->subsys_id] = css;
}

static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
{
        /* We need to take each hierarchy_mutex in a consistent order */
        int i;

        /*
         * No worry about a race with rebind_subsystems that might mess up the
         * locking order, since both parties are under cgroup_mutex.
         */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                if (ss == NULL)
                        continue;
                if (ss->root == root)
                        mutex_lock(&ss->hierarchy_mutex);
        }
}

static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
{
        int i;

        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                if (ss == NULL)
                        continue;
                if (ss->root == root)
                        mutex_unlock(&ss->hierarchy_mutex);
        }
}

/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                          mode_t mode)
{
        struct cgroup *cgrp;
        struct cgroupfs_root *root = parent->root;
        int err = 0;
        struct cgroup_subsys *ss;
        struct super_block *sb = root->sb;

        cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
        if (!cgrp)
                return -ENOMEM;

        /* Grab a reference on the superblock so the hierarchy doesn't
         * get deleted on unmount if there are child cgroups. This
         * can be done outside cgroup_mutex, since the sb can't
         * disappear while someone has an open control file on the
         * fs */
        atomic_inc(&sb->s_active);

        mutex_lock(&cgroup_mutex);

        init_cgroup_housekeeping(cgrp);

        cgrp->parent = parent;
        cgrp->root = parent->root;
        cgrp->top_cgroup = parent->top_cgroup;

        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

        if (clone_children(parent))
                set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);

        for_each_subsys(root, ss) {
                struct cgroup_subsys_state *css = ss->create(ss, cgrp);

                if (IS_ERR(css)) {
                        err = PTR_ERR(css);
                        goto err_destroy;
                }
                init_cgroup_css(css, ss, cgrp);
                if (ss->use_id) {
                        err = alloc_css_id(ss, parent, cgrp);
                        if (err)
                                goto err_destroy;
                }
                /* At error, ->destroy() callback has to free assigned ID. */
                if (clone_children(parent) && ss->post_clone)
                        ss->post_clone(ss, cgrp);
        }

        cgroup_lock_hierarchy(root);
        list_add(&cgrp->sibling, &cgrp->parent->children);
        cgroup_unlock_hierarchy(root);
        root->number_of_cgroups++;

        err = cgroup_create_dir(cgrp, dentry, mode);
        if (err < 0)
                goto err_remove;

        /* The cgroup directory was pre-locked for us */
        BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));

        err = cgroup_populate_dir(cgrp);
        /* If err < 0, we have a half-filled directory - oh well ;) */

        mutex_unlock(&cgroup_mutex);
        mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

        return 0;

err_remove:
        cgroup_lock_hierarchy(root);
        list_del(&cgrp->sibling);
        cgroup_unlock_hierarchy(root);
        root->number_of_cgroups--;

err_destroy:
        for_each_subsys(root, ss) {
                if (cgrp->subsys[ss->subsys_id])
                        ss->destroy(ss, cgrp);
        }

        mutex_unlock(&cgroup_mutex);

        /* Release the reference count that we took on the superblock */
        deactivate_super(sb);

        kfree(cgrp);
        return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct cgroup *c_parent = dentry->d_parent->d_fsdata;

        /* the vfs holds inode->i_mutex already */
        return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

static int cgroup_has_css_refs(struct cgroup *cgrp)
{
        /* Check the reference count on each subsystem. Since we
         * already established that there are no tasks in the
         * cgroup, if the css refcount is also 1, then there should
         * be no outstanding references, so the subsystem is safe to
         * destroy. We scan across all subsystems rather than using
         * the per-hierarchy linked list of mounted subsystems since
         * we can be called via check_for_release() with no
         * synchronization other than RCU, and the subsystem linked
         * list isn't RCU-safe */
        int i;
        /*
         * We won't need to lock the subsys array, because the subsystems
         * we're concerned about aren't going anywhere since our cgroup root
         * has a reference on them.
         */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                struct cgroup_subsys_state *css;
                /* Skip subsystems not present or not in this hierarchy */
                if (ss == NULL || ss->root != cgrp->root)
                        continue;
                css = cgrp->subsys[ss->subsys_id];
                /* When called from check_for_release() it's possible
                 * that by this point the cgroup has been removed
                 * and the css deleted. But a false-positive doesn't
                 * matter, since it can only happen if the cgroup
                 * has been deleted and hence no longer needs the
                 * release agent to be called anyway. */
                if (css && (atomic_read(&css->refcnt) > 1))
                        return 1;
        }
        return 0;
}

/*
 * Atomically mark all (or else none) of the cgroup's CSS objects as
 * CSS_REMOVED. Return true on success, or false if the cgroup has
 * busy subsystems. Call with cgroup_mutex held
 */
static int cgroup_clear_css_refs(struct cgroup *cgrp)
{
        struct cgroup_subsys *ss;
        unsigned long flags;
        bool failed = false;

        local_irq_save(flags);
        for_each_subsys(cgrp->root, ss) {
                struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
                int refcnt;
                while (1) {
                        /* We can only remove a CSS with a refcnt==1 */
                        refcnt = atomic_read(&css->refcnt);
                        if (refcnt > 1) {
                                failed = true;
                                goto done;
                        }
                        BUG_ON(!refcnt);
                        /*
                         * Drop the refcnt to 0 while we check other
                         * subsystems. This will cause any racing
                         * css_tryget() to spin until we set the
                         * CSS_REMOVED bits or abort
                         */
                        if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
                                break;
                        cpu_relax();
                }
        }
done:
        for_each_subsys(cgrp->root, ss) {
                struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
                if (failed) {
                        /*
                         * Restore old refcnt if we previously managed
                         * to clear it from 1 to 0
                         */
                        if (!atomic_read(&css->refcnt))
                                atomic_set(&css->refcnt, 1);
                } else {
                        /* Commit the fact that the CSS is removed */
                        set_bit(CSS_REMOVED, &css->flags);
                }
        }
        local_irq_restore(flags);
        return !failed;
}

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
        struct cgroup *cgrp = dentry->d_fsdata;
        struct dentry *d;
        struct cgroup *parent;
        DEFINE_WAIT(wait);
        struct cgroup_event *event, *tmp;
        int ret;

        /* the vfs holds both inode->i_mutex already */
again:
        mutex_lock(&cgroup_mutex);
        if (atomic_read(&cgrp->count) != 0) {
                mutex_unlock(&cgroup_mutex);
                return -EBUSY;
        }
        if (!list_empty(&cgrp->children)) {
                mutex_unlock(&cgroup_mutex);
                return -EBUSY;
        }
        mutex_unlock(&cgroup_mutex);

        /*
         * In general, subsystem has no css->refcnt after pre_destroy(). But
         * in racy cases, subsystem may have to get css->refcnt after
         * pre_destroy() and it makes rmdir return with -EBUSY. This sometimes
         * makes rmdir return -EBUSY too often. To avoid that, we use a
         * waitqueue for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for
         * synchronizing rmdir and the subsystem's reference count handling.
         * Please see css_get/put and css_tryget() and
         * cgroup_wakeup_rmdir_waiter() implementation.
         */
        set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

        /*
         * Call pre_destroy handlers of subsys. Notify subsystems
         * that rmdir() request comes.
         */
        ret = cgroup_call_pre_destroy(cgrp);
        if (ret) {
                clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
                return ret;
        }

        mutex_lock(&cgroup_mutex);
        parent = cgrp->parent;
        if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
                clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
                mutex_unlock(&cgroup_mutex);
                return -EBUSY;
        }
        prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
        if (!cgroup_clear_css_refs(cgrp)) {
                mutex_unlock(&cgroup_mutex);
                /*
                 * Because someone may call cgroup_wakeup_rmdir_waiter() before
                 * prepare_to_wait(), we need to check this flag.
                 */
                if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
                        schedule();
                finish_wait(&cgroup_rmdir_waitq, &wait);
                clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
                if (signal_pending(current))
                        return -EINTR;
                goto again;
        }
        /* No css_tryget() can succeed after this point. */
        finish_wait(&cgroup_rmdir_waitq, &wait);
        clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

        spin_lock(&release_list_lock);
        set_bit(CGRP_REMOVED, &cgrp->flags);
        if (!list_empty(&cgrp->release_list))
                list_del(&cgrp->release_list);
        spin_unlock(&release_list_lock);

        cgroup_lock_hierarchy(cgrp->root);
        /* delete this cgroup from parent->children */
        list_del(&cgrp->sibling);
        cgroup_unlock_hierarchy(cgrp->root);

        d = dget(cgrp->dentry);

        cgroup_d_remove_dir(d);
        dput(d);

        set_bit(CGRP_RELEASABLE, &parent->flags);
        check_for_release(parent);

        /*
         * Unregister events and notify userspace.
         * Notify userspace about cgroup removal only after rmdir of the
         * cgroup directory, to avoid a race between userspace and kernelspace.
         */
        spin_lock(&cgrp->event_list_lock);
        list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
                list_del(&event->list);
                remove_wait_queue(event->wqh, &event->wait);
                eventfd_signal(event->eventfd, 1);
                schedule_work(&event->remove);
        }
        spin_unlock(&cgrp->event_list_lock);

        mutex_unlock(&cgroup_mutex);
        return 0;
}

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
        struct cgroup_subsys_state *css;

        printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

        /* Create the top cgroup state for this subsystem */
        list_add(&ss->sibling, &rootnode.subsys_list);
        ss->root = &rootnode;
        css = ss->create(ss, dummytop);
        /* We don't handle early failures gracefully */
        BUG_ON(IS_ERR(css));
        init_cgroup_css(css, ss, dummytop);

        /* Update the init_css_set to contain a subsys
         * pointer to this state - since the subsystem is
         * newly registered, all tasks and hence the
         * init_css_set is in the subsystem's top cgroup. */
        init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];

        need_forkexit_callback |= ss->fork || ss->exit;

        /* At system boot, before all subsystems have been
         * registered, no tasks have been forked, so we don't
         * need to invoke fork callbacks here. */
        BUG_ON(!list_empty(&init_task.tasks));

        mutex_init(&ss->hierarchy_mutex);
        lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
        ss->active = 1;

        /* this function shouldn't be used with modular subsystems, since they
         * need to register a subsys_id, among other things */
        BUG_ON(ss->module);
}

/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 */
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
        int i;
        struct cgroup_subsys_state *css;

        /* check name and function validity */
        if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
            ss->create == NULL || ss->destroy == NULL)
                return -EINVAL;

        /*
         * we don't support callbacks in modular subsystems. this check is
         * before the ss->module check for consistency; a subsystem that could
         * be a module should still have no callbacks even if the user isn't
         * compiling it as one.
         */
        if (ss->fork || ss->exit)
                return -EINVAL;

        /*
         * an optionally modular subsystem is built-in: we want to do nothing,
         * since cgroup_init_subsys will have already taken care of it.
         */
        if (ss->module == NULL) {
                /* a few sanity checks */
                BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
                BUG_ON(subsys[ss->subsys_id] != ss);
                return 0;
        }

        /*
         * need to register a subsys id before anything else - for example,
         * init_cgroup_css needs it.
         */
        mutex_lock(&cgroup_mutex);
        /* find the first empty slot in the array */
        for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
                if (subsys[i] == NULL)
                        break;
        }
        if (i == CGROUP_SUBSYS_COUNT) {
                /* maximum number of subsystems already registered! */
                mutex_unlock(&cgroup_mutex);
                return -EBUSY;
        }
        /* assign ourselves the subsys_id */
        ss->subsys_id = i;
        subsys[i] = ss;

        /*
         * no ss->create seems to need anything important in the ss struct, so
         * this can happen first (i.e. before the rootnode attachment).
         */
        css = ss->create(ss, dummytop);
        if (IS_ERR(css)) {
                /* failure case - need to deassign the subsys[] slot. */
                subsys[i] = NULL;
                mutex_unlock(&cgroup_mutex);
                return PTR_ERR(css);
        }

        list_add(&ss->sibling, &rootnode.subsys_list);
        ss->root = &rootnode;

        /* our new subsystem will be attached to the dummy hierarchy. */
        init_cgroup_css(css, ss, dummytop);
        /* init_idr must be after init_cgroup_css because it sets css->id. */
        if (ss->use_id) {
                int ret = cgroup_init_idr(ss, css);
                if (ret) {
                        dummytop->subsys[ss->subsys_id] = NULL;
                        ss->destroy(ss, dummytop);
                        subsys[i] = NULL;
                        mutex_unlock(&cgroup_mutex);
                        return ret;
                }
        }

        /*
         * Now we need to entangle the css into the existing css_sets. unlike
         * in cgroup_init_subsys, there are now multiple css_sets, so each one
         * will need a new pointer to it; done by iterating the css_set_table.
         * furthermore, modifying the existing css_sets will corrupt the hash
         * table state, so each changed css_set will need its hash recomputed.
         * this is all done under the css_set_lock.
         */
        write_lock(&css_set_lock);
        for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
                struct css_set *cg;
                struct hlist_node *node, *tmp;
                struct hlist_head *bucket = &css_set_table[i], *new_bucket;

                hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) {
                        /* skip entries that we already rehashed */
                        if (cg->subsys[ss->subsys_id])
                                continue;
                        /* remove existing entry */
                        hlist_del(&cg->hlist);
                        /* set new value */
                        cg->subsys[ss->subsys_id] = css;
                        /* recompute hash and restore entry */
                        new_bucket = css_set_hash(cg->subsys);
                        hlist_add_head(&cg->hlist, new_bucket);
                }
        }
        write_unlock(&css_set_lock);

        mutex_init(&ss->hierarchy_mutex);
        lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
        ss->active = 1;

        /* success! */
        mutex_unlock(&cgroup_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);
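
/*
 * Sketch of the expected initcall pairing in a modular subsystem
 * (the "foo" names are hypothetical):
 *
 *        static int __init foo_cgroup_init(void)
 *        {
 *                return cgroup_load_subsys(&foo_subsys);
 *        }
 *        module_init(foo_cgroup_init);
 */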

/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
        struct cg_cgroup_link *link;
        struct hlist_head *hhead;

        BUG_ON(ss->module == NULL);

        /*
         * we shouldn't be called if the subsystem is in use, and the use of
         * try_module_get in parse_cgroupfs_options should ensure that it
         * doesn't start being used while we're killing it off.
         */
        BUG_ON(ss->root != &rootnode);

        mutex_lock(&cgroup_mutex);
        /* deassign the subsys_id */
        BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
        subsys[ss->subsys_id] = NULL;

        /* remove subsystem from rootnode's list of subsystems */
        list_del(&ss->sibling);

        /*
         * disentangle the css from all css_sets attached to the dummytop. as
         * in loading, we need to pay our respects to the hashtable gods.
         */
        write_lock(&css_set_lock);
        list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
                struct css_set *cg = link->cg;

                hlist_del(&cg->hlist);
                BUG_ON(!cg->subsys[ss->subsys_id]);
                cg->subsys[ss->subsys_id] = NULL;
                hhead = css_set_hash(cg->subsys);
                hlist_add_head(&cg->hlist, hhead);
        }
        write_unlock(&css_set_lock);

        /*
         * remove subsystem's css from the dummytop and free it - need to free
         * before marking as null because ss->destroy needs the cgrp->subsys
         * pointer to find their state. note that this also takes care of
         * freeing the css_id.
         */
        ss->destroy(ss, dummytop);
        dummytop->subsys[ss->subsys_id] = NULL;

        mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
        int i;
        atomic_set(&init_css_set.refcount, 1);
        INIT_LIST_HEAD(&init_css_set.cg_links);
        INIT_LIST_HEAD(&init_css_set.tasks);
        INIT_HLIST_NODE(&init_css_set.hlist);
        css_set_count = 1;
        init_cgroup_root(&rootnode);
        root_count = 1;
        init_task.cgroups = &init_css_set;

        init_css_set_link.cg = &init_css_set;
        init_css_set_link.cgrp = dummytop;
        list_add(&init_css_set_link.cgrp_link_list,
                 &rootnode.top_cgroup.css_sets);
        list_add(&init_css_set_link.cg_link_list,
                 &init_css_set.cg_links);

        for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
                INIT_HLIST_HEAD(&css_set_table[i]);

        /* at bootup time, we don't worry about modular subsystems */
        for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];

                BUG_ON(!ss->name);
                BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
                BUG_ON(!ss->create);
                BUG_ON(!ss->destroy);
                if (ss->subsys_id != i) {
                        printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
                               ss->name, ss->subsys_id);
                        BUG();
                }

                if (ss->early_init)
                        cgroup_init_subsys(ss);
        }
        return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
        int err;
        int i;
        struct hlist_head *hhead;

        err = bdi_init(&cgroup_backing_dev_info);
        if (err)
                return err;

        /* at bootup time, we don't worry about modular subsystems */
        for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                if (!ss->early_init)
                        cgroup_init_subsys(ss);
                if (ss->use_id)
                        cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
        }

        /* Add init_css_set to the hash table */
        hhead = css_set_hash(init_css_set.subsys);
        hlist_add_head(&init_css_set.hlist, hhead);
        BUG_ON(!init_root_id(&rootnode));

        cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
        if (!cgroup_kobj) {
                err = -ENOMEM;
                goto out;
        }

        err = register_filesystem(&cgroup_fs_type);
        if (err < 0) {
                kobject_put(cgroup_kobj);
                goto out;
        }

        proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
        if (err)
                bdi_destroy(&cgroup_backing_dev_info);

        return err;
}

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway. No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
static int proc_cgroup_show(struct seq_file *m, void *v)
{
        struct pid *pid;
        struct task_struct *tsk;
        char *buf;
        int retval;
        struct cgroupfs_root *root;

        retval = -ENOMEM;
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                goto out;

        retval = -ESRCH;
        pid = m->private;
        tsk = get_pid_task(pid, PIDTYPE_PID);
        if (!tsk)
                goto out_free;

        retval = 0;

        mutex_lock(&cgroup_mutex);

        for_each_active_root(root) {
                struct cgroup_subsys *ss;
                struct cgroup *cgrp;
                int count = 0;

                seq_printf(m, "%d:", root->hierarchy_id);
                for_each_subsys(root, ss)
                        seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
                if (strlen(root->name))
                        seq_printf(m, "%sname=%s", count ? "," : "",
                                   root->name);
                seq_putc(m, ':');
                cgrp = task_cgroup_from_root(tsk, root);
                retval = cgroup_path(cgrp, buf, PAGE_SIZE);
                if (retval < 0)
                        goto out_unlock;
                seq_puts(m, buf);
                seq_putc(m, '\n');
        }

out_unlock:
        mutex_unlock(&cgroup_mutex);
        put_task_struct(tsk);
out_free:
        kfree(buf);
out:
        return retval;
}

static int cgroup_open(struct inode *inode, struct file *file)
{
        struct pid *pid = PROC_I(inode)->pid;
        return single_open(file, proc_cgroup_show, pid);
}

const struct file_operations proc_cgroup_operations = {
        .open = cgroup_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
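
/*
 * Example /proc/<pid>/cgroup output, one "id:subsystems:path" line per
 * active hierarchy (subsystem sets and paths are illustrative):
 *
 *        2:cpu,cpuacct:/daemons
 *        1:cpuset:/
 */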

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
        int i;

        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
        /*
         * ideally we don't want subsystems moving around while we do this.
         * cgroup_mutex is also necessary to guarantee an atomic snapshot of
         * subsys/hierarchy state.
         */
        mutex_lock(&cgroup_mutex);
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                if (ss == NULL)
                        continue;
                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->name, ss->root->hierarchy_id,
                           ss->root->number_of_cgroups, !ss->disabled);
        }
        mutex_unlock(&cgroup_mutex);
        return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
        .open = cgroupstats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
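
/*
 * Example /proc/cgroups output (values are illustrative):
 *
 *        #subsys_name    hierarchy    num_cgroups    enabled
 *        cpuset          1            4              1
 *        cpu             2            1              1
 */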

/**
 * cgroup_fork - attach a newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of the child task just forked.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct(). However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer. cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced css_set to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
        task_lock(current);
        child->cgroups = current->cgroups;
        get_css_set(child->cgroups);
        task_unlock(current);
        INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_fork_callbacks - run fork callbacks
 * @child: the new task
 *
 * Called on a new task very soon before adding it to the
 * tasklist. No need to take any locks since no-one can
 * be operating on this task.
 */
void cgroup_fork_callbacks(struct task_struct *child)
{
        if (need_forkexit_callback) {
                int i;
                /*
                 * forkexit callbacks are only supported for builtin
                 * subsystems, and the builtin section of the subsys array is
                 * immutable, so we don't need to lock the subsys array here.
                 */
                for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
                        if (ss->fork)
                                ss->fork(ss, child);
                }
        }
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary.
 * Has to be after the task is visible on the task list in case we race
 * with the first call to cgroup_iter_start() - to guarantee that the
 * new task ends up on its list.
 */
void cgroup_post_fork(struct task_struct *child)
{
        if (use_task_css_set_links) {
                write_lock(&css_set_lock);
                task_lock(child);
                if (list_empty(&child->cg_list))
                        list_add(&child->cg_list, &child->cgroups->tasks);
                task_unlock(child);
                write_unlock(&css_set_lock);
        }
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems. Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference. This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe. The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented. But in this case,
 *    top_cgroup isn't going away, and either task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or task is a failed
 *    fork, never visible to cgroup_attach_task.
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
        int i;
        struct css_set *cg;

        if (run_callbacks && need_forkexit_callback) {
                /*
                 * modular subsystems can't use callbacks, so no need to lock
                 * the subsys array
                 */
                for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
                        if (ss->exit)
                                ss->exit(ss, tsk);
                }
        }

        /*
         * Unlink from the css_set task list if necessary.
         * Optimistically check cg_list before taking
         * css_set_lock
         */
        if (!list_empty(&tsk->cg_list)) {
                write_lock(&css_set_lock);
                if (!list_empty(&tsk->cg_list))
                        list_del(&tsk->cg_list);
                write_unlock(&css_set_lock);
        }

        /* Reassign the task to the init_css_set. */
        task_lock(tsk);
        cg = tsk->cgroups;
        tsk->cgroups = &init_css_set;
        task_unlock(tsk);
        if (cg)
                put_css_set_taskexit(cg);
}

/**
 * cgroup_clone - clone the cgroup the given subsystem is attached to
 * @tsk: the task to be moved
 * @subsys: the given subsystem
 * @nodename: the name for the new cgroup
 *
 * Duplicate the current cgroup in the hierarchy that the given
 * subsystem is attached to, and move this task into the new
 * child.
 */
int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
                 char *nodename)
{
        struct dentry *dentry;
        int ret = 0;
        struct cgroup *parent, *child;
        struct inode *inode;
        struct css_set *cg;
        struct cgroupfs_root *root;
        struct cgroup_subsys *ss;

        /* We shouldn't be called by an unregistered subsystem */
        BUG_ON(!subsys->active);

        /* First figure out what hierarchy and cgroup we're dealing
         * with, and pin them so we can drop cgroup_mutex */
        mutex_lock(&cgroup_mutex);
again:
        root = subsys->root;
        if (root == &rootnode) {
                mutex_unlock(&cgroup_mutex);
                return 0;
        }

        /* Pin the hierarchy */
        if (!atomic_inc_not_zero(&root->sb->s_active)) {
                /* We race with the final deactivate_super() */
                mutex_unlock(&cgroup_mutex);
                return 0;
        }

        /* Keep the cgroup alive */
        task_lock(tsk);
        parent = task_cgroup(tsk, subsys->subsys_id);
        cg = tsk->cgroups;
        get_css_set(cg);
        task_unlock(tsk);

        mutex_unlock(&cgroup_mutex);

        /* Now do the VFS work to create a cgroup */
        inode = parent->dentry->d_inode;

        /* Hold the parent directory mutex across this operation to
         * stop anyone else deleting the new cgroup */
        mutex_lock(&inode->i_mutex);
        dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
        if (IS_ERR(dentry)) {
                printk(KERN_INFO
                       "cgroup: Couldn't allocate dentry for %s: %ld\n",
                       nodename, PTR_ERR(dentry));
                ret = PTR_ERR(dentry);
                goto out_release;
        }

        /* Create the cgroup directory, which also creates the cgroup */
        ret = vfs_mkdir(inode, dentry, 0755);
        child = __d_cgrp(dentry);
        dput(dentry);
        if (ret) {
                printk(KERN_INFO
                       "Failed to create cgroup %s: %d\n", nodename,
                       ret);
                goto out_release;
        }

        /* The cgroup now exists. Retake cgroup_mutex and check
         * that we're still in the same state that we thought we
         * were. */
        mutex_lock(&cgroup_mutex);
        if ((root != subsys->root) ||
            (parent != task_cgroup(tsk, subsys->subsys_id))) {
                /* Aargh, we raced ... */
                mutex_unlock(&inode->i_mutex);
                put_css_set(cg);

                deactivate_super(root->sb);
                /* The cgroup is still accessible in the VFS, but
                 * we're not going to try to rmdir() it at this
                 * point. */
                printk(KERN_INFO
                       "Race in cgroup_clone() - leaking cgroup %s\n",
                       nodename);
                goto again;
        }

        /* do any required auto-setup */
        for_each_subsys(root, ss) {
                if (ss->post_clone)
                        ss->post_clone(ss, child);
        }

        /* All seems fine. Finish by moving the task into the new cgroup */
        ret = cgroup_attach_task(child, tsk);
        mutex_unlock(&cgroup_mutex);

out_release:
        mutex_unlock(&inode->i_mutex);

        mutex_lock(&cgroup_mutex);
        put_css_set(cg);
        mutex_unlock(&cgroup_mutex);
        deactivate_super(root->sb);
        return ret;
}

/**
 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
 * @cgrp: the cgroup in question
 * @task: the task in question
 *
 * See if @cgrp is a descendant of @task's cgroup in the appropriate
 * hierarchy.
 *
 * If we are sending in dummytop, then presumably we are creating
 * the top cgroup in the subsystem.
 *
 * Called only by the ns (nsproxy) cgroup.
 */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
        int ret;
        struct cgroup *target;

        if (cgrp == dummytop)
                return 1;

        target = task_cgroup_from_root(task, cgrp->root);
        while (cgrp != target && cgrp != cgrp->top_cgroup)
                cgrp = cgrp->parent;
        ret = (cgrp == target);
        return ret;
}

static void check_for_release(struct cgroup *cgrp)
{
        /* All of these checks rely on RCU to keep the cgroup
         * structure alive */
        if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
            && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
                /* Control Group is currently removable. If it's not
                 * already queued for a userspace notification, queue
                 * it now */
                int need_schedule_work = 0;
                spin_lock(&release_list_lock);
                if (!cgroup_is_removed(cgrp) &&
                    list_empty(&cgrp->release_list)) {
                        list_add(&cgrp->release_list, &release_list);
                        need_schedule_work = 1;
                }
                spin_unlock(&release_list_lock);
                if (need_schedule_work)
                        schedule_work(&release_agent_work);
        }
}

/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css, int count)
{
        struct cgroup *cgrp = css->cgroup;
        int val;
        rcu_read_lock();
        val = atomic_sub_return(count, &css->refcnt);
        if (val == 1) {
                if (notify_on_release(cgrp)) {
                        set_bit(CGRP_RELEASABLE, &cgrp->flags);
                        check_for_release(cgrp);
                }
                cgroup_wakeup_rmdir_waiter(cgrp);
        }
        rcu_read_unlock();
        WARN_ON_ONCE(val < 1);
}
EXPORT_SYMBOL_GPL(__css_put);

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence. Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d. The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task. We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
        BUG_ON(work != &release_agent_work);
        mutex_lock(&cgroup_mutex);
        spin_lock(&release_list_lock);
        while (!list_empty(&release_list)) {
                char *argv[3], *envp[3];
                int i;
                char *pathbuf = NULL, *agentbuf = NULL;
                struct cgroup *cgrp = list_entry(release_list.next,
                                                 struct cgroup,
                                                 release_list);
                list_del_init(&cgrp->release_list);
                spin_unlock(&release_list_lock);
                pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!pathbuf)
                        goto continue_free;
                if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
                        goto continue_free;
                agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
                if (!agentbuf)
                        goto continue_free;

                i = 0;
                argv[i++] = agentbuf;
                argv[i++] = pathbuf;
                argv[i] = NULL;

                i = 0;
                /* minimal command environment */
                envp[i++] = "HOME=/";
                envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
                envp[i] = NULL;

                /* Drop the lock while we invoke the usermode helper,
                 * since the exec could involve hitting disk and hence
                 * be a slow process */
                mutex_unlock(&cgroup_mutex);
                call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
                mutex_lock(&cgroup_mutex);
continue_free:
                kfree(pathbuf);
                kfree(agentbuf);
                spin_lock(&release_list_lock);
        }
        spin_unlock(&release_list_lock);
        mutex_unlock(&cgroup_mutex);
}
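
/*
 * Illustrative setup: writing a helper path such as "/sbin/cgroup_release"
 * (hypothetical) to the hierarchy's release_agent file makes the code above
 * invoke, e.g., "/sbin/cgroup_release /daemons/empty" once /daemons/empty
 * becomes releasable.
 */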

static int __init cgroup_disable(char *str)
{
        int i;
        char *token;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;
                /*
                 * cgroup_disable, being at boot time, can't know about module
                 * subsystems, so we don't worry about them.
                 */
                for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];

                        if (!strcmp(token, ss->name)) {
                                ss->disabled = 1;
                                printk(KERN_INFO "Disabling %s control group"
                                        " subsystem\n", ss->name);
                                break;
                        }
                }
        }
        return 1;
}
__setup("cgroup_disable=", cgroup_disable);
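
/*
 * Example: booting with "cgroup_disable=memory" sets ss->disabled for the
 * memory controller, so it cannot be bound to any hierarchy at mount time.
 */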

/*
 * Functions for CSS ID.
 */

/*
 * To get an ID other than 0, this should be called when !cgroup_is_removed().
 */
unsigned short css_id(struct cgroup_subsys_state *css)
{
        struct css_id *cssid;

        /*
         * This css_id() can return a correct value when someone has a refcnt
         * on this or this is under rcu_read_lock(). Once css->id is allocated,
         * it's unchanged until freed.
         */
        cssid = rcu_dereference_check(css->id,
                        rcu_read_lock_held() || atomic_read(&css->refcnt));

        if (cssid)
                return cssid->id;
        return 0;
}
EXPORT_SYMBOL_GPL(css_id);

unsigned short css_depth(struct cgroup_subsys_state *css)
{
        struct css_id *cssid;

        cssid = rcu_dereference_check(css->id,
                        rcu_read_lock_held() || atomic_read(&css->refcnt));

        if (cssid)
                return cssid->depth;
        return 0;
}
EXPORT_SYMBOL_GPL(css_depth);

/**
 * css_is_ancestor - test whether "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, it uses rcu_dereference() and rcu_read_lock().
 * But, considering usual usage, the csses should be valid objects after test.
 * Assuming that the caller will do some action to the child if this returns
 * true, the caller must take "child"'s reference count.
 * If "child" is a valid object and this returns true, "root" is valid, too.
 */
bool css_is_ancestor(struct cgroup_subsys_state *child,
                     const struct cgroup_subsys_state *root)
{
        struct css_id *child_id;
        struct css_id *root_id;
        bool ret = true;

        rcu_read_lock();
        child_id = rcu_dereference(child->id);
        root_id = rcu_dereference(root->id);
        if (!child_id
            || !root_id
            || (child_id->depth < root_id->depth)
            || (child_id->stack[root_id->depth] != root_id->id))
                ret = false;
        rcu_read_unlock();
        return ret;
}
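
/*
 * Worked example using the id stacks built by alloc_css_id() below:
 * with a root-level css {1}, a child {1, 4} and a grandchild {1, 4, 9}
 * (ids are illustrative), css_is_ancestor(grandchild, child) checks
 * stack[1] == 4 and returns true; for a cousin with stack {1, 3, 7}
 * the same test fails.
 */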

static void __free_css_id_cb(struct rcu_head *head)
{
        struct css_id *id;

        id = container_of(head, struct css_id, rcu_head);
        kfree(id);
}

void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
        struct css_id *id = css->id;
        /* When this is called before css_id initialization, id can be NULL */
        if (!id)
                return;

        BUG_ON(!ss->use_id);

        rcu_assign_pointer(id->css, NULL);
        rcu_assign_pointer(css->id, NULL);
        spin_lock(&ss->id_lock);
        idr_remove(&ss->idr, id->id);
        spin_unlock(&ss->id_lock);
        call_rcu(&id->rcu_head, __free_css_id_cb);
}
EXPORT_SYMBOL_GPL(free_css_id);

/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (by cgroup_mutex at create()).
 */
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
        struct css_id *newid;
        int myid, error, size;

        BUG_ON(!ss->use_id);

        size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
        newid = kzalloc(size, GFP_KERNEL);
        if (!newid)
                return ERR_PTR(-ENOMEM);
        /* get id */
        if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
                error = -ENOMEM;
                goto err_out;
        }
        spin_lock(&ss->id_lock);
        /* Don't use 0; this allocates an ID in the range 1-65535 */
        error = idr_get_new_above(&ss->idr, newid, 1, &myid);
        spin_unlock(&ss->id_lock);

        /* Returns an error when there are no free slots for a new ID */
        if (error) {
                error = -ENOSPC;
                goto err_out;
        }
        if (myid > CSS_ID_MAX)
                goto remove_idr;

        newid->id = myid;
        newid->depth = depth;
        return newid;
remove_idr:
        error = -ENOSPC;
        spin_lock(&ss->id_lock);
        idr_remove(&ss->idr, myid);
        spin_unlock(&ss->id_lock);
err_out:
        kfree(newid);
        return ERR_PTR(error);
}
  4154. static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
  4155. struct cgroup_subsys_state *rootcss)
  4156. {
  4157. struct css_id *newid;
  4158. spin_lock_init(&ss->id_lock);
  4159. idr_init(&ss->idr);
  4160. newid = get_new_cssid(ss, 0);
  4161. if (IS_ERR(newid))
  4162. return PTR_ERR(newid);
  4163. newid->stack[0] = newid->id;
  4164. newid->css = rootcss;
  4165. rootcss->id = newid;
  4166. return 0;
  4167. }
static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
                        struct cgroup *child)
{
        int subsys_id, i, depth = 0;
        struct cgroup_subsys_state *parent_css, *child_css;
        struct css_id *child_id, *parent_id;

        subsys_id = ss->subsys_id;
        parent_css = parent->subsys[subsys_id];
        child_css = child->subsys[subsys_id];
        parent_id = parent_css->id;
        depth = parent_id->depth + 1;

        child_id = get_new_cssid(ss, depth);
        if (IS_ERR(child_id))
                return PTR_ERR(child_id);

        for (i = 0; i < depth; i++)
                child_id->stack[i] = parent_id->stack[i];
        child_id->stack[depth] = child_id->id;
        /*
         * child_id->css pointer will be set after this cgroup is available
         * see cgroup_populate_dir()
         */
        rcu_assign_pointer(child_css->id, child_id);

        return 0;
}
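
/*
 * Worked example (illustrative ids): for a hierarchy root -> A -> B where
 * the root css got id 1, A got id 5 and B got id 9, the css_id stacks
 * built above are:
 *
 *        root: depth = 0, stack = { 1 }
 *        A:    depth = 1, stack = { 1, 5 }
 *        B:    depth = 2, stack = { 1, 5, 9 }
 *
 * css_is_ancestor(B, A) then simply checks B->stack[A->depth] == A->id,
 * i.e. stack[1] == 5, without walking the tree.
 */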
/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns a pointer to the cgroup_subsys_state if there is a valid one
 * with the given id; NULL if not. Must be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
        struct css_id *cssid = NULL;

        BUG_ON(!ss->use_id);
        cssid = idr_find(&ss->idr, id);

        if (unlikely(!cssid))
                return NULL;

        return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);
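
/*
 * Illustrative usage (hypothetical caller): look the css up under
 * rcu_read_lock() and pin it with css_tryget() before leaving the
 * read-side critical section:
 *
 *        rcu_read_lock();
 *        css = css_lookup(ss, id);
 *        if (css && !css_tryget(css))
 *                css = NULL;
 *        rcu_read_unlock();
 */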
/**
 * css_get_next - lookup next cgroup under specified hierarchy.
 * @ss: pointer to subsystem
 * @id: current position of iteration.
 * @root: pointer to css; the tree under this css is searched.
 * @foundid: position of the found object.
 *
 * Search for the next css under the hierarchy rooted at @root, starting
 * from @id. Must be called under rcu_read_lock(). Returns NULL when the
 * end of the tree is reached.
 */
struct cgroup_subsys_state *
css_get_next(struct cgroup_subsys *ss, int id,
             struct cgroup_subsys_state *root, int *foundid)
{
        struct cgroup_subsys_state *ret = NULL;
        struct css_id *tmp;
        int tmpid;
        int rootid = css_id(root);
        int depth = css_depth(root);

        if (!rootid)
                return NULL;

        BUG_ON(!ss->use_id);
        /* fill start point for scan */
        tmpid = id;
        while (1) {
                /*
                 * scan next entry from bitmap(tree), tmpid is updated to
                 * the found id by idr_get_next().
                 */
                spin_lock(&ss->id_lock);
                tmp = idr_get_next(&ss->idr, &tmpid);
                spin_unlock(&ss->id_lock);

                if (!tmp)
                        break;
                if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
                        ret = rcu_dereference(tmp->css);
                        if (ret) {
                                *foundid = tmpid;
                                break;
                        }
                }
                /* continue to scan from the next id */
                tmpid = tmpid + 1;
        }
        return ret;
}
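
/*
 * Illustrative iteration loop (hypothetical caller): walk every css in
 * the subtree of @root, restarting the scan just past each hit:
 *
 *        int found, next = 1;
 *        struct cgroup_subsys_state *css;
 *
 *        rcu_read_lock();
 *        while ((css = css_get_next(ss, next, root, &found)) != NULL) {
 *                ; // visit css
 *                next = found + 1;
 *        }
 *        rcu_read_unlock();
 */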
#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
                                                struct cgroup *cont)
{
        struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

        if (!css)
                return ERR_PTR(-ENOMEM);

        return css;
}

static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
        kfree(cont->subsys[debug_subsys_id]);
}
static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
{
        return atomic_read(&cont->count);
}

static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
{
        return cgroup_task_count(cont);
}

static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
{
        return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup *cont,
                                         struct cftype *cft)
{
        u64 count;

        rcu_read_lock();
        count = atomic_read(&current->cgroups->refcount);
        rcu_read_unlock();
        return count;
}
static int current_css_set_cg_links_read(struct cgroup *cont,
                                         struct cftype *cft,
                                         struct seq_file *seq)
{
        struct cg_cgroup_link *link;
        struct css_set *cg;

        read_lock(&css_set_lock);
        rcu_read_lock();
        cg = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cg->cg_links, cg_link_list) {
                struct cgroup *c = link->cgrp;
                const char *name;

                if (c->dentry)
                        name = c->dentry->d_name.name;
                else
                        name = "?";
                seq_printf(seq, "Root %d group %s\n",
                           c->root->hierarchy_id, name);
        }
        rcu_read_unlock();
        read_unlock(&css_set_lock);
        return 0;
}
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct cgroup *cont,
                                 struct cftype *cft,
                                 struct seq_file *seq)
{
        struct cg_cgroup_link *link;

        read_lock(&css_set_lock);
        list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
                struct css_set *cg = link->cg;
                struct task_struct *task;
                int count = 0;

                seq_printf(seq, "css_set %p\n", cg);
                list_for_each_entry(task, &cg->tasks, cg_list) {
                        if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
                                seq_puts(seq, "  ...\n");
                                break;
                        } else {
                                seq_printf(seq, "  task %d\n",
                                           task_pid_vnr(task));
                        }
                }
        }
        read_unlock(&css_set_lock);
        return 0;
}
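
/*
 * Example output (illustrative pointer/pid values), matching the
 * seq_printf() formats above:
 *
 *        css_set ffff88003f0a2c00
 *          task 1234
 *          task 1240
 */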
static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
{
        return test_bit(CGRP_RELEASABLE, &cgrp->flags);
}
static struct cftype debug_files[] =  {
        {
                .name = "cgroup_refcount",
                .read_u64 = cgroup_refcount_read,
        },
        {
                .name = "taskcount",
                .read_u64 = debug_taskcount_read,
        },
        {
                .name = "current_css_set",
                .read_u64 = current_css_set_read,
        },
        {
                .name = "current_css_set_refcount",
                .read_u64 = current_css_set_refcount_read,
        },
        {
                .name = "current_css_set_cg_links",
                .read_seq_string = current_css_set_cg_links_read,
        },
        {
                .name = "cgroup_css_links",
                .read_seq_string = cgroup_css_links_read,
        },
        {
                .name = "releasable",
                .read_u64 = releasable_read,
        },
};
static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
        return cgroup_add_files(cont, ss, debug_files,
                                ARRAY_SIZE(debug_files));
}

struct cgroup_subsys debug_subsys = {
        .name = "debug",
        .create = debug_create,
        .destroy = debug_destroy,
        .populate = debug_populate,
        .subsys_id = debug_subsys_id,
};
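
/*
 * Illustrative usage from userspace (assuming CONFIG_CGROUP_DEBUG=y; the
 * mount point is an example). The cgroup core prefixes control files with
 * the subsystem name, so the files above appear as "debug.taskcount" etc.:
 *
 *        # mount -t cgroup -o debug none /mnt/debug
 *        # cat /mnt/debug/debug.taskcount
 */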
#endif /* CONFIG_CGROUP_DEBUG */