/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>

#include <asm/atomic.h>

static DEFINE_MUTEX(cgroup_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) &_x ## _subsys,
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};
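
/*
 * Illustrative expansion (not part of the original file): if
 * cgroup_subsys.h contained, say, SUBSYS(cpuset) and SUBSYS(cpu),
 * the x-macro above would expand the array to roughly:
 *
 *	static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
 *		&cpuset_subsys,
 *		&cpu_subsys,
 *	};
 */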

#define MAX_CGROUP_ROOT_NAMELEN 64

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_bits;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy. */
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State (CSS). Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX	(65535)
struct css_id {
	/*
	 * The css to which this ID points. This pointer is set to a valid
	 * value after the cgroup is populated. If the cgroup is removed,
	 * this will be NULL. This pointer is expected to be RCU-safe
	 * because destroy() is called after synchronize_rcu(). But for safe
	 * use, css_is_removed() or css_tryget() should be used to avoid
	 * races.
	 */
	struct cgroup_subsys_state __rcu *css;
	/*
	 * ID of this css.
	 */
	unsigned short id;
	/*
	 * Depth in hierarchy which this ID belongs to.
	 */
	unsigned short depth;
	/*
	 * ID is freed by RCU. (and lookup routine is RCU safe.)
	 */
	struct rcu_head rcu_head;
	/*
	 * Chain of CSS IDs of the hierarchy this ID belongs to.
	 */
	unsigned short stack[0]; /* Array of length (depth+1) */
};
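
/*
 * Illustrative sketch (an assumption, not code from this section):
 * since stack[] is a zero-length trailing array, a css_id for a css
 * at depth "depth" would be allocated with room for depth+1 entries,
 * roughly:
 *
 *	struct css_id *newid;
 *
 *	newid = kzalloc(sizeof(*newid) +
 *			(depth + 1) * sizeof(unsigned short), GFP_KERNEL);
 */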

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
	/*
	 * Cgroup which the event belongs to.
	 */
	struct cgroup *cgrp;
	/*
	 * Control file which the event is associated with.
	 */
	struct cftype *cft;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * All of the fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

/* The list of hierarchy roots */
static LIST_HEAD(roots);
static int root_count;

static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

#ifdef CONFIG_PROVE_LOCKING
int cgroup_lock_is_held(void)
{
	return lockdep_is_held(&cgroup_mutex);
}
#else /* #ifdef CONFIG_PROVE_LOCKING */
int cgroup_lock_is_held(void)
{
	return mutex_is_locked(&cgroup_mutex);
}
#endif /* #else #ifdef CONFIG_PROVE_LOCKING */

EXPORT_SYMBOL_GPL(cgroup_lock_is_held);

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
	ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
list_for_each_entry(_root, &roots, root_list)
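
/*
 * Illustrative usage sketch of the two iterators above (hypothetical,
 * for exposition only); both walk lists that change only under
 * cgroup_mutex, so callers are expected to hold it:
 *
 *	struct cgroupfs_root *root;
 *	struct cgroup_subsys *ss;
 *
 *	for_each_active_root(root)
 *		for_each_subsys(root, ss)
 *			printk(KERN_INFO "%s bound to hierarchy %d\n",
 *			       ss->name, root->hierarchy_id);
 */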

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
	/*
	 * List running through cg_cgroup_links associated with a
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
	struct cgroup *cgrp;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
	 */
	struct list_head cg_link_list;
	struct css_set *cg;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
			   struct cgroup_subsys_state *css);

/* css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set.  Nests outside task->alloc_lock
 * due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
#define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];

static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
{
	int i;
	int index;
	unsigned long tmp = 0UL;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		tmp += (unsigned long)css[i];
	tmp = (tmp >> 16) ^ tmp;

	index = hash_long(tmp, CSS_SET_HASH_BITS);

	return &css_set_table[index];
}

static void free_css_set_rcu(struct rcu_head *obj)
{
	struct css_set *cg = container_of(obj, struct css_set, rcu_head);
	kfree(cg);
}

/* We don't maintain the lists running through each css_set to its
 * tasks until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cg, int taskexit)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. unlink it and release cgroup refcounts */
	hlist_del(&cg->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
				 cg_link_list) {
		struct cgroup *cgrp = link->cgrp;
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
		kfree(link);
	}

	write_unlock(&css_set_lock);
	call_rcu(&cg->rcu_head, free_css_set_rcu);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
	__put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
	__put_css_set(cg, 1);
}
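
/*
 * Illustrative refcounting sketch (hypothetical caller): code that
 * needs a css_set to stay alive across a section pairs these helpers
 * in the usual way:
 *
 *	get_css_set(cg);
 *	... use cg->subsys[...] ...
 *	put_css_set(cg);
 *
 * put_css_set_taskexit() is the variant used from the task-exit path;
 * as __put_css_set() shows, it additionally marks the cgroup
 * releasable before checking for release.
 */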

/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
			     struct css_set *old_cg,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */
	l1 = &cg->cg_links;
	l2 = &old_cg->cg_links;
	while (1) {
		struct cg_cgroup_link *cgl1, *cgl2;
		struct cgroup *cg1, *cg2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cg->cg_links) {
			BUG_ON(l2 != &old_cg->cg_links);
			break;
		} else {
			BUG_ON(l2 == &old_cg->cg_links);
		}
		/* Locate the cgroups associated with these links. */
		cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
		cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
		cg1 = cgl1->cgrp;
		cg2 = cgl2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cg1->root != cg2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cg1->root == new_cgrp->root) {
			if (cg1 != new_cgrp)
				return false;
		} else {
			if (cg1 != cg2)
				return false;
		}
	}
	return true;
}

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the cgroup group that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new cgroup group
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct hlist_head *hhead;
	struct hlist_node *node;
	struct css_set *cg;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_bits & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	hhead = css_set_hash(template);
	hlist_for_each_entry(cg, node, hhead, hlist) {
		if (!compare_css_sets(cg, oldcg, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cg;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;

	INIT_LIST_HEAD(tmp);
	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cg_links(tmp);
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
			 struct css_set *cg, struct cgroup *cgrp)
{
	struct cg_cgroup_link *link;

	BUG_ON(list_empty(tmp_cg_links));
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
	link->cgrp = cgrp;
	atomic_inc(&cgrp->count);
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cg_link_list, &cg->cg_links);
}

/*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old group, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
	struct list_head tmp_cg_links;
	struct hlist_head *hhead;
	struct cg_cgroup_link *link;

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	read_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	atomic_set(&res->refcount, 1);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);
	INIT_HLIST_NODE(&res->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_cg_links, res, c);
	}

	BUG_ON(!list_empty(&tmp_cg_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	hhead = css_set_hash(res->subsys);
	hlist_add_head(&res->hlist, hhead);

	write_unlock(&css_set_lock);

	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *css;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	css = task->cgroups;
	if (css == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cg_cgroup_link *link;
		list_for_each_entry(link, &css->cg_links, cg_link_list) {
			struct cgroup *c = link->cgrp;
			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of the cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either child cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */
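
/*
 * Illustrative sketch of the task_lock() pattern described above
 * (hypothetical reader, for exposition only):
 *
 *	struct css_set *cg;
 *
 *	task_lock(tsk);
 *	cg = tsk->cgroups;
 *	get_css_set(cg);
 *	task_unlock(tsk);
 *
 * Per the P.S. above, RCU readers may instead dereference the pointer
 * inside an rcu_read_lock() section.
 */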

/**
 * cgroup_lock - lock out any changes to cgroup structures
 *
 */
void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_lock);

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unlock);

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
			struct cgroup *parent, struct cgroup *child);

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ret = 0;

	for_each_subsys(cgrp->root, ss)
		if (ss->pre_destroy) {
			ret = ss->pre_destroy(ss, cgrp);
			if (ret)
				break;
		}

	return ret;
}

static void free_cgroup_rcu(struct rcu_head *obj)
{
	struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);

	kfree(cgrp);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;
		struct cgroup_subsys *ss;
		BUG_ON(!(cgroup_is_removed(cgrp)));
		/* It's possible for external users to be holding css
		 * reference counts on a cgroup; css_put() needs to
		 * be able to access the cgroup after decrementing
		 * the reference count in order to know if it needs to
		 * queue the cgroup to be handled by the release
		 * agent */
		synchronize_rcu();

		mutex_lock(&cgroup_mutex);
		/*
		 * Release the subsystem state objects.
		 */
		for_each_subsys(cgrp->root, ss)
			ss->destroy(ss, cgrp);

		cgrp->root->number_of_cgroups--;
		mutex_unlock(&cgroup_mutex);

		/*
		 * Drop the active superblock reference that we took when we
		 * created the cgroup
		 */
		deactivate_super(cgrp->root->sb);

		/*
		 * if we're getting rid of the cgroup, refcount should ensure
		 * that there are no pidlists left.
		 */
		BUG_ON(!list_empty(&cgrp->pidlists));

		call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
	}
	iput(inode);
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
	struct list_head *node;

	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			/* This should never be called on a cgroup
			 * directory with child cgroups */
			BUG_ON(d->d_inode->i_mode & S_IFDIR);
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	spin_unlock(&dcache_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	cgroup_clear_directory(dentry);

	spin_lock(&dcache_lock);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

/*
 * A queue for waiters to do rmdir() on a cgroup. A task will sleep when
 * cgroup->count == 0 && list_empty(&cgroup->children) && a subsys still has
 * some reference to css->refcnt. In general, this refcnt is expected to go
 * down to zero soon.
 *
 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
 */
DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);

static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
{
	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
		wake_up_all(&cgroup_rmdir_waitq);
}

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
{
	css_get(css);
}

void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
{
	cgroup_wakeup_rmdir_waiter(css->cgroup);
	css_put(css);
}
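
/*
 * Illustrative usage sketch (hypothetical subsystem code): a subsystem
 * that temporarily pins a css across an operation racing with rmdir
 * would bracket the operation like:
 *
 *	cgroup_exclude_rmdir(css);
 *	... operate on the cgroup's state ...
 *	cgroup_release_and_wakeup_rmdir(css);
 *
 * so that a task sleeping on cgroup_rmdir_waitq is woken once the
 * temporary reference is dropped.
 */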

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long final_bits)
{
	unsigned long added_bits, removed_bits;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

	removed_bits = root->actual_subsys_bits & ~final_bits;
	added_bits = final_bits & ~root->actual_subsys_bits;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_bits))
			continue;
		/*
		 * Nobody should tell us to do a subsys that doesn't exist:
		 * parse_cgroupfs_options should catch that case and refcounts
		 * ensure that subsystems won't disappear once selected.
		 */
		BUG_ON(ss == NULL);
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (root->number_of_cgroups > 1)
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_bits) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			mutex_lock(&ss->hierarchy_mutex);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(ss, cgrp);
			mutex_unlock(&ss->hierarchy_mutex);
			/* refcount was already taken, and we're keeping it */
		} else if (bit & removed_bits) {
			/* We're removing this subsystem */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			mutex_lock(&ss->hierarchy_mutex);
			if (ss->bind)
				ss->bind(ss, dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			subsys[i]->root = &rootnode;
			list_move(&ss->sibling, &rootnode.subsys_list);
			mutex_unlock(&ss->hierarchy_mutex);
			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
		} else if (bit & final_bits) {
			/* Subsystem state should already exist */
			BUG_ON(ss == NULL);
			BUG_ON(!cgrp->subsys[i]);
			/*
			 * a refcount was taken, but we already had one, so
			 * drop the extra reference.
			 */
			module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
			BUG_ON(ss->module && !module_refcount(ss->module));
#endif
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_bits = root->actual_subsys_bits = final_bits;
	synchronize_rcu();

	return 0;
}
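
/*
 * Worked example of the added/removed bit arithmetic above, with
 * illustrative values: if actual_subsys_bits == 0x3 and
 * final_bits == 0x6, then removed_bits == 0x1 and added_bits == 0x4,
 * so subsystem 0 is unbound, subsystem 2 is bound, and subsystem 1
 * (set in final_bits but already attached) only has its duplicate
 * module reference dropped.
 */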

static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (test_bit(ROOT_NOPREFIX, &root->flags))
		seq_puts(seq, ",noprefix");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
	char *release_agent;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;
};
  933. /*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data ?: "all";
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "all")) {
			/* Add all non-disabled subsystems */
			opts->subsys_bits = 0;
			for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
				struct cgroup_subsys *ss = subsys[i];
				if (ss == NULL)
					continue;
				if (!ss->disabled)
					opts->subsys_bits |= 1ul << i;
			}
		} else if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
		} else if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
		} else if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
		} else if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;
		} else {
			struct cgroup_subsys *ss;
			for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
				ss = subsys[i];
				if (ss == NULL)
					continue;
				if (!strcmp(token, ss->name)) {
					if (!ss->disabled)
						set_bit(i, &opts->subsys_bits);
					break;
				}
			}
			if (i == CGROUP_SUBSYS_COUNT)
				return -ENOENT;
		}
	}

	/* Consistency checks */

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
	    (opts->subsys_bits & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_bits && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_bits && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_bits))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * oops, one of the modules was going away. this means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
			/* drop refcounts only on the ones we took */
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_bits))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}
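
/*
 * Illustrative sketch of how a mount option string maps to cgroup_sb_opts
 * (an assumption for the example: "cpu" and "cpuacct" are registered,
 * non-disabled subsystem names):
 *
 *	char buf[] = "cpu,cpuacct,name=mygrp";
 *	struct cgroup_sb_opts opts;
 *	int ret;
 *
 *	mutex_lock(&cgroup_mutex);
 *	ret = parse_cgroupfs_options(buf, &opts);
 *	mutex_unlock(&cgroup_mutex);
 *
 * On success, the cpu and cpuacct bits are set in opts.subsys_bits and
 * opts.name is a kstrndup()ed "mygrp"; the caller must eventually kfree()
 * opts.name and opts.release_agent.
 */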

static void drop_parsed_module_refcounts(unsigned long subsys_bits)
{
	int i;

	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & subsys_bits))
			continue;
		module_put(subsys[i]->module);
	}
}

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);
	if (ret) {
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	/* (re)populate subsystem files */
	cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	init_cgroup_housekeeping(cgrp);
}

static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret);
	return true;
}

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_bits || opts->none)
	    && (opts->subsys_bits != root->subsys_bits))
		return 0;

	return 1;
}

static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_bits && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	root->subsys_bits = opts->subsys_bits;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	return root;
}

static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (!root)
		return;

	BUG_ON(!root->hierarchy_id);
	spin_lock(&hierarchy_id_lock);
	ida_remove(&hierarchy_ida, root->hierarchy_id);
	spin_unlock(&hierarchy_id_lock);
	kfree(root);
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_bits && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	return 0;
}

static int cgroup_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
		struct cgroupfs_root *existing_root;
		int i;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		if (strlen(root->name)) {
			/* Check for name clashes with existing mounts */
			for_each_active_root(existing_root) {
				if (!strcmp(existing_root->name, root->name)) {
					ret = -EBUSY;
					mutex_unlock(&cgroup_mutex);
					mutex_unlock(&inode->i_mutex);
					goto drop_new_super;
				}
			}
		}

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			free_cg_links(&tmp_cg_links);
			goto drop_new_super;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
			struct hlist_head *hhead = &css_set_table[i];
			struct hlist_node *node;
			struct css_set *cg;

			hlist_for_each_entry(cg, node, hhead, hlist)
				link_css_set(&tmp_cg_links, cg, root_cgrp);
		}
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->sibling));
		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(root_cgrp);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);
		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_bits);
	}

	simple_set_mnt(mnt, sb);
	kfree(opts.release_agent);
	kfree(opts.name);
	return 0;

drop_new_super:
	deactivate_locked_super(sb);
drop_modules:
	drop_parsed_module_refcounts(opts.subsys_bits);
out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ret;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_mutex);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.get_sb = cgroup_get_sb,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Called with cgroup_mutex held or else with an RCU-protected cgroup
 * reference. Writes path of cgroup into buf. Returns 0 on success,
 * -errno on error.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	char *start;
	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
						      rcu_read_lock_held() ||
						      cgroup_lock_is_held());

	if (!dentry || cgrp == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = dentry->d_name.len;

		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;

		dentry = rcu_dereference_check(cgrp->dentry,
					       rcu_read_lock_held() ||
					       cgroup_lock_is_held());
		if (!cgrp->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_path);
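
/*
 * Illustrative sketch of a cgroup_path() caller (assumption for the
 * example: "cgrp" is a valid cgroup reachable under rcu_read_lock(),
 * which satisfies the locking rule documented above):
 *
 *	char buf[PATH_MAX];
 *
 *	rcu_read_lock();
 *	if (!cgroup_path(cgrp, buf, PATH_MAX))
 *		pr_debug("task belongs to %s\n", buf);
 *	rcu_read_unlock();
 */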

/**
 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
 * @cgrp: the cgroup the task is attaching to
 * @tsk: the task to be attached
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during call.
 */
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	int retval = 0;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroup *oldcgrp;
	struct css_set *cg;
	struct css_set *newcg;
	struct cgroupfs_root *root = cgrp->root;

	/* Nothing to do if the task is already in that cgroup */
	oldcgrp = task_cgroup_from_root(tsk, root);
	if (cgrp == oldcgrp)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, tsk, false);
			if (retval) {
				/*
				 * Remember on which subsystem the can_attach()
				 * failed, so that we only call cancel_attach()
				 * against the subsystems whose can_attach()
				 * succeeded. (See below)
				 */
				failed_ss = ss;
				goto out;
			}
		}
	}

	task_lock(tsk);
	cg = tsk->cgroups;
	get_css_set(cg);
	task_unlock(tsk);
	/*
	 * Locate or allocate a new css_set for this task,
	 * based on its final set of cgroups
	 */
	newcg = find_css_set(cg, cgrp);
	put_css_set(cg);
	if (!newcg) {
		retval = -ENOMEM;
		goto out;
	}

	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		put_css_set(newcg);
		retval = -ESRCH;
		goto out;
	}
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list)) {
		list_del(&tsk->cg_list);
		list_add(&tsk->cg_list, &newcg->tasks);
	}
	write_unlock(&css_set_lock);

	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk, false);
	}
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	synchronize_rcu();
	put_css_set(cg);

	/*
	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
	 * is no longer empty.
	 */
	cgroup_wakeup_rmdir_waiter(cgrp);
out:
	if (retval) {
		for_each_subsys(root, ss) {
			if (ss == failed_ss)
				/*
				 * This subsystem was the one that failed the
				 * can_attach() check earlier, so we don't need
				 * to call cancel_attach() against it or any
				 * remaining subsystems.
				 */
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(ss, cgrp, tsk, false);
		}
	}
	return retval;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	cgroup_lock();
	for_each_active_root(root) {
		struct cgroup *from_cg = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cg, tsk);
		if (retval)
			break;
	}
	cgroup_unlock();
	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
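
/*
 * Illustrative sketch of a cgroup_attach_task_all() caller: a kernel
 * thread placing itself in the same cgroups, on every active hierarchy,
 * as the task that requested the work. "requester" is a hypothetical
 * task_struct pointer, shown only to demonstrate the API:
 *
 *	int err = cgroup_attach_task_all(requester, current);
 *	if (err)
 *		pr_warning("could not inherit cgroups: %d\n", err);
 */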

/*
 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
 * held. May take task_lock of task
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			rcu_read_unlock();
			return -ESRCH;
		}

		tcred = __task_cred(tsk);
		if (cred->euid &&
		    cred->euid != tcred->uid &&
		    cred->euid != tcred->suid) {
			rcu_read_unlock();
			return -EACCES;
		}
		get_task_struct(tsk);
		rcu_read_unlock();
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	ret = cgroup_attach_task(cgrp, tsk);
	put_task_struct(tsk);
	return ret;
}

static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
	int ret;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	ret = attach_task_by_pid(cgrp, pid);
	cgroup_unlock();
	return ret;
}

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the lock should be later released with
 * cgroup_unlock(). On failure returns false with no lock held.
 */
bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_removed(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(cgroup_lock_live_group);

static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	strcpy(cgrp->root->release_agent_path, buffer);
	cgroup_unlock();
	return 0;
}

static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	cgroup_unlock();
	return 0;
}

/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
				struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;	/* nul-terminate */
	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(cgrp, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(cgrp, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}
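
/*
 * Worked example of the parsing above: for a .write_u64 file, a write of
 * "  42\n" is strstrip()ed to "42" and accepted; because the base passed
 * to simple_strtoull() is 0, "0x2a" and "052" are accepted too, while
 * "42x" fails with -EINVAL since a non-digit remains at *end.
 */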

static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
				   struct file *file,
				   const char __user *userbuf,
				   size_t nbytes, loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;	/* nul-terminate */
	retval = cft->write_string(cgrp, cft, strstrip(buffer));
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}

static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;
	if (cft->write)
		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}

static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(cgrp, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(cgrp, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;

	if (cft->read)
		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

struct cgroup_seqfile_state {
	struct cftype *cft;
	struct cgroup *cgroup;
};

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cgroup_seqfile_state *state = m->private;
	struct cftype *cft = state->cft;

	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(state->cgroup, cft, &cb);
	}
	return cft->read_seq_string(state->cgroup, cft, m);
}

static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	kfree(seq->private);
	return single_release(inode, file);
}

static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = cgroup_seqfile_release,
};

static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;
	cft = __d_cft(file->f_dentry);

	if (cft->read_map || cft->read_seq_string) {
		struct cgroup_seqfile_state *state =
			kzalloc(sizeof(*state), GFP_USER);
		if (!state)
			return -ENOMEM;
		state->cft = cft;
		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, state);
		if (err < 0)
			kfree(state);
	} else if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);

	if (cft->release)
		return cft->release(inode, file);
	return 0;
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
};

/*
 * Check if a file is a control file
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}

static int cgroup_create_file(struct dentry *dentry, mode_t mode,
			      struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
	};

	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);

		/* start with the directory inode held, so that we can
		 * populate it without racing with another mkdir */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
	}
	dentry->d_op = &cgroup_dops;
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cgroup_create_dir - create a directory for an object.
 * @cgrp: the cgroup we create the directory for. It must have a valid
 *        ->parent field. And we are going to fill its ->dentry field.
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new directory.
 */
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
			     mode_t mode)
{
	struct dentry *parent;
	int error = 0;

	parent = cgrp->parent->dentry;
	error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
	if (!error) {
		dentry->d_fsdata = cgrp;
		inc_nlink(parent->d_inode);
		rcu_assign_pointer(cgrp->dentry, dentry);
		dget(dentry);
	}
	dput(dentry);

	return error;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static mode_t cgroup_file_mode(const struct cftype *cft)
{
	mode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
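
/*
 * Examples of the deduced modes: a cftype with only .read_u64 set becomes
 * 0444 (S_IRUGO); one with .read_u64 and .write_string becomes 0644
 * (S_IRUGO | S_IWUSR); a .trigger-only file becomes 0200 (S_IWUSR).
 */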

int cgroup_add_file(struct cgroup *cgrp,
		    struct cgroup_subsys *subsys,
		    const struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct dentry *dentry;
	int error;
	mode_t mode;
	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };

	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);
	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
	dentry = lookup_one_len(name, dir, strlen(name));
	if (!IS_ERR(dentry)) {
		mode = cgroup_file_mode(cft);
		error = cgroup_create_file(dentry, mode | S_IFREG,
					   cgrp->root->sb);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	return error;
}
EXPORT_SYMBOL_GPL(cgroup_add_file);

int cgroup_add_files(struct cgroup *cgrp,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = cgroup_add_file(cgrp, subsys, &cft[i]);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_add_files);
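
/*
 * Illustrative sketch of the usual caller pattern for cgroup_add_files():
 * a subsystem declares a static cftype table and registers it when its
 * directory is populated. The "foo" subsystem and its handlers are
 * hypothetical, shown only to demonstrate the API:
 *
 *	static const struct cftype foo_files[] = {
 *		{
 *			.name = "limit",
 *			.read_u64 = foo_limit_read,
 *			.write_u64 = foo_limit_write,
 *		},
 *	};
 *
 *	static int foo_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 *
 * With no .mode set, cgroup_file_mode() gives "foo.limit" mode 0644, and
 * the file shows up as "limit" instead if the root was mounted noprefix.
 */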

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
		count += atomic_read(&link->cg->refcount);
	}
	read_unlock(&css_set_lock);
	return count;
}

/*
 * Advance a list_head iterator. The iterator should be positioned at
 * the start of a css_set
 */
static void cgroup_advance_iter(struct cgroup *cgrp,
				struct cgroup_iter *it)
{
	struct list_head *l = it->cg_link;
	struct cg_cgroup_link *link;
	struct css_set *cg;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &cgrp->css_sets) {
			it->cg_link = NULL;
			return;
		}
		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
		cg = link->cg;
	} while (list_empty(&cg->tasks));
	it->cg_link = l;
	it->task = cg->tasks.next;
}

/*
 * To reduce the fork() overhead for systems that are not actually
 * using their cgroups capability, we don't maintain the lists running
 * through each css_set to its tasks until we see the list actually
 * used - in other words after the first call to cgroup_iter_start().
 *
 * The tasklist_lock is not held here, as do_each_thread() and
 * while_each_thread() are protected by RCU.
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &p->cgroups->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	write_unlock(&css_set_lock);
}

void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
{
	/*
	 * The first time anyone tries to iterate across a cgroup,
	 * we need to enable the list linking each css_set to its
	 * tasks, and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);
	it->cg_link = &cgrp->css_sets;
	cgroup_advance_iter(cgrp, it);
}

struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cg_cgroup_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cg_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
	if (l == &link->cg->tasks) {
		/* We reached the end of this task list - move on to
		 * the next cg_cgroup_link */
		cgroup_advance_iter(cgrp, it);
	} else {
		it->task = l;
	}
	return res;
}

void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
{
	read_unlock(&css_set_lock);
}
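
/*
 * Illustrative sketch of the iterator protocol. css_set_lock is read-held
 * from cgroup_iter_start() to cgroup_iter_end(), so the loop body must be
 * cheap and must not sleep; "count_members" is a hypothetical caller,
 * shown only to demonstrate the API:
 *
 *	static int count_members(struct cgroup *cgrp)
 *	{
 *		struct cgroup_iter it;
 *		struct task_struct *tsk;
 *		int n = 0;
 *
 *		cgroup_iter_start(cgrp, &it);
 *		while ((tsk = cgroup_iter_next(cgrp, &it)))
 *			n++;
 *		cgroup_iter_end(cgrp, &it);
 *		return n;
 *	}
 */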

static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}

/**
 * cgroup_scan_tasks - iterate though all the tasks in a cgroup
 * @scan: struct cgroup_scanner containing arguments for the scan
 *
 * Arguments include pointers to callback functions test_task() and
 * process_task().
 * Iterate through all the tasks in a cgroup, calling test_task() for each,
 * and if it returns true, call process_task() for it also.
 * The test_task pointer may be NULL, meaning always true (select all tasks).
 * Effectively duplicates cgroup_iter_{start,next,end}()
 * but does not lock css_set_lock for the call to process_task().
 * The struct cgroup_scanner may be embedded in any structure of the caller's
 * creation.
 * It is guaranteed that process_task() will act on every task that
 * is a member of the cgroup for the duration of this call. This
 * function may or may not call process_task() for tasks that exit
 * or move to a different cgroup during the call, or are forked or
 * move into the cgroup during the call.
 *
 * Note that test_task() may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should
 * be cheap.
 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
 * pre-allocated and will be used for heap operations (and its "gt" member will
 * be overwritten), else a temporary heap will be used (allocation of which
 * may cause this function to fail).
 */
int cgroup_scan_tasks(struct cgroup_scanner *scan)
{
	int retval, i;
	struct cgroup_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct ptr_heap *heap;
	struct timespec latest_time = { 0, 0 };

	if (scan->heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap = scan->heap;
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

again:
	/*
	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
	 * to determine which are of interest, and using the scanner's
	 * "process_task" callback to process any of them that need an update.
	 * Since we don't want to hold any locks during the task updates,
	 * gather tasks to be processed in a heap structure.
	 * The heap is sorted by descending task start time.
	 * If the statically-sized heap fills up, we overflow tasks that
	 * started later, and in future iterations only consider tasks that
	 * started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	cgroup_iter_start(scan->cg, &it);
	while ((p = cgroup_iter_next(scan->cg, &it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if he provided one
		 */
		if (scan->test_task && !scan->test_task(p, scan))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	cgroup_iter_end(scan->cg, &it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			scan->process_task(q, scan);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
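
/*
 * Illustrative sketch of a cgroup_scan_tasks() caller with hypothetical
 * callbacks; leaving .heap NULL makes the function allocate a temporary
 * heap, as documented above:
 *
 *	static int want_task(struct task_struct *p, struct cgroup_scanner *s)
 *	{
 *		return !(p->flags & PF_KTHREAD);	// must be cheap
 *	}
 *
 *	static void update_task(struct task_struct *p, struct cgroup_scanner *s)
 *	{
 *		// called without css_set_lock held; p is refcounted here
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg = cgrp,
 *		.test_task = want_task,
 *		.process_task = update_task,
 *		.heap = NULL,
 *	};
 *	int err = cgroup_scan_tasks(&scan);
 */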

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

static void *pidlist_resize(void *p, int newcount)
{
	void *newlist;

	/* note: if new alloc fails, old p will still be valid either way */
	if (is_vmalloc_addr(p)) {
		newlist = vmalloc(newcount * sizeof(pid_t));
		if (!newlist)
			return NULL;
		memcpy(newlist, p, newcount * sizeof(pid_t));
		vfree(p);
	} else {
		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
	}
	return newlist;
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * If the new stripped list is sufficiently smaller and there's enough memory
 * to allocate a new buffer, will let go of the unneeded memory. Returns the
 * number of unique elements.
 */
/* is the size difference enough that we should re-allocate the array? */
#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
static int pidlist_uniq(pid_t **p, int length)
{
	int src, dest = 1;
	pid_t *list = *p;
	pid_t *newlist;

	/*
	 * we presume the 0th element is unique, so i starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	/*
	 * if the length difference is large enough, we want to allocate a
	 * smaller buffer to save memory. if this fails due to out of memory,
	 * we'll just stay with what we've got.
	 */
	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
		newlist = pidlist_resize(list, dest);
		if (newlist)
			*p = newlist;
	}
	return dest;
}
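
/*
 * Worked example: on the sorted input { 3, 3, 5, 5, 5, 8 }, pidlist_uniq()
 * compacts the array in place to { 3, 5, 8 } and returns 3; the buffer is
 * only shrunk when PIDLIST_REALLOC_DIFFERENCE() says the saving is at
 * least a page.
 */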

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = current->nsproxy->pid_ns;

	/*
	 * We can't drop the pidlist_mutex before taking the l->mutex in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare cgroup_release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->mutex);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->mutex);
	down_write(&l->mutex);
	l->key.type = type;
	l->key.ns = get_pid_ns(ns);
	l->use_count = 0; /* don't increment here */
	l->list = NULL;
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct cgroup_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	cgroup_iter_end(cgrp, &it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(&array, length);
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->mutex);
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct cgroup_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	cgroup_iter_end(cgrp, &it);

err:
	return ret;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	down_read(&l->mutex);
	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_pidlist *l = s->private;
	up_read(&l->mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};

static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * the case where we're the last user of this particular pidlist will
	 * have us remove it from the cgroup's list, which entails taking the
	 * mutex. since in pidlist_find the pidlist->lock depends on cgroup->
	 * pidlist_mutex, we have to take pidlist_mutex first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->mutex);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		up_write(&l->mutex);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->mutex);
}

static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
	struct cgroup_pidlist *l;

	if (!(file->f_mode & FMODE_READ))
		return 0;
	/*
	 * the seq_file will only be initialized if the file was opened for
	 * reading; hence we check if it's not null only in that case.
	 */
	l = ((struct seq_file *)file->private_data)->private;
	cgroup_release_pid_array(l);
	return seq_release(inode, file);
}

static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};
  2622. /*
  2623. * The following functions handle opens on a file that displays a pidlist
  2624. * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
  2625. * in the cgroup.
  2626. */
/* helper function for the two below it */
static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
	struct cgroup_pidlist *l;
	int retval;

	/* Nothing to do for write-only files */
	if (!(file->f_mode & FMODE_READ))
		return 0;

	/* have the array populated */
	retval = pidlist_array_load(cgrp, type, &l);
	if (retval)
		return retval;
	/* configure file information */
	file->f_op = &cgroup_pidlist_operations;

	retval = seq_open(file, &cgroup_pidlist_seq_operations);
	if (retval) {
		cgroup_release_pid_array(l);
		return retval;
	}
	((struct seq_file *)file->private_data)->private = l;
	return 0;
}

static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
}

static int cgroup_procs_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}
static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
					 struct cftype *cft)
{
	return notify_on_release(cgrp);
}

static int cgroup_write_notify_on_release(struct cgroup *cgrp,
					  struct cftype *cft,
					  u64 val)
{
	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	return 0;
}
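/*
 * Illustrative usage (mount point hypothetical): the knob is the plain 0/1
 * file handled by the pair of functions above, so e.g.
 *
 *	echo 1 > /dev/cgroup/foo/notify_on_release
 *
 * arms the release-agent notification for "foo". Note the write also clears
 * CGRP_RELEASABLE, so the cgroup is not considered releasable again until a
 * later put or child removal marks it so.
 */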
/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
	struct cgroup_event *event = container_of(work, struct cgroup_event,
			remove);
	struct cgroup *cgrp = event->cgrp;

	event->cft->unregister_event(cgrp, event->cft, event->eventfd);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	dput(cgrp->dentry);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
		int sync, void *key)
{
	struct cgroup_event *event = container_of(wait,
			struct cgroup_event, wait);
	struct cgroup *cgrp = event->cgrp;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		__remove_wait_queue(event->wqh, &event->wait);
		spin_lock(&cgrp->event_list_lock);
		list_del(&event->list);
		spin_unlock(&cgrp->event_list_lock);
		/*
		 * We are in atomic context, but cgroup_event_remove() may
		 * sleep, so we have to call it in workqueue.
		 */
		schedule_work(&event->remove);
	}

	return 0;
}

static void cgroup_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct cgroup_event *event = container_of(pt,
			struct cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
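/*
 * Sketch of the userspace side (illustrative only; the control file name and
 * the threshold argument are hypothetical and depend on the controller):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int wfd = open("cgroup.event_control", O_WRONLY);
 *	dprintf(wfd, "%d %d %s", efd, cfd, "1048576");
 *	...
 *	read(efd, &counter, sizeof(counter));	// blocks until the event fires
 */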
static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	struct cgroup_event *event = NULL;
	unsigned int efd, cfd;
	struct file *efile = NULL;
	struct file *cfile = NULL;
	char *endp;
	int ret;

	efd = simple_strtoul(buffer, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buffer = endp + 1;

	cfd = simple_strtoul(buffer, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buffer = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;
	event->cgrp = cgrp;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
	INIT_WORK(&event->remove, cgroup_event_remove);

	efile = eventfd_fget(efd);
	if (IS_ERR(efile)) {
		ret = PTR_ERR(efile);
		goto fail;
	}

	event->eventfd = eventfd_ctx_fileget(efile);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto fail;
	}

	cfile = fget(cfd);
	if (!cfile) {
		ret = -EBADF;
		goto fail;
	}

	/* the process needs read permission on the control file */
	ret = file_permission(cfile, MAY_READ);
	if (ret < 0)
		goto fail;

	event->cft = __file_cft(cfile);
	if (IS_ERR(event->cft)) {
		ret = PTR_ERR(event->cft);
		goto fail;
	}

	if (!event->cft->register_event || !event->cft->unregister_event) {
		ret = -EINVAL;
		goto fail;
	}

	ret = event->cft->register_event(cgrp, event->cft,
			event->eventfd, buffer);
	if (ret)
		goto fail;

	if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
		event->cft->unregister_event(cgrp, event->cft, event->eventfd);
		ret = 0;
		goto fail;
	}

	/*
	 * Events should be removed after rmdir of cgroup directory, but before
	 * destroying subsystem state objects. Let's take reference to cgroup
	 * directory dentry to do that.
	 */
	dget(cgrp->dentry);

	spin_lock(&cgrp->event_list_lock);
	list_add(&event->list, &cgrp->event_list);
	spin_unlock(&cgrp->event_list_lock);

	fput(cfile);
	fput(efile);

	return 0;

fail:
	if (cfile)
		fput(cfile);

	if (event && event->eventfd && !IS_ERR(event->eventfd))
		eventfd_ctx_put(event->eventfd);

	if (!IS_ERR_OR_NULL(efile))
		fput(efile);

	kfree(event);

	return ret;
}
/*
 * for the common functions, 'private' gives the type of file
 */
/* for hysterical raisins, we can't put this on the older files */
#define CGROUP_FILE_GENERIC_PREFIX "cgroup."

static struct cftype files[] = {
	{
		.name = "tasks",
		.open = cgroup_tasks_open,
		.write_u64 = cgroup_tasks_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "procs",
		.open = cgroup_procs_open,
		/* .write_u64 = cgroup_procs_write, TODO */
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "event_control",
		.write_string = cgroup_write_event_control,
		.mode = S_IWUGO,
	},
};

static struct cftype cft_release_agent = {
	.name = "release_agent",
	.read_seq_string = cgroup_release_agent_show,
	.write_string = cgroup_release_agent_write,
	.max_write_len = PATH_MAX,
};
static int cgroup_populate_dir(struct cgroup *cgrp)
{
	int err;
	struct cgroup_subsys *ss;

	/* First clear out any existing files */
	cgroup_clear_directory(cgrp->dentry);

	err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
	if (err < 0)
		return err;

	if (cgrp == cgrp->top_cgroup) {
		if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
			return err;
	}

	for_each_subsys(cgrp->root, ss) {
		if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
			return err;
	}
	/* This cgroup is ready now */
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		/*
		 * Update id->css pointer and make this css visible from
		 * CSS ID functions. This pointer will be dereferenced
		 * from RCU-read-side without locks.
		 */
		if (css->id)
			rcu_assign_pointer(css->id->css, css);
	}

	return 0;
}
static void init_cgroup_css(struct cgroup_subsys_state *css,
			    struct cgroup_subsys *ss,
			    struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	atomic_set(&css->refcnt, 1);
	css->flags = 0;
	css->id = NULL;
	if (cgrp == dummytop)
		set_bit(CSS_ROOT, &css->flags);
	BUG_ON(cgrp->subsys[ss->subsys_id]);
	cgrp->subsys[ss->subsys_id] = css;
}

static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
{
	/* We need to take each hierarchy_mutex in a consistent order */
	int i;

	/*
	 * No worry about a race with rebind_subsystems that might mess up the
	 * locking order, since both parties are under cgroup_mutex.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		if (ss->root == root)
			mutex_lock(&ss->hierarchy_mutex);
	}
}

static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
{
	int i;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		if (ss->root == root)
			mutex_unlock(&ss->hierarchy_mutex);
	}
}
/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			  mode_t mode)
{
	struct cgroup *cgrp;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups.  This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	mutex_lock(&cgroup_mutex);

	init_cgroup_housekeeping(cgrp);

	cgrp->parent = parent;
	cgrp->root = parent->root;
	cgrp->top_cgroup = parent->top_cgroup;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	for_each_subsys(root, ss) {
		struct cgroup_subsys_state *css = ss->create(ss, cgrp);

		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_destroy;
		}
		init_cgroup_css(css, ss, cgrp);
		if (ss->use_id) {
			err = alloc_css_id(ss, parent, cgrp);
			if (err)
				goto err_destroy;
		}
		/* At error, ->destroy() callback has to free assigned ID. */
	}

	cgroup_lock_hierarchy(root);
	list_add(&cgrp->sibling, &cgrp->parent->children);
	cgroup_unlock_hierarchy(root);
	root->number_of_cgroups++;

	err = cgroup_create_dir(cgrp, dentry, mode);
	if (err < 0)
		goto err_remove;

	/* The cgroup directory was pre-locked for us */
	BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));

	err = cgroup_populate_dir(cgrp);
	/* If err < 0, we have a half-filled directory - oh well ;) */

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

 err_remove:

	cgroup_lock_hierarchy(root);
	list_del(&cgrp->sibling);
	cgroup_unlock_hierarchy(root);
	root->number_of_cgroups--;

 err_destroy:

	for_each_subsys(root, ss) {
		if (cgrp->subsys[ss->subsys_id])
			ss->destroy(ss, cgrp);
	}

	mutex_unlock(&cgroup_mutex);

	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);

	kfree(cgrp);
	return err;
}
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

static int cgroup_has_css_refs(struct cgroup *cgrp)
{
	/* Check the reference count on each subsystem. Since we
	 * already established that there are no tasks in the
	 * cgroup, if the css refcount is also 1, then there should
	 * be no outstanding references, so the subsystem is safe to
	 * destroy. We scan across all subsystems rather than using
	 * the per-hierarchy linked list of mounted subsystems since
	 * we can be called via check_for_release() with no
	 * synchronization other than RCU, and the subsystem linked
	 * list isn't RCU-safe */
	int i;
	/*
	 * We won't need to lock the subsys array, because the subsystems
	 * we're concerned about aren't going anywhere since our cgroup root
	 * has a reference on them.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		struct cgroup_subsys_state *css;
		/* Skip subsystems not present or not in this hierarchy */
		if (ss == NULL || ss->root != cgrp->root)
			continue;
		css = cgrp->subsys[ss->subsys_id];
		/* When called from check_for_release() it's possible
		 * that by this point the cgroup has been removed
		 * and the css deleted. But a false-positive doesn't
		 * matter, since it can only happen if the cgroup
		 * has been deleted and hence no longer needs the
		 * release agent to be called anyway. */
		if (css && (atomic_read(&css->refcnt) > 1))
			return 1;
	}
	return 0;
}
/*
 * Atomically mark all (or else none) of the cgroup's CSS objects as
 * CSS_REMOVED. Return true on success, or false if the cgroup has
 * busy subsystems. Call with cgroup_mutex held
 */
static int cgroup_clear_css_refs(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	unsigned long flags;
	bool failed = false;

	local_irq_save(flags);
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		int refcnt;
		while (1) {
			/* We can only remove a CSS with a refcnt==1 */
			refcnt = atomic_read(&css->refcnt);
			if (refcnt > 1) {
				failed = true;
				goto done;
			}
			BUG_ON(!refcnt);
			/*
			 * Drop the refcnt to 0 while we check other
			 * subsystems. This will cause any racing
			 * css_tryget() to spin until we set the
			 * CSS_REMOVED bits or abort
			 */
			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
				break;
			cpu_relax();
		}
	}
 done:
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		if (failed) {
			/*
			 * Restore old refcnt if we previously managed
			 * to clear it from 1 to 0
			 */
			if (!atomic_read(&css->refcnt))
				atomic_set(&css->refcnt, 1);
		} else {
			/* Commit the fact that the CSS is removed */
			set_bit(CSS_REMOVED, &css->flags);
		}
	}
	local_irq_restore(flags);
	return !failed;
}
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	struct cgroup *cgrp = dentry->d_fsdata;
	struct dentry *d;
	struct cgroup *parent;
	DEFINE_WAIT(wait);
	struct cgroup_event *event, *tmp;
	int ret;

	/* the vfs holds i_mutex on both the parent and the target inode */
again:
	mutex_lock(&cgroup_mutex);
	if (atomic_read(&cgrp->count) != 0) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	if (!list_empty(&cgrp->children)) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	mutex_unlock(&cgroup_mutex);

	/*
	 * In general, a subsystem holds no css->refcnt after pre_destroy().
	 * But in racy cases a subsystem may need to grab css->refcnt after
	 * pre_destroy(), which makes rmdir return -EBUSY; that can happen
	 * too often. To avoid it, we use a waitqueue for cgroup's rmdir.
	 * CGRP_WAIT_ON_RMDIR synchronizes rmdir with the subsystem's
	 * reference-count handling. Please see css_get/put, css_tryget() and
	 * the cgroup_wakeup_rmdir_waiter() implementation.
	 */
	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

	/*
	 * Call pre_destroy handlers of subsys. Notify subsystems
	 * that rmdir() request comes.
	 */
	ret = cgroup_call_pre_destroy(cgrp);
	if (ret) {
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		return ret;
	}

	mutex_lock(&cgroup_mutex);
	parent = cgrp->parent;
	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
	if (!cgroup_clear_css_refs(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		/*
		 * Because someone may call cgroup_wakeup_rmdir_waiter() before
		 * prepare_to_wait(), we need to check this flag.
		 */
		if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
			schedule();
		finish_wait(&cgroup_rmdir_waitq, &wait);
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		if (signal_pending(current))
			return -EINTR;
		goto again;
	}
	/* No css_tryget() can succeed after this point. */
	finish_wait(&cgroup_rmdir_waitq, &wait);
	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

	spin_lock(&release_list_lock);
	set_bit(CGRP_REMOVED, &cgrp->flags);
	if (!list_empty(&cgrp->release_list))
		list_del(&cgrp->release_list);
	spin_unlock(&release_list_lock);

	cgroup_lock_hierarchy(cgrp->root);
	/* delete this cgroup from parent->children */
	list_del(&cgrp->sibling);
	cgroup_unlock_hierarchy(cgrp->root);

	spin_lock(&cgrp->dentry->d_lock);
	d = dget(cgrp->dentry);
	spin_unlock(&d->d_lock);

	cgroup_d_remove_dir(d);
	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace
	 */
	spin_lock(&cgrp->event_list_lock);
	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
		list_del(&event->list);
		remove_wait_queue(event->wqh, &event->wait);
		eventfd_signal(event->eventfd, 1);
		schedule_work(&event->remove);
	}
	spin_unlock(&cgrp->event_list_lock);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	/* Create the top cgroup state for this subsystem */
	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;
	css = ss->create(ss, dummytop);
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_cgroup_css(css, ss, dummytop);

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's top cgroup. */
	init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	mutex_init(&ss->hierarchy_mutex);
	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
	ss->active = 1;

	/* this function shouldn't be used with modular subsystems, since they
	 * need to register a subsys_id, among other things */
	BUG_ON(ss->module);
}
/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 */
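/*
 * Illustrative module boilerplate (the subsystem "my_subsys" and the
 * function names are hypothetical; only the cgroup_load_subsys() and
 * cgroup_unload_subsys() calls are real):
 *
 *	static int __init my_ss_init(void)
 *	{
 *		return cgroup_load_subsys(&my_subsys);
 *	}
 *
 *	static void __exit my_ss_exit(void)
 *	{
 *		cgroup_unload_subsys(&my_subsys);
 *	}
 *
 *	module_init(my_ss_init);
 *	module_exit(my_ss_exit);
 */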
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
	int i;
	struct cgroup_subsys_state *css;

	/* check name and function validity */
	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
	    ss->create == NULL || ss->destroy == NULL)
		return -EINVAL;

	/*
	 * we don't support callbacks in modular subsystems. this check is
	 * before the ss->module check for consistency; a subsystem that could
	 * be a module should still have no callbacks even if the user isn't
	 * compiling it as one.
	 */
	if (ss->fork || ss->exit)
		return -EINVAL;

	/*
	 * an optionally modular subsystem is built-in: we want to do nothing,
	 * since cgroup_init_subsys will have already taken care of it.
	 */
	if (ss->module == NULL) {
		/* a few sanity checks */
		BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
		BUG_ON(subsys[ss->subsys_id] != ss);
		return 0;
	}

	/*
	 * need to register a subsys id before anything else - for example,
	 * init_cgroup_css needs it.
	 */
	mutex_lock(&cgroup_mutex);
	/* find the first empty slot in the array */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		if (subsys[i] == NULL)
			break;
	}
	if (i == CGROUP_SUBSYS_COUNT) {
		/* maximum number of subsystems already registered! */
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	/* assign ourselves the subsys_id */
	ss->subsys_id = i;
	subsys[i] = ss;

	/*
	 * no ss->create seems to need anything important in the ss struct, so
	 * this can happen first (i.e. before the rootnode attachment).
	 */
	css = ss->create(ss, dummytop);
	if (IS_ERR(css)) {
		/* failure case - need to deassign the subsys[] slot. */
		subsys[i] = NULL;
		mutex_unlock(&cgroup_mutex);
		return PTR_ERR(css);
	}

	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;

	/* our new subsystem will be attached to the dummy hierarchy. */
	init_cgroup_css(css, ss, dummytop);
	/* init_idr must be after init_cgroup_css because it sets css->id. */
	if (ss->use_id) {
		int ret = cgroup_init_idr(ss, css);
		if (ret) {
			dummytop->subsys[ss->subsys_id] = NULL;
			ss->destroy(ss, dummytop);
			subsys[i] = NULL;
			mutex_unlock(&cgroup_mutex);
			return ret;
		}
	}

	/*
	 * Now we need to entangle the css into the existing css_sets. unlike
	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
	 * will need a new pointer to it; done by iterating the css_set_table.
	 * furthermore, modifying the existing css_sets will corrupt the hash
	 * table state, so each changed css_set will need its hash recomputed.
	 * this is all done under the css_set_lock.
	 */
	write_lock(&css_set_lock);
	for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
		struct css_set *cg;
		struct hlist_node *node, *tmp;
		struct hlist_head *bucket = &css_set_table[i], *new_bucket;

		hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) {
			/* skip entries that we already rehashed */
			if (cg->subsys[ss->subsys_id])
				continue;
			/* remove existing entry */
			hlist_del(&cg->hlist);
			/* set new value */
			cg->subsys[ss->subsys_id] = css;
			/* recompute hash and restore entry */
			new_bucket = css_set_hash(cg->subsys);
			hlist_add_head(&cg->hlist, new_bucket);
		}
	}
	write_unlock(&css_set_lock);

	mutex_init(&ss->hierarchy_mutex);
	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
	ss->active = 1;

	/* success! */
	mutex_unlock(&cgroup_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);
/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
	struct cg_cgroup_link *link;
	struct hlist_head *hhead;

	BUG_ON(ss->module == NULL);

	/*
	 * we shouldn't be called if the subsystem is in use, and the use of
	 * try_module_get in parse_cgroupfs_options should ensure that it
	 * doesn't start being used while we're killing it off.
	 */
	BUG_ON(ss->root != &rootnode);

	mutex_lock(&cgroup_mutex);
	/* deassign the subsys_id */
	BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
	subsys[ss->subsys_id] = NULL;

	/* remove subsystem from rootnode's list of subsystems */
	list_del(&ss->sibling);

	/*
	 * disentangle the css from all css_sets attached to the dummytop. as
	 * in loading, we need to pay our respects to the hashtable gods.
	 */
	write_lock(&css_set_lock);
	list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;

		hlist_del(&cg->hlist);
		BUG_ON(!cg->subsys[ss->subsys_id]);
		cg->subsys[ss->subsys_id] = NULL;
		hhead = css_set_hash(cg->subsys);
		hlist_add_head(&cg->hlist, hhead);
	}
	write_unlock(&css_set_lock);

	/*
	 * remove subsystem's css from the dummytop and free it - need to free
	 * before marking as null because ss->destroy needs the cgrp->subsys
	 * pointer to find its state. note that this also takes care of
	 * freeing the css_id.
	 */
	ss->destroy(ss, dummytop);
	dummytop->subsys[ss->subsys_id] = NULL;

	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	int i;
	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cg_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	css_set_count = 1;
	init_cgroup_root(&rootnode);
	root_count = 1;
	init_task.cgroups = &init_css_set;

	init_css_set_link.cg = &init_css_set;
	init_css_set_link.cgrp = dummytop;
	list_add(&init_css_set_link.cgrp_link_list,
		 &rootnode.top_cgroup.css_sets);
	list_add(&init_css_set_link.cg_link_list,
		 &init_css_set.cg_links);

	for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&css_set_table[i]);

	/* at bootup time, we don't worry about modular subsystems */
	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
		BUG_ON(!ss->create);
		BUG_ON(!ss->destroy);
		if (ss->subsys_id != i) {
			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}
/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	int err;
	int i;
	struct hlist_head *hhead;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	/* at bootup time, we don't worry about modular subsystems */
	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (!ss->early_init)
			cgroup_init_subsys(ss);
		if (ss->use_id)
			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
	}

	/* Add init_css_set to the hash table */
	hhead = css_set_hash(init_css_set.subsys);
	hlist_add_head(&init_css_set.hlist, hhead);
	BUG_ON(!init_root_id(&rootnode));

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}
/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
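/*
 * Each emitted line has the form
 *
 *	<hierarchy id>:<comma-separated subsystems>[,name=<name>]:<path>
 *
 * so a task in the "/user" cgroup of a hierarchy with id 2 carrying the cpu
 * subsystem would show up as (values hypothetical):
 *
 *	2:cpu:/user
 */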
static int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(root, ss)
			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

static int cgroup_open(struct inode *inode, struct file *file)
{
	struct pid *pid = PROC_I(inode)->pid;
	return single_open(file, proc_cgroup_show, pid);
}

const struct file_operations proc_cgroup_operations = {
	.open = cgroup_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
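	/*
	 * Example output row, tab-separated to match the header above
	 * (values hypothetical):
	 *
	 *	cpuset	1	4	1
	 */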
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);
	}
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * cgroup_fork - attach newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of the newly forked child process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct(). However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer. cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced css_set to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	task_lock(current);
	child->cgroups = current->cgroups;
	get_css_set(child->cgroups);
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);
}
/**
 * cgroup_fork_callbacks - run fork callbacks
 * @child: the new task
 *
 * Called on a new task very soon before adding it to the
 * tasklist. No need to take any locks since no-one can
 * be operating on this task.
 */
void cgroup_fork_callbacks(struct task_struct *child)
{
	if (need_forkexit_callback) {
		int i;
		/*
		 * forkexit callbacks are only supported for builtin
		 * subsystems, and the builtin section of the subsys array is
		 * immutable, so we don't need to lock the subsys array here.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss->fork)
				ss->fork(ss, child);
		}
	}
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary.
 * Has to be after the task is visible on the task list in case we race
 * with the first call to cgroup_iter_start() - to guarantee that the
 * new task ends up on its list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &child->cgroups->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}
}
/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference.  This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe.  The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented.  But in this case,
 *    top_cgroup isn't going away, and either task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or task is a
 *    failed fork, never visible to cgroup_attach_task.
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	int i;
	struct css_set *cg;

	if (run_callbacks && need_forkexit_callback) {
		/*
		 * modular subsystems can't use callbacks, so no need to lock
		 * the subsys array
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss->exit)
				ss->exit(ss, tsk);
		}
	}

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cg = tsk->cgroups;
	tsk->cgroups = &init_css_set;
	task_unlock(tsk);
	if (cg)
		put_css_set_taskexit(cg);
}
/**
 * cgroup_clone - clone the cgroup the given subsystem is attached to
 * @tsk: the task to be moved
 * @subsys: the given subsystem
 * @nodename: the name for the new cgroup
 *
 * Duplicate the current cgroup in the hierarchy that the given
 * subsystem is attached to, and move this task into the new
 * child.
 */
int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
		 char *nodename)
{
	struct dentry *dentry;
	int ret = 0;
	struct cgroup *parent, *child;
	struct inode *inode;
	struct css_set *cg;
	struct cgroupfs_root *root;
	struct cgroup_subsys *ss;

	/* We shouldn't be called by an unregistered subsystem */
	BUG_ON(!subsys->active);

	/* First figure out what hierarchy and cgroup we're dealing
	 * with, and pin them so we can drop cgroup_mutex */
	mutex_lock(&cgroup_mutex);
again:
	root = subsys->root;
	if (root == &rootnode) {
		mutex_unlock(&cgroup_mutex);
		return 0;
	}

	/* Pin the hierarchy */
	if (!atomic_inc_not_zero(&root->sb->s_active)) {
		/* We race with the final deactivate_super() */
		mutex_unlock(&cgroup_mutex);
		return 0;
	}

	/* Keep the cgroup alive */
	task_lock(tsk);
	parent = task_cgroup(tsk, subsys->subsys_id);
	cg = tsk->cgroups;
	get_css_set(cg);
	task_unlock(tsk);

	mutex_unlock(&cgroup_mutex);

	/* Now do the VFS work to create a cgroup */
	inode = parent->dentry->d_inode;

	/* Hold the parent directory mutex across this operation to
	 * stop anyone else deleting the new cgroup */
	mutex_lock(&inode->i_mutex);
	dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
	if (IS_ERR(dentry)) {
		printk(KERN_INFO
		       "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
		       PTR_ERR(dentry));
		ret = PTR_ERR(dentry);
		goto out_release;
	}

	/* Create the cgroup directory, which also creates the cgroup */
	ret = vfs_mkdir(inode, dentry, 0755);
	child = __d_cgrp(dentry);
	dput(dentry);
	if (ret) {
		printk(KERN_INFO
		       "Failed to create cgroup %s: %d\n", nodename,
		       ret);
		goto out_release;
	}

	/* The cgroup now exists. Retake cgroup_mutex and check
	 * that we're still in the same state that we thought we
	 * were. */
	mutex_lock(&cgroup_mutex);
	if ((root != subsys->root) ||
	    (parent != task_cgroup(tsk, subsys->subsys_id))) {
		/* Aargh, we raced ... */
		mutex_unlock(&inode->i_mutex);
		put_css_set(cg);

		deactivate_super(root->sb);
		/* The cgroup is still accessible in the VFS, but
		 * we're not going to try to rmdir() it at this
		 * point. */
		printk(KERN_INFO
		       "Race in cgroup_clone() - leaking cgroup %s\n",
		       nodename);
		goto again;
	}

	/* do any required auto-setup */
	for_each_subsys(root, ss) {
		if (ss->post_clone)
			ss->post_clone(ss, child);
	}

	/* All seems fine. Finish by moving the task into the new cgroup */
	ret = cgroup_attach_task(child, tsk);
	mutex_unlock(&cgroup_mutex);

out_release:
	mutex_unlock(&inode->i_mutex);

	mutex_lock(&cgroup_mutex);
	put_css_set(cg);
	mutex_unlock(&cgroup_mutex);
	deactivate_super(root->sb);
	return ret;
}
/**
 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
 * @cgrp: the cgroup in question
 * @task: the task in question
 *
 * See if @cgrp is a descendant of @task's cgroup in the appropriate
 * hierarchy.
 *
 * If we are sending in dummytop, then presumably we are creating
 * the top cgroup in the subsystem.
 *
 * Called only by the ns (nsproxy) cgroup.
 */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
	int ret;
	struct cgroup *target;

	if (cgrp == dummytop)
		return 1;

	target = task_cgroup_from_root(task, cgrp->root);
	while (cgrp != target && cgrp != cgrp->top_cgroup)
		cgrp = cgrp->parent;
	ret = (cgrp == target);
	return ret;
}
static void check_for_release(struct cgroup *cgrp)
{
	/* All of these checks rely on RCU to keep the cgroup
	 * structure alive */
	if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
	    && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
		/* Control Group is currently removable. If it's not
		 * already queued for a userspace notification, queue
		 * it now */
		int need_schedule_work = 0;
		spin_lock(&release_list_lock);
		if (!cgroup_is_removed(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css, int count)
{
	struct cgroup *cgrp = css->cgroup;
	int val;
	rcu_read_lock();
	val = atomic_sub_return(count, &css->refcnt);
	if (val == 1) {
		if (notify_on_release(cgrp)) {
			set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
		cgroup_wakeup_rmdir_waiter(cgrp);
	}
	rcu_read_unlock();
	WARN_ON_ONCE(val < 1);
}
EXPORT_SYMBOL_GPL(__css_put);
/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
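/*
 * A minimal release agent, for illustration (mount point hypothetical; the
 * agent binary is whatever was written to the hierarchy's release_agent
 * file). It receives the cgroup's path relative to the mount point as
 * argv[1]:
 *
 *	#!/bin/sh
 *	rmdir "/dev/cgroup$1"
 */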
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		spin_lock(&release_list_lock);
	}
	spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
static int __init cgroup_disable(char *str)
{
	int i;
	char *token;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;
		/*
		 * cgroup_disable, being at boot time, can't know about module
		 * subsystems, so we don't worry about them.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
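/*
 * For example, booting with "cgroup_disable=cpuset,freezer" would disable
 * those two built-in subsystems (matching on ss->name), assuming they are
 * compiled into the kernel.
 */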
/*
 * Functions for CSS ID.
 */

/*
 * To get an ID other than 0, this should be called when !cgroup_is_removed().
 */
unsigned short css_id(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	/*
	 * This css_id() can return a correct value when someone has a refcnt
	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
	 * it's unchanged until freed.
	 */
	cssid = rcu_dereference_check(css->id,
			rcu_read_lock_held() || atomic_read(&css->refcnt));

	if (cssid)
		return cssid->id;
	return 0;
}
EXPORT_SYMBOL_GPL(css_id);

unsigned short css_depth(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	cssid = rcu_dereference_check(css->id,
			rcu_read_lock_held() || atomic_read(&css->refcnt));

	if (cssid)
		return cssid->depth;
	return 0;
}
EXPORT_SYMBOL_GPL(css_depth);
/**
 * css_is_ancestor - test whether "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, it uses rcu_dereference() under
 * rcu_read_lock(). But, considering usual usage, the csses should be valid
 * objects after the test. Assuming that the caller will take some action on
 * the child if this returns true, the caller must hold "child"'s reference
 * count. If "child" is a valid object and this returns true, "root" is
 * valid, too.
 */
bool css_is_ancestor(struct cgroup_subsys_state *child,
		     const struct cgroup_subsys_state *root)
{
	struct css_id *child_id;
	struct css_id *root_id;
	bool ret = true;

	rcu_read_lock();
	child_id = rcu_dereference(child->id);
	root_id = rcu_dereference(root->id);
	if (!child_id
	    || !root_id
	    || (child_id->depth < root_id->depth)
	    || (child_id->stack[root_id->depth] != root_id->id))
		ret = false;
	rcu_read_unlock();
	return ret;
}
static void __free_css_id_cb(struct rcu_head *head)
{
	struct css_id *id;

	id = container_of(head, struct css_id, rcu_head);
	kfree(id);
}

void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
	struct css_id *id = css->id;
	/* When this is called before css_id initialization, id can be NULL */
	if (!id)
		return;

	BUG_ON(!ss->use_id);

	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
	spin_unlock(&ss->id_lock);
	call_rcu(&id->rcu_head, __free_css_id_cb);
}
EXPORT_SYMBOL_GPL(free_css_id);
/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (by cgroup_mutex at create()).
 */
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
	struct css_id *newid;
	int myid, error, size;

	BUG_ON(!ss->use_id);

	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
	newid = kzalloc(size, GFP_KERNEL);
	if (!newid)
		return ERR_PTR(-ENOMEM);
	/* get id */
	if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
		error = -ENOMEM;
		goto err_out;
	}
	spin_lock(&ss->id_lock);
	/* Don't use 0; allocate an ID in the range 1-65535 */
	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
	spin_unlock(&ss->id_lock);

	/* Returns an error when there is no free space for a new ID. */
	if (error) {
		error = -ENOSPC;
		goto err_out;
	}
	if (myid > CSS_ID_MAX)
		goto remove_idr;

	newid->id = myid;
	newid->depth = depth;
	return newid;
remove_idr:
	error = -ENOSPC;
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, myid);
	spin_unlock(&ss->id_lock);
err_out:
	kfree(newid);
	return ERR_PTR(error);
}
static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
					    struct cgroup_subsys_state *rootcss)
{
	struct css_id *newid;

	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
	if (IS_ERR(newid))
		return PTR_ERR(newid);

	newid->stack[0] = newid->id;
	newid->css = rootcss;
	rootcss->id = newid;
	return 0;
}
static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
			struct cgroup *child)
{
	int subsys_id, i, depth = 0;
	struct cgroup_subsys_state *parent_css, *child_css;
	struct css_id *child_id, *parent_id;

	subsys_id = ss->subsys_id;
	parent_css = parent->subsys[subsys_id];
	child_css = child->subsys[subsys_id];
	parent_id = parent_css->id;
	depth = parent_id->depth + 1;

	child_id = get_new_cssid(ss, depth);
	if (IS_ERR(child_id))
		return PTR_ERR(child_id);
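	/*
	 * stack[] records the IDs on the path from the hierarchy root down
	 * to this css: entries 0..depth-1 are inherited from the parent and
	 * stack[depth] is our own ID. This is what makes css_is_ancestor()'s
	 * stack[root->depth] == root->id test work.
	 */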
	for (i = 0; i < depth; i++)
		child_id->stack[i] = parent_id->stack[i];
	child_id->stack[depth] = child_id->id;
	/*
	 * child_id->css pointer will be set after this cgroup is available
	 * see cgroup_populate_dir()
	 */
	rcu_assign_pointer(child_css->id, child_id);

	return 0;
}
/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns pointer to cgroup_subsys_state if there is valid one with id.
 * NULL if not. Should be called under rcu_read_lock()
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
	struct css_id *cssid = NULL;

	BUG_ON(!ss->use_id);
	cssid = idr_find(&ss->idr, id);

	if (unlikely(!cssid))
		return NULL;

	return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);
/**
 * css_get_next - lookup next cgroup under specified hierarchy.
 * @ss: pointer to subsystem
 * @id: current position of iteration.
 * @root: pointer to css. search tree under this.
 * @foundid: position of found object.
 *
 * Search next css under the specified hierarchy of rootid. Calling under
 * rcu_read_lock() is necessary. Returns NULL if it reaches the end.
 */
struct cgroup_subsys_state *
css_get_next(struct cgroup_subsys *ss, int id,
	     struct cgroup_subsys_state *root, int *foundid)
{
	struct cgroup_subsys_state *ret = NULL;
	struct css_id *tmp;
	int tmpid;
	int rootid = css_id(root);
	int depth = css_depth(root);

	if (!rootid)
		return NULL;

	BUG_ON(!ss->use_id);
	/* fill start point for scan */
	tmpid = id;
	while (1) {
		/*
		 * scan next entry from bitmap(tree), tmpid is updated after
		 * idr_get_next().
		 */
		spin_lock(&ss->id_lock);
		tmp = idr_get_next(&ss->idr, &tmpid);
		spin_unlock(&ss->id_lock);

		if (!tmp)
			break;
		if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
			ret = rcu_dereference(tmp->css);
			if (ret) {
				*foundid = tmpid;
				break;
			}
		}
		/* continue to scan from next id */
		tmpid = tmpid + 1;
	}
	return ret;
}
  4176. #ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
						struct cgroup *cont)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	kfree(cont->subsys[debug_subsys_id]);
}
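
/* Simple u64 getters backing the read_u64 debug files registered below. */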
static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
{
	return atomic_read(&cont->count);
}

static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
{
	return cgroup_task_count(cont);
}

static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup *cont,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&current->cgroups->refcount);
	rcu_read_unlock();
	return count;
}
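
/*
 * For each hierarchy the current task's css_set is linked into, print
 * the hierarchy id and the name of the cgroup the task occupies there.
 */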
static int current_css_set_cg_links_read(struct cgroup *cont,
					 struct cftype *cft,
					 struct seq_file *seq)
{
	struct cg_cgroup_link *link;
	struct css_set *cg;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cg = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		const char *name;

		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
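
/*
 * For each css_set linked to this cgroup, list its member tasks by pid
 * (in the reader's pid namespace), truncating over-long listings with
 * "..." once more than MAX_TASKS_SHOWN_PER_CSS entries have been shown.
 */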
static int cgroup_css_links_read(struct cgroup *cont,
				 struct cftype *cft,
				 struct seq_file *seq)
{
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cg);
		list_for_each_entry(task, &cg->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			} else {
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
			}
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}
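
/*
 * Report the cgroup's CGRP_RELEASABLE flag, consumed by the
 * notify_on_release/release-agent machinery.
 */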
static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
}

static struct cftype debug_files[] = {
	{
		.name = "cgroup_refcount",
		.read_u64 = cgroup_refcount_read,
	},
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},
	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},
	{
		.name = "current_css_set_cg_links",
		.read_seq_string = current_css_set_cg_links_read,
	},
	{
		.name = "cgroup_css_links",
		.read_seq_string = cgroup_css_links_read,
	},
	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},
};

static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, debug_files,
				ARRAY_SIZE(debug_files));
}

struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.create = debug_create,
	.destroy = debug_destroy,
	.populate = debug_populate,
	.subsys_id = debug_subsys_id,
};
#endif /* CONFIG_CGROUP_DEBUG */