- /*
- * Generic process-grouping system.
- *
- * Based originally on the cpuset system, extracted by Paul Menage
- * Copyright (C) 2006 Google, Inc
- *
- * Notifications support
- * Copyright (C) 2009 Nokia Corporation
- * Author: Kirill A. Shutemov
- *
- * Copyright notices from the original cpuset code:
- * --------------------------------------------------
- * Copyright (C) 2003 BULL SA.
- * Copyright (C) 2004-2006 Silicon Graphics, Inc.
- *
- * Portions derived from Patrick Mochel's sysfs code.
- * sysfs is Copyright (c) 2001-3 Patrick Mochel
- *
- * 2003-10-10 Written by Simon Derr.
- * 2003-10-22 Updates by Stephen Hemminger.
- * 2004 May-July Rework by Paul Jackson.
- * ---------------------------------------------------
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
- #include <linux/cgroup.h>
- #include <linux/cred.h>
- #include <linux/ctype.h>
- #include <linux/errno.h>
- #include <linux/fs.h>
- #include <linux/init_task.h>
- #include <linux/kernel.h>
- #include <linux/list.h>
- #include <linux/mm.h>
- #include <linux/mutex.h>
- #include <linux/mount.h>
- #include <linux/pagemap.h>
- #include <linux/proc_fs.h>
- #include <linux/rcupdate.h>
- #include <linux/sched.h>
- #include <linux/backing-dev.h>
- #include <linux/seq_file.h>
- #include <linux/slab.h>
- #include <linux/magic.h>
- #include <linux/spinlock.h>
- #include <linux/string.h>
- #include <linux/sort.h>
- #include <linux/kmod.h>
- #include <linux/module.h>
- #include <linux/delayacct.h>
- #include <linux/cgroupstats.h>
- #include <linux/hashtable.h>
- #include <linux/namei.h>
- #include <linux/pid_namespace.h>
- #include <linux/idr.h>
- #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
- #include <linux/eventfd.h>
- #include <linux/poll.h>
- #include <linux/flex_array.h> /* used in cgroup_attach_proc */
- #include <linux/kthread.h>
- #include <linux/atomic.h>
- /* css deactivation bias, makes css->refcnt negative to deny new trygets */
- #define CSS_DEACT_BIAS INT_MIN
- /*
- * cgroup_mutex is the master lock. Any modification to cgroup or its
- * hierarchy must be performed while holding it.
- *
- * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
- * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
- * release_agent_path and so on. Modifying requires both cgroup_mutex and
- * cgroup_root_mutex. Readers can acquire either of the two. This is to
- * break the following locking order cycle.
- *
- * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
- * B. namespace_sem -> cgroup_mutex
- *
- * B happens only through cgroup_show_options() and using cgroup_root_mutex
- * breaks it.
- */
- static DEFINE_MUTEX(cgroup_mutex);
- static DEFINE_MUTEX(cgroup_root_mutex);
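- /*
-  * A minimal sketch of the nesting described above (illustrative, not a
-  * call site in this file): writers take both mutexes, in this order,
-  * before touching cgroupfs_root fields; readers may take either one.
-  *
-  *	mutex_lock(&cgroup_mutex);
-  *	mutex_lock(&cgroup_root_mutex);
-  *	... modify root->subsys_list, root->flags, release_agent_path ...
-  *	mutex_unlock(&cgroup_root_mutex);
-  *	mutex_unlock(&cgroup_mutex);
-  */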
- /*
- * Generate an array of cgroup subsystem pointers. At boot time, this is
- * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
- * registered after that. The mutable section of this array is protected by
- * cgroup_mutex.
- */
- #define SUBSYS(_x) &_x ## _subsys,
- static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
- #include <linux/cgroup_subsys.h>
- };
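- /*
-  * For illustration (assuming only CONFIG_CPUSETS among the builtin
-  * subsystems is enabled), the SUBSYS() expansion above would yield
-  * roughly:
-  *
-  *	static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
-  *		&cpuset_subsys,
-  *	};
-  */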
- #define MAX_CGROUP_ROOT_NAMELEN 64
- /*
- * A cgroupfs_root represents the root of a cgroup hierarchy,
- * and may be associated with a superblock to form an active
- * hierarchy
- */
- struct cgroupfs_root {
- struct super_block *sb;
- /*
- * The bitmask of subsystems intended to be attached to this
- * hierarchy
- */
- unsigned long subsys_mask;
- /* Unique id for this hierarchy. */
- int hierarchy_id;
- /* The bitmask of subsystems currently attached to this hierarchy */
- unsigned long actual_subsys_mask;
- /* A list running through the attached subsystems */
- struct list_head subsys_list;
- /* The root cgroup for this hierarchy */
- struct cgroup top_cgroup;
- /* Tracks how many cgroups are currently defined in hierarchy. */
- int number_of_cgroups;
- /* A list running through the active hierarchies */
- struct list_head root_list;
- /* All cgroups on this root, cgroup_mutex protected */
- struct list_head allcg_list;
- /* Hierarchy-specific flags */
- unsigned long flags;
- /* The path to use for release notifications. */
- char release_agent_path[PATH_MAX];
- /* The name for this hierarchy - may be empty */
- char name[MAX_CGROUP_ROOT_NAMELEN];
- };
- /*
- * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
- * subsystems that are otherwise unattached - it never has more than a
- * single cgroup, and all tasks are part of that cgroup.
- */
- static struct cgroupfs_root rootnode;
- /*
- * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.
- */
- struct cfent {
- struct list_head node;
- struct dentry *dentry;
- struct cftype *type;
- };
- /*
- * CSS ID -- an ID per subsys's Cgroup Subsys State (CSS), used only when
- * cgroup_subsys->use_id != 0.
- */
- #define CSS_ID_MAX (65535)
- struct css_id {
- /*
- * The css to which this ID points. This pointer is set to a valid value
- * after the cgroup is populated. If the cgroup is removed, this will be
- * NULL. This pointer is expected to be RCU-safe because destroy()
- * is called after synchronize_rcu(). But for safe use, css_is_removed()
- * or css_tryget() should be used to avoid races.
- */
- struct cgroup_subsys_state __rcu *css;
- /*
- * ID of this css.
- */
- unsigned short id;
- /*
- * Depth in the hierarchy this ID belongs to.
- */
- unsigned short depth;
- /*
- * ID is freed by RCU. (and lookup routine is RCU safe.)
- */
- struct rcu_head rcu_head;
- /*
- * IDs along the path from the hierarchy root down to this css.
- */
- unsigned short stack[0]; /* Array of length (depth+1) */
- };
- /*
- * cgroup_event represents events which userspace wants to receive.
- */
- struct cgroup_event {
- /*
- * Cgroup which the event belongs to.
- */
- struct cgroup *cgrp;
- /*
- * Control file with which the event is associated.
- */
- struct cftype *cft;
- /*
- * eventfd to signal userspace about the event.
- */
- struct eventfd_ctx *eventfd;
- /*
- * Each of these is stored in a list by the cgroup.
- */
- struct list_head list;
- /*
- * All fields below needed to unregister event when
- * userspace closes eventfd.
- */
- poll_table pt;
- wait_queue_head_t *wqh;
- wait_queue_t wait;
- struct work_struct remove;
- };
- /* The list of hierarchy roots */
- static LIST_HEAD(roots);
- static int root_count;
- static DEFINE_IDA(hierarchy_ida);
- static int next_hierarchy_id;
- static DEFINE_SPINLOCK(hierarchy_id_lock);
- /* dummytop is a shorthand for the dummy hierarchy's top cgroup */
- #define dummytop (&rootnode.top_cgroup)
- /* This flag indicates whether tasks in the fork and exit paths should
- * check for fork/exit handlers to call. This avoids us having to do
- * extra work in the fork/exit path if none of the subsystems need to
- * be called.
- */
- static int need_forkexit_callback __read_mostly;
- #ifdef CONFIG_PROVE_LOCKING
- int cgroup_lock_is_held(void)
- {
- return lockdep_is_held(&cgroup_mutex);
- }
- #else /* #ifdef CONFIG_PROVE_LOCKING */
- int cgroup_lock_is_held(void)
- {
- return mutex_is_locked(&cgroup_mutex);
- }
- #endif /* #else #ifdef CONFIG_PROVE_LOCKING */
- EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
- static int css_unbias_refcnt(int refcnt)
- {
- return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
- }
- /* the current nr of refs, always >= 0 whether @css is deactivated or not */
- static int css_refcnt(struct cgroup_subsys_state *css)
- {
- int v = atomic_read(&css->refcnt);
- return css_unbias_refcnt(v);
- }
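- /*
-  * Worked example of the bias arithmetic above: deactivating a css with
-  * 3 refs adds CSS_DEACT_BIAS (INT_MIN), leaving refcnt == INT_MIN + 3,
-  * which is negative and therefore denies new trygets; css_unbias_refcnt()
-  * then reports (INT_MIN + 3) - INT_MIN == 3, the true number of refs.
-  */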
- /* convenient tests for these bits */
- inline int cgroup_is_removed(const struct cgroup *cgrp)
- {
- return test_bit(CGRP_REMOVED, &cgrp->flags);
- }
- /* bits in struct cgroupfs_root flags field */
- enum {
- ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
- ROOT_XATTR, /* supports extended attributes */
- };
- static int cgroup_is_releasable(const struct cgroup *cgrp)
- {
- const int bits =
- (1 << CGRP_RELEASABLE) |
- (1 << CGRP_NOTIFY_ON_RELEASE);
- return (cgrp->flags & bits) == bits;
- }
- static int notify_on_release(const struct cgroup *cgrp)
- {
- return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
- }
- static int clone_children(const struct cgroup *cgrp)
- {
- return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
- }
- /*
- * for_each_subsys() allows you to iterate on each subsystem attached to
- * an active hierarchy
- */
- #define for_each_subsys(_root, _ss) \
- list_for_each_entry(_ss, &_root->subsys_list, sibling)
- /* for_each_active_root() allows you to iterate across the active hierarchies */
- #define for_each_active_root(_root) \
- list_for_each_entry(_root, &roots, root_list)
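- /*
-  * Usage sketch (illustrative; real call sites appear later in this
-  * file). The root list and each root's subsys list are protected by
-  * cgroup_mutex, so such a walk would run under it:
-  *
-  *	struct cgroupfs_root *root;
-  *	struct cgroup_subsys *ss;
-  *
-  *	for_each_active_root(root)
-  *		for_each_subsys(root, ss)
-  *			pr_info("%s on hierarchy %d\n", ss->name,
-  *				root->hierarchy_id);
-  */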
- static inline struct cgroup *__d_cgrp(struct dentry *dentry)
- {
- return dentry->d_fsdata;
- }
- static inline struct cfent *__d_cfe(struct dentry *dentry)
- {
- return dentry->d_fsdata;
- }
- static inline struct cftype *__d_cft(struct dentry *dentry)
- {
- return __d_cfe(dentry)->type;
- }
- /* the list of cgroups eligible for automatic release. Protected by
- * release_list_lock */
- static LIST_HEAD(release_list);
- static DEFINE_RAW_SPINLOCK(release_list_lock);
- static void cgroup_release_agent(struct work_struct *work);
- static DECLARE_WORK(release_agent_work, cgroup_release_agent);
- static void check_for_release(struct cgroup *cgrp);
- /*
- * A queue for waiters doing rmdir() on a cgroup. A task will sleep while
- * list_empty(&cgroup->children) && some subsys still holds a
- * reference on css->refcnt. In general, this refcnt is expected to go down
- * to zero soon.
- *
- * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
- */
- static DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
- static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
- {
- if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
- wake_up_all(&cgroup_rmdir_waitq);
- }
- void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
- {
- css_get(css);
- }
- void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
- {
- cgroup_wakeup_rmdir_waiter(css->cgroup);
- css_put(css);
- }
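- /*
-  * The two helpers above are intended to bracket asynchronous subsystem
-  * work so that rmdir() neither destroys the cgroup mid-operation nor
-  * sleeps on cgroup_rmdir_waitq forever. Sketch of the intended pattern
-  * in a subsystem (do_async_work() is hypothetical):
-  *
-  *	cgroup_exclude_rmdir(css);		// pin the css
-  *	do_async_work(css);
-  *	cgroup_release_and_wakeup_rmdir(css);	// unpin, wake rmdir waiters
-  */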
- /* Link structure for associating css_set objects with cgroups */
- struct cg_cgroup_link {
- /*
- * List running through cg_cgroup_links associated with a
- * cgroup, anchored on cgroup->css_sets
- */
- struct list_head cgrp_link_list;
- struct cgroup *cgrp;
- /*
- * List running through cg_cgroup_links pointing at a
- * single css_set object, anchored on css_set->cg_links
- */
- struct list_head cg_link_list;
- struct css_set *cg;
- };
- /* The default css_set - used by init and its children prior to any
- * hierarchies being mounted. It contains a pointer to the root state
- * for each subsystem. Also used to anchor the list of css_sets. Not
- * reference-counted, to improve performance when child cgroups
- * haven't been created.
- */
- static struct css_set init_css_set;
- static struct cg_cgroup_link init_css_set_link;
- static int cgroup_init_idr(struct cgroup_subsys *ss,
- struct cgroup_subsys_state *css);
- /* css_set_lock protects the list of css_set objects, and the
- * chain of tasks off each css_set. Nests outside task->alloc_lock
- * due to cgroup_iter_start() */
- static DEFINE_RWLOCK(css_set_lock);
- static int css_set_count;
- /*
- * hash table for cgroup groups. This speeds up finding an existing
- * css_set. This hash doesn't (currently) take into account cgroups
- * in empty hierarchies.
- */
- #define CSS_SET_HASH_BITS 7
- static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
- static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
- {
- int i;
- unsigned long key = 0UL;
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
- key += (unsigned long)css[i];
- key = (key >> 16) ^ key;
- return key;
- }
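- /*
-  * Example (pointer values hypothetical): for two css pointers
-  * 0xffff880012340000 and 0xffff880056780000, key starts as their sum;
-  * the final "(key >> 16) ^ key" folds the high bits down so that the
-  * low-order bits consumed by the CSS_SET_HASH_BITS-wide table still
-  * vary between css_sets.
-  */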
- /* We don't maintain the lists running through each css_set to its
- * tasks until after the first call to cgroup_iter_start(). This
- * reduces the fork()/exit() overhead for people who have cgroups
- * compiled into their kernel but not actually in use */
- static int use_task_css_set_links __read_mostly;
- /*
- * refcounted get/put for css_set objects
- */
- static inline void get_css_set(struct css_set *cg)
- {
- atomic_inc(&cg->refcount);
- }
- static void put_css_set(struct css_set *cg)
- {
- struct cg_cgroup_link *link;
- struct cg_cgroup_link *saved_link;
- /*
- * Ensure that the refcount doesn't hit zero while any readers
- * can see it. Similar to atomic_dec_and_lock(), but for an
- * rwlock
- */
- if (atomic_add_unless(&cg->refcount, -1, 1))
- return;
- write_lock(&css_set_lock);
- if (!atomic_dec_and_test(&cg->refcount)) {
- write_unlock(&css_set_lock);
- return;
- }
- hash_del(&cg->hlist);
- css_set_count--;
- list_for_each_entry_safe(link, saved_link, &cg->cg_links,
- cg_link_list) {
- struct cgroup *cgrp = link->cgrp;
- list_del(&link->cg_link_list);
- list_del(&link->cgrp_link_list);
- /*
- * We may not be holding cgroup_mutex, and if cgrp->count is
- * dropped to 0 the cgroup can be destroyed at any time, hence
- * rcu_read_lock is used to keep it alive.
- */
- rcu_read_lock();
- if (atomic_dec_and_test(&cgrp->count)) {
- check_for_release(cgrp);
- cgroup_wakeup_rmdir_waiter(cgrp);
- }
- rcu_read_unlock();
- kfree(link);
- }
- write_unlock(&css_set_lock);
- kfree_rcu(cg, rcu_head);
- }
- /*
- * compare_css_sets - helper function for find_existing_css_set().
- * @cg: candidate css_set being tested
- * @old_cg: existing css_set for a task
- * @new_cgrp: cgroup that's being entered by the task
- * @template: desired set of css pointers in css_set (pre-calculated)
- *
- * Returns true if "cg" matches "old_cg" except for the hierarchy
- * which "new_cgrp" belongs to, for which it should match "new_cgrp".
- */
- static bool compare_css_sets(struct css_set *cg,
- struct css_set *old_cg,
- struct cgroup *new_cgrp,
- struct cgroup_subsys_state *template[])
- {
- struct list_head *l1, *l2;
- if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
- /* Not all subsystems matched */
- return false;
- }
- /*
- * Compare cgroup pointers in order to distinguish between
- * different cgroups in hierarchies with no subsystems. We
- * could get by with just this check alone (and skip the
- * memcmp above) but on most setups the memcmp check will
- * avoid the need for this more expensive check on almost all
- * candidates.
- */
- l1 = &cg->cg_links;
- l2 = &old_cg->cg_links;
- while (1) {
- struct cg_cgroup_link *cgl1, *cgl2;
- struct cgroup *cg1, *cg2;
- l1 = l1->next;
- l2 = l2->next;
- /* See if we reached the end - both lists are equal length. */
- if (l1 == &cg->cg_links) {
- BUG_ON(l2 != &old_cg->cg_links);
- break;
- } else {
- BUG_ON(l2 == &old_cg->cg_links);
- }
- /* Locate the cgroups associated with these links. */
- cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
- cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
- cg1 = cgl1->cgrp;
- cg2 = cgl2->cgrp;
- /* Hierarchies should be linked in the same order. */
- BUG_ON(cg1->root != cg2->root);
- /*
- * If this hierarchy is the hierarchy of the cgroup
- * that's changing, then we need to check that this
- * css_set points to the new cgroup; if it's any other
- * hierarchy, then this css_set should point to the
- * same cgroup as the old css_set.
- */
- if (cg1->root == new_cgrp->root) {
- if (cg1 != new_cgrp)
- return false;
- } else {
- if (cg1 != cg2)
- return false;
- }
- }
- return true;
- }
- /*
- * find_existing_css_set() is a helper for
- * find_css_set(), and checks to see whether an existing
- * css_set is suitable.
- *
- * oldcg: the cgroup group that we're using before the cgroup
- * transition
- *
- * cgrp: the cgroup that we're moving into
- *
- * template: location in which to build the desired set of subsystem
- * state objects for the new cgroup group
- */
- static struct css_set *find_existing_css_set(
- struct css_set *oldcg,
- struct cgroup *cgrp,
- struct cgroup_subsys_state *template[])
- {
- int i;
- struct cgroupfs_root *root = cgrp->root;
- struct hlist_node *node;
- struct css_set *cg;
- unsigned long key;
- /*
- * Build the set of subsystem state objects that we want to see in the
- * new css_set. While subsystems can change globally, the entries here
- * won't change, so no need for locking.
- */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- if (root->subsys_mask & (1UL << i)) {
- /* Subsystem is in this hierarchy. So we want
- * the subsystem state from the new
- * cgroup */
- template[i] = cgrp->subsys[i];
- } else {
- /* Subsystem is not in this hierarchy, so we
- * don't want to change the subsystem state */
- template[i] = oldcg->subsys[i];
- }
- }
- key = css_set_hash(template);
- hash_for_each_possible(css_set_table, cg, node, hlist, key) {
- if (!compare_css_sets(cg, oldcg, cgrp, template))
- continue;
- /* This css_set matches what we need */
- return cg;
- }
- /* No existing cgroup group matched */
- return NULL;
- }
- static void free_cg_links(struct list_head *tmp)
- {
- struct cg_cgroup_link *link;
- struct cg_cgroup_link *saved_link;
- list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
- list_del(&link->cgrp_link_list);
- kfree(link);
- }
- }
- /*
- * allocate_cg_links() allocates "count" cg_cgroup_link structures
- * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
- * success or a negative error
- */
- static int allocate_cg_links(int count, struct list_head *tmp)
- {
- struct cg_cgroup_link *link;
- int i;
- INIT_LIST_HEAD(tmp);
- for (i = 0; i < count; i++) {
- link = kmalloc(sizeof(*link), GFP_KERNEL);
- if (!link) {
- free_cg_links(tmp);
- return -ENOMEM;
- }
- list_add(&link->cgrp_link_list, tmp);
- }
- return 0;
- }
- /**
- * link_css_set - a helper function to link a css_set to a cgroup
- * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
- * @cg: the css_set to be linked
- * @cgrp: the destination cgroup
- */
- static void link_css_set(struct list_head *tmp_cg_links,
- struct css_set *cg, struct cgroup *cgrp)
- {
- struct cg_cgroup_link *link;
- BUG_ON(list_empty(tmp_cg_links));
- link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
- cgrp_link_list);
- link->cg = cg;
- link->cgrp = cgrp;
- atomic_inc(&cgrp->count);
- list_move(&link->cgrp_link_list, &cgrp->css_sets);
- /*
- * Always add links to the tail of the list so that the list
- * is sorted by order of hierarchy creation
- */
- list_add_tail(&link->cg_link_list, &cg->cg_links);
- }
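- /*
-  * Together the helpers above follow a preallocate-then-commit pattern,
-  * as find_css_set() below does (sketch):
-  *
-  *	struct list_head tmp_cg_links;
-  *
-  *	if (allocate_cg_links(root_count, &tmp_cg_links) < 0)
-  *		return NULL;			// nothing to undo
-  *	write_lock(&css_set_lock);
-  *	link_css_set(&tmp_cg_links, cg, cgrp);	// consumes one link
-  *	write_unlock(&css_set_lock);
-  */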
- /*
- * find_css_set() takes an existing cgroup group and a
- * cgroup object, and returns a css_set object that's
- * equivalent to the old group, but with the given cgroup
- * substituted into the appropriate hierarchy. Must be called with
- * cgroup_mutex held
- */
- static struct css_set *find_css_set(
- struct css_set *oldcg, struct cgroup *cgrp)
- {
- struct css_set *res;
- struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
- struct list_head tmp_cg_links;
- struct cg_cgroup_link *link;
- unsigned long key;
- /* First see if we already have a cgroup group that matches
- * the desired set */
- read_lock(&css_set_lock);
- res = find_existing_css_set(oldcg, cgrp, template);
- if (res)
- get_css_set(res);
- read_unlock(&css_set_lock);
- if (res)
- return res;
- res = kmalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- return NULL;
- /* Allocate all the cg_cgroup_link objects that we'll need */
- if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
- kfree(res);
- return NULL;
- }
- atomic_set(&res->refcount, 1);
- INIT_LIST_HEAD(&res->cg_links);
- INIT_LIST_HEAD(&res->tasks);
- INIT_HLIST_NODE(&res->hlist);
- /* Copy the set of subsystem state objects generated in
- * find_existing_css_set() */
- memcpy(res->subsys, template, sizeof(res->subsys));
- write_lock(&css_set_lock);
- /* Add reference counts and links from the new css_set. */
- list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
- struct cgroup *c = link->cgrp;
- if (c->root == cgrp->root)
- c = cgrp;
- link_css_set(&tmp_cg_links, res, c);
- }
- BUG_ON(!list_empty(&tmp_cg_links));
- css_set_count++;
- /* Add this cgroup group to the hash table */
- key = css_set_hash(res->subsys);
- hash_add(css_set_table, &res->hlist, key);
- write_unlock(&css_set_lock);
- return res;
- }
- /*
- * Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex held.
- */
- static struct cgroup *task_cgroup_from_root(struct task_struct *task,
- struct cgroupfs_root *root)
- {
- struct css_set *css;
- struct cgroup *res = NULL;
- BUG_ON(!mutex_is_locked(&cgroup_mutex));
- read_lock(&css_set_lock);
- /*
- * No need to lock the task - since we hold cgroup_mutex the
- * task can't change groups, so the only thing that can happen
- * is that it exits and its css is set back to init_css_set.
- */
- css = task->cgroups;
- if (css == &init_css_set) {
- res = &root->top_cgroup;
- } else {
- struct cg_cgroup_link *link;
- list_for_each_entry(link, &css->cg_links, cg_link_list) {
- struct cgroup *c = link->cgrp;
- if (c->root == root) {
- res = c;
- break;
- }
- }
- }
- read_unlock(&css_set_lock);
- BUG_ON(!res);
- return res;
- }
- /*
- * There is one global cgroup mutex. We also require taking
- * task_lock() when dereferencing a task's cgroup subsys pointers.
- * See "The task_lock() exception", at the end of this comment.
- *
- * A task must hold cgroup_mutex to modify cgroups.
- *
- * Any task can increment and decrement the count field without lock.
- * So in general, code holding cgroup_mutex can't rely on the count
- * field not changing. However, if the count goes to zero, then only
- * cgroup_attach_task() can increment it again. Because a count of zero
- * means that no tasks are currently attached, therefore there is no
- * way a task attached to that cgroup can fork (the other way to
- * increment the count). So code holding cgroup_mutex can safely
- * assume that if the count is zero, it will stay zero. Similarly, if
- * a task holds cgroup_mutex on a cgroup with zero count, it
- * knows that the cgroup won't be removed, as cgroup_rmdir()
- * needs that mutex.
- *
- * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
- * (usually) take cgroup_mutex. These are the two most performance
- * critical pieces of code here. The exception occurs on cgroup_exit(),
- * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
- * is taken, and if the cgroup count is zero, a usermode call is made
- * to the release agent with the name of the cgroup (path relative to
- * the root of cgroup file system) as the argument.
- *
- * A cgroup can only be deleted if both its 'count' of using tasks
- * is zero, and its list of 'children' cgroups is empty. Since all
- * tasks in the system use _some_ cgroup, and since there is always at
- * least one task in the system (init, pid == 1), therefore, top_cgroup
- * always has child cgroups and/or using tasks. So we don't
- * need a special hack to ensure that top_cgroup cannot be deleted.
- *
- * The task_lock() exception
- *
- * The need for this exception arises from the action of
- * cgroup_attach_task(), which overwrites one task's cgroup pointer with
- * another. It does so using cgroup_mutex, however there are
- * several performance critical places that need to reference
- * task->cgroups without the expense of grabbing a system global
- * mutex. Therefore except as noted below, when dereferencing or, as
- * in cgroup_attach_task(), modifying a task's cgroups pointer we use
- * task_lock(), which acts on a spinlock (task->alloc_lock) already in
- * the task_struct routinely used for such matters.
- *
- * P.S. One more locking exception. RCU is used to guard the
- * update of a task's cgroup pointer by cgroup_attach_task()
- */
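- /*
-  * A minimal sketch of the task_lock() exception above (illustrative;
-  * real call sites appear later in this file):
-  *
-  *	task_lock(tsk);
-  *	cg = tsk->cgroups;
-  *	get_css_set(cg);	// tsk can't switch css_set under us
-  *	task_unlock(tsk);
-  */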
- /**
- * cgroup_lock - lock out any changes to cgroup structures
- *
- */
- void cgroup_lock(void)
- {
- mutex_lock(&cgroup_mutex);
- }
- EXPORT_SYMBOL_GPL(cgroup_lock);
- /**
- * cgroup_unlock - release lock on cgroup changes
- *
- * Undo the lock taken in a previous cgroup_lock() call.
- */
- void cgroup_unlock(void)
- {
- mutex_unlock(&cgroup_mutex);
- }
- EXPORT_SYMBOL_GPL(cgroup_unlock);
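- /*
-  * Callers are expected to bracket hierarchy-modifying sections with the
-  * exported pair above, e.g. (sketch):
-  *
-  *	cgroup_lock();
-  *	... inspect or modify cgroup state ...
-  *	cgroup_unlock();
-  */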
- /*
- * A couple of forward declarations required, due to cyclic reference loop:
- * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
- * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
- * -> cgroup_mkdir.
- */
- static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
- static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
- static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
- static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
- unsigned long subsys_mask);
- static const struct inode_operations cgroup_dir_inode_operations;
- static const struct file_operations proc_cgroupstats_operations;
- static struct backing_dev_info cgroup_backing_dev_info = {
- .name = "cgroup",
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
- };
- static int alloc_css_id(struct cgroup_subsys *ss,
- struct cgroup *parent, struct cgroup *child);
- static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
- {
- struct inode *inode = new_inode(sb);
- if (inode) {
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
- }
- return inode;
- }
- /*
- * Call subsys's pre_destroy handler.
- * This is called before css refcnt check.
- */
- static int cgroup_call_pre_destroy(struct cgroup *cgrp)
- {
- struct cgroup_subsys *ss;
- int ret = 0;
- for_each_subsys(cgrp->root, ss) {
- if (!ss->pre_destroy)
- continue;
- ret = ss->pre_destroy(cgrp);
- if (ret) {
- /* ->pre_destroy() failure is being deprecated */
- WARN_ON_ONCE(!ss->__DEPRECATED_clear_css_refs);
- break;
- }
- }
- return ret;
- }
- static void cgroup_diput(struct dentry *dentry, struct inode *inode)
- {
- /* is dentry a directory? if so, kfree() the associated cgroup */
- if (S_ISDIR(inode->i_mode)) {
- struct cgroup *cgrp = dentry->d_fsdata;
- struct cgroup_subsys *ss;
- BUG_ON(!(cgroup_is_removed(cgrp)));
- /* It's possible for external users to be holding css
- * reference counts on a cgroup; css_put() needs to
- * be able to access the cgroup after decrementing
- * the reference count in order to know if it needs to
- * queue the cgroup to be handled by the release
- * agent */
- synchronize_rcu();
- mutex_lock(&cgroup_mutex);
- /*
- * Release the subsystem state objects.
- */
- for_each_subsys(cgrp->root, ss)
- ss->destroy(cgrp);
- cgrp->root->number_of_cgroups--;
- mutex_unlock(&cgroup_mutex);
- /*
- * Drop the active superblock reference that we took when we
- * created the cgroup
- */
- deactivate_super(cgrp->root->sb);
- /*
- * if we're getting rid of the cgroup, refcount should ensure
- * that there are no pidlists left.
- */
- BUG_ON(!list_empty(&cgrp->pidlists));
- simple_xattrs_free(&cgrp->xattrs);
- kfree_rcu(cgrp, rcu_head);
- } else {
- struct cfent *cfe = __d_cfe(dentry);
- struct cgroup *cgrp = dentry->d_parent->d_fsdata;
- struct cftype *cft = cfe->type;
- WARN_ONCE(!list_empty(&cfe->node) &&
- cgrp != &cgrp->root->top_cgroup,
- "cfe still linked for %s\n", cfe->type->name);
- kfree(cfe);
- simple_xattrs_free(&cft->xattrs);
- }
- iput(inode);
- }
- static int cgroup_delete(const struct dentry *d)
- {
- return 1;
- }
- static void remove_dir(struct dentry *d)
- {
- struct dentry *parent = dget(d->d_parent);
- d_delete(d);
- simple_rmdir(parent->d_inode, d);
- dput(parent);
- }
- static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
- {
- struct cfent *cfe;
- lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
- lockdep_assert_held(&cgroup_mutex);
- list_for_each_entry(cfe, &cgrp->files, node) {
- struct dentry *d = cfe->dentry;
- if (cft && cfe->type != cft)
- continue;
- dget(d);
- d_delete(d);
- simple_unlink(cgrp->dentry->d_inode, d);
- list_del_init(&cfe->node);
- dput(d);
- return 0;
- }
- return -ENOENT;
- }
- /**
- * cgroup_clear_directory - selective removal of base and subsystem files
- * @dir: directory containing the files
- * @base_files: true if the base files should be removed
- * @subsys_mask: mask of the subsystem ids whose files should be removed
- */
- static void cgroup_clear_directory(struct dentry *dir, bool base_files,
- unsigned long subsys_mask)
- {
- struct cgroup *cgrp = __d_cgrp(dir);
- struct cgroup_subsys *ss;
- for_each_subsys(cgrp->root, ss) {
- struct cftype_set *set;
- if (!test_bit(ss->subsys_id, &subsys_mask))
- continue;
- list_for_each_entry(set, &ss->cftsets, node)
- cgroup_rm_file(cgrp, set->cfts);
- }
- if (base_files) {
- while (!list_empty(&cgrp->files))
- cgroup_rm_file(cgrp, NULL);
- }
- }
- /*
- * NOTE: the dentry must have been dget()'ed
- */
- static void cgroup_d_remove_dir(struct dentry *dentry)
- {
- struct dentry *parent;
- struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
- cgroup_clear_directory(dentry, true, root->subsys_mask);
- parent = dentry->d_parent;
- spin_lock(&parent->d_lock);
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- list_del_init(&dentry->d_child);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&parent->d_lock);
- remove_dir(dentry);
- }
- /*
- * Call with cgroup_mutex held. Drops reference counts on modules, including
- * any duplicate ones that parse_cgroupfs_options took. If this function
- * returns an error, no reference counts are touched.
- */
- static int rebind_subsystems(struct cgroupfs_root *root,
- unsigned long final_subsys_mask)
- {
- unsigned long added_mask, removed_mask;
- struct cgroup *cgrp = &root->top_cgroup;
- int i;
- BUG_ON(!mutex_is_locked(&cgroup_mutex));
- BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
- removed_mask = root->actual_subsys_mask & ~final_subsys_mask;
- added_mask = final_subsys_mask & ~root->actual_subsys_mask;
- /* Check that any added subsystems are currently free */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- unsigned long bit = 1UL << i;
- struct cgroup_subsys *ss = subsys[i];
- if (!(bit & added_mask))
- continue;
- /*
- * Nobody should tell us to do a subsys that doesn't exist:
- * parse_cgroupfs_options should catch that case and refcounts
- * ensure that subsystems won't disappear once selected.
- */
- BUG_ON(ss == NULL);
- if (ss->root != &rootnode) {
- /* Subsystem isn't free */
- return -EBUSY;
- }
- }
- /* Currently we don't handle adding/removing subsystems when
- * any child cgroups exist. This is theoretically supportable
- * but involves complex error handling, so it's being left until
- * later */
- if (root->number_of_cgroups > 1)
- return -EBUSY;
- /* Process each subsystem */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- unsigned long bit = 1UL << i;
- if (bit & added_mask) {
- /* We're binding this subsystem to this hierarchy */
- BUG_ON(ss == NULL);
- BUG_ON(cgrp->subsys[i]);
- BUG_ON(!dummytop->subsys[i]);
- BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
- cgrp->subsys[i] = dummytop->subsys[i];
- cgrp->subsys[i]->cgroup = cgrp;
- list_move(&ss->sibling, &root->subsys_list);
- ss->root = root;
- if (ss->bind)
- ss->bind(cgrp);
- /* refcount was already taken, and we're keeping it */
- } else if (bit & removed_mask) {
- /* We're removing this subsystem */
- BUG_ON(ss == NULL);
- BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
- BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
- if (ss->bind)
- ss->bind(dummytop);
- dummytop->subsys[i]->cgroup = dummytop;
- cgrp->subsys[i] = NULL;
- subsys[i]->root = &rootnode;
- list_move(&ss->sibling, &rootnode.subsys_list);
- /* subsystem is now free - drop reference on module */
- module_put(ss->module);
- } else if (bit & final_subsys_mask) {
- /* Subsystem state should already exist */
- BUG_ON(ss == NULL);
- BUG_ON(!cgrp->subsys[i]);
- /*
- * a refcount was taken, but we already had one, so
- * drop the extra reference.
- */
- module_put(ss->module);
- #ifdef CONFIG_MODULE_UNLOAD
- BUG_ON(ss->module && !module_refcount(ss->module));
- #endif
- } else {
- /* Subsystem state shouldn't exist */
- BUG_ON(cgrp->subsys[i]);
- }
- }
- root->subsys_mask = root->actual_subsys_mask = final_subsys_mask;
- synchronize_rcu();
- return 0;
- }
- static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
- {
- struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
- struct cgroup_subsys *ss;
- mutex_lock(&cgroup_root_mutex);
- for_each_subsys(root, ss)
- seq_show_option(seq, ss->name, NULL);
- if (test_bit(ROOT_NOPREFIX, &root->flags))
- seq_puts(seq, ",noprefix");
- if (test_bit(ROOT_XATTR, &root->flags))
- seq_puts(seq, ",xattr");
- if (strlen(root->release_agent_path))
- seq_show_option(seq, "release_agent",
- root->release_agent_path);
- if (clone_children(&root->top_cgroup))
- seq_puts(seq, ",clone_children");
- if (strlen(root->name))
- seq_show_option(seq, "name", root->name);
- mutex_unlock(&cgroup_root_mutex);
- return 0;
- }
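- /*
-  * Sample /proc/mounts line produced with the above (illustrative mount
-  * point; a cpuset-only hierarchy mounted with noprefix):
-  *
-  *	cgroup /sys/fs/cgroup/cpuset cgroup rw,cpuset,noprefix 0 0
-  */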
- struct cgroup_sb_opts {
- unsigned long subsys_mask;
- unsigned long flags;
- char *release_agent;
- bool clone_children;
- char *name;
- /* User explicitly requested empty subsystem */
- bool none;
- struct cgroupfs_root *new_root;
- };
- /*
- * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
- * with cgroup_mutex held to protect the subsys[] array. This function takes
- * refcounts on subsystems to be used, unless it returns error, in which case
- * no refcounts are taken.
- */
- static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
- {
- char *token, *o = data;
- bool all_ss = false, one_ss = false;
- unsigned long mask = (unsigned long)-1;
- int i;
- bool module_pin_failed = false;
- BUG_ON(!mutex_is_locked(&cgroup_mutex));
- #ifdef CONFIG_CPUSETS
- mask = ~(1UL << cpuset_subsys_id);
- #endif
- memset(opts, 0, sizeof(*opts));
- while ((token = strsep(&o, ",")) != NULL) {
- if (!*token)
- return -EINVAL;
- if (!strcmp(token, "none")) {
- /* Explicitly have no subsystems */
- opts->none = true;
- continue;
- }
- if (!strcmp(token, "all")) {
- /* Mutually exclusive option 'all' + subsystem name */
- if (one_ss)
- return -EINVAL;
- all_ss = true;
- continue;
- }
- if (!strcmp(token, "noprefix")) {
- set_bit(ROOT_NOPREFIX, &opts->flags);
- continue;
- }
- if (!strcmp(token, "clone_children")) {
- opts->clone_children = true;
- continue;
- }
- if (!strcmp(token, "xattr")) {
- set_bit(ROOT_XATTR, &opts->flags);
- continue;
- }
- if (!strncmp(token, "release_agent=", 14)) {
- /* Specifying two release agents is forbidden */
- if (opts->release_agent)
- return -EINVAL;
- opts->release_agent =
- kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
- if (!opts->release_agent)
- return -ENOMEM;
- continue;
- }
- if (!strncmp(token, "name=", 5)) {
- const char *name = token + 5;
- /* Can't specify an empty name */
- if (!strlen(name))
- return -EINVAL;
- /* Must match [\w.-]+ */
- for (i = 0; i < strlen(name); i++) {
- char c = name[i];
- if (isalnum(c))
- continue;
- if ((c == '.') || (c == '-') || (c == '_'))
- continue;
- return -EINVAL;
- }
- /* Specifying two names is forbidden */
- if (opts->name)
- return -EINVAL;
- opts->name = kstrndup(name,
- MAX_CGROUP_ROOT_NAMELEN - 1,
- GFP_KERNEL);
- if (!opts->name)
- return -ENOMEM;
- continue;
- }
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss == NULL)
- continue;
- if (strcmp(token, ss->name))
- continue;
- if (ss->disabled)
- continue;
- /* Mutually exclusive option 'all' + subsystem name */
- if (all_ss)
- return -EINVAL;
- set_bit(i, &opts->subsys_mask);
- one_ss = true;
- break;
- }
- if (i == CGROUP_SUBSYS_COUNT)
- return -ENOENT;
- }
- /*
- * If the 'all' option was specified, select all the subsystems.
- * Otherwise, if none of 'none', 'name=' or an explicit subsystem
- * name was specified, default to 'all' as well.
- */
- if (all_ss || (!one_ss && !opts->none && !opts->name)) {
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss == NULL)
- continue;
- if (ss->disabled)
- continue;
- set_bit(i, &opts->subsys_mask);
- }
- }
- /* Consistency checks */
- /*
- * Option noprefix was introduced just for backward compatibility
- * with the old cpuset, so we allow noprefix only if mounting just
- * the cpuset subsystem.
- */
- if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
- (opts->subsys_mask & mask))
- return -EINVAL;
- /* Can't specify "none" and some subsystems */
- if (opts->subsys_mask && opts->none)
- return -EINVAL;
- /*
- * We either have to specify by name or by subsystems. (So all
- * empty hierarchies must have a name).
- */
- if (!opts->subsys_mask && !opts->name)
- return -EINVAL;
- /*
- * Grab references on all the modules we'll need, so the subsystems
- * don't dance around before rebind_subsystems attaches them. This may
- * take duplicate reference counts on a subsystem that's already used,
- * but rebind_subsystems handles this case.
- */
- for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
- unsigned long bit = 1UL << i;
- if (!(bit & opts->subsys_mask))
- continue;
- if (!try_module_get(subsys[i]->module)) {
- module_pin_failed = true;
- break;
- }
- }
- if (module_pin_failed) {
- /*
- * Oops, one of the modules was being unloaded. To the user this is
- * essentially a "subsystem doesn't exist" case, so report it that
- * way.
- */
- for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
- /* drop refcounts only on the ones we took */
- unsigned long bit = 1UL << i;
- if (!(bit & opts->subsys_mask))
- continue;
- module_put(subsys[i]->module);
- }
- return -ENOENT;
- }
- return 0;
- }
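- /*
-  * Worked example (illustrative, not from this file): with the grammar
-  * above, "-o cpuset,noprefix" binds only the cpuset subsystem with
-  * unprefixed file names (the lone case noprefix permits), while
-  * "-o none,name=myhier" (the name is an assumption) creates a named
-  * hierarchy with no subsystems bound. "-o all,cpuset" fails with
-  * -EINVAL since 'all' excludes explicit subsystem names, and an
-  * unrecognized token fails with -ENOENT.
-  */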
- static void drop_parsed_module_refcounts(unsigned long subsys_mask)
- {
- int i;
- for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
- unsigned long bit = 1UL << i;
- if (!(bit & subsys_mask))
- continue;
- module_put(subsys[i]->module);
- }
- }
- static int cgroup_remount(struct super_block *sb, int *flags, char *data)
- {
- int ret = 0;
- struct cgroupfs_root *root = sb->s_fs_info;
- struct cgroup *cgrp = &root->top_cgroup;
- struct cgroup_sb_opts opts;
- unsigned long added_mask, removed_mask;
- mutex_lock(&cgrp->dentry->d_inode->i_mutex);
- mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
- /* See what subsystems are wanted */
- ret = parse_cgroupfs_options(data, &opts);
- if (ret)
- goto out_unlock;
- /* See feature-removal-schedule.txt */
- if (opts.subsys_mask != root->actual_subsys_mask || opts.release_agent)
- pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
- task_tgid_nr(current), current->comm);
- added_mask = opts.subsys_mask & ~root->subsys_mask;
- removed_mask = root->subsys_mask & ~opts.subsys_mask;
- /* Don't allow flags or name to change at remount */
- if (opts.flags != root->flags ||
- (opts.name && strcmp(opts.name, root->name))) {
- ret = -EINVAL;
- drop_parsed_module_refcounts(opts.subsys_mask);
- goto out_unlock;
- }
- ret = rebind_subsystems(root, opts.subsys_mask);
- if (ret) {
- drop_parsed_module_refcounts(opts.subsys_mask);
- goto out_unlock;
- }
- /* clear out any existing files and repopulate subsystem files */
- cgroup_clear_directory(cgrp->dentry, false, removed_mask);
- /* re-populate subsystem files */
- cgroup_populate_dir(cgrp, false, added_mask);
- if (opts.release_agent)
- strcpy(root->release_agent_path, opts.release_agent);
- out_unlock:
- kfree(opts.release_agent);
- kfree(opts.name);
- mutex_unlock(&cgroup_root_mutex);
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
- return ret;
- }
- static const struct super_operations cgroup_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
- .show_options = cgroup_show_options,
- .remount_fs = cgroup_remount,
- };
- static void init_cgroup_housekeeping(struct cgroup *cgrp)
- {
- INIT_LIST_HEAD(&cgrp->sibling);
- INIT_LIST_HEAD(&cgrp->children);
- INIT_LIST_HEAD(&cgrp->files);
- INIT_LIST_HEAD(&cgrp->css_sets);
- INIT_LIST_HEAD(&cgrp->release_list);
- INIT_LIST_HEAD(&cgrp->pidlists);
- mutex_init(&cgrp->pidlist_mutex);
- INIT_LIST_HEAD(&cgrp->event_list);
- spin_lock_init(&cgrp->event_list_lock);
- simple_xattrs_init(&cgrp->xattrs);
- }
- static void init_cgroup_root(struct cgroupfs_root *root)
- {
- struct cgroup *cgrp = &root->top_cgroup;
- INIT_LIST_HEAD(&root->subsys_list);
- INIT_LIST_HEAD(&root->root_list);
- INIT_LIST_HEAD(&root->allcg_list);
- root->number_of_cgroups = 1;
- cgrp->root = root;
- cgrp->top_cgroup = cgrp;
- list_add_tail(&cgrp->allcg_node, &root->allcg_list);
- init_cgroup_housekeeping(cgrp);
- }
- static bool init_root_id(struct cgroupfs_root *root)
- {
- int ret = 0;
- do {
- if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
- return false;
- spin_lock(&hierarchy_id_lock);
- /* Try to allocate the next unused ID */
- ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
- &root->hierarchy_id);
- if (ret == -ENOSPC)
- /* Try again starting from 0 */
- ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
- if (!ret) {
- next_hierarchy_id = root->hierarchy_id + 1;
- } else if (ret != -EAGAIN) {
- /* Can only get here if the 31-bit IDR is full ... */
- BUG_ON(ret);
- }
- spin_unlock(&hierarchy_id_lock);
- } while (ret);
- return true;
- }
- static int cgroup_test_super(struct super_block *sb, void *data)
- {
- struct cgroup_sb_opts *opts = data;
- struct cgroupfs_root *root = sb->s_fs_info;
- /* If we asked for a name then it must match */
- if (opts->name && strcmp(opts->name, root->name))
- return 0;
- /*
- * If we asked for subsystems (or explicitly for no
- * subsystems) then they must match
- */
- if ((opts->subsys_mask || opts->none)
- && (opts->subsys_mask != root->subsys_mask))
- return 0;
- return 1;
- }
- static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
- {
- struct cgroupfs_root *root;
- if (!opts->subsys_mask && !opts->none)
- return NULL;
- root = kzalloc(sizeof(*root), GFP_KERNEL);
- if (!root)
- return ERR_PTR(-ENOMEM);
- if (!init_root_id(root)) {
- kfree(root);
- return ERR_PTR(-ENOMEM);
- }
- init_cgroup_root(root);
- root->subsys_mask = opts->subsys_mask;
- root->flags = opts->flags;
- if (opts->release_agent)
- strcpy(root->release_agent_path, opts->release_agent);
- if (opts->name)
- strcpy(root->name, opts->name);
- if (opts->clone_children)
- set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
- return root;
- }
- static void cgroup_drop_root(struct cgroupfs_root *root)
- {
- if (!root)
- return;
- BUG_ON(!root->hierarchy_id);
- spin_lock(&hierarchy_id_lock);
- ida_remove(&hierarchy_ida, root->hierarchy_id);
- spin_unlock(&hierarchy_id_lock);
- kfree(root);
- }
- static int cgroup_set_super(struct super_block *sb, void *data)
- {
- int ret;
- struct cgroup_sb_opts *opts = data;
- /* If we don't have a new root, we can't set up a new sb */
- if (!opts->new_root)
- return -EINVAL;
- BUG_ON(!opts->subsys_mask && !opts->none);
- ret = set_anon_super(sb, NULL);
- if (ret)
- return ret;
- sb->s_fs_info = opts->new_root;
- opts->new_root->sb = sb;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = CGROUP_SUPER_MAGIC;
- sb->s_op = &cgroup_ops;
- return 0;
- }
- static int cgroup_get_rootdir(struct super_block *sb)
- {
- static const struct dentry_operations cgroup_dops = {
- .d_iput = cgroup_diput,
- .d_delete = cgroup_delete,
- };
- struct inode *inode =
- cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
- if (!inode)
- return -ENOMEM;
- inode->i_fop = &simple_dir_operations;
- inode->i_op = &cgroup_dir_inode_operations;
- /* directories start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
- sb->s_root = d_make_root(inode);
- if (!sb->s_root)
- return -ENOMEM;
- /* for everything else we want ->d_op set */
- sb->s_d_op = &cgroup_dops;
- return 0;
- }
- static struct dentry *cgroup_mount(struct file_system_type *fs_type,
- int flags, const char *unused_dev_name,
- void *data)
- {
- struct cgroup_sb_opts opts;
- struct cgroupfs_root *root;
- int ret = 0;
- struct super_block *sb;
- struct cgroupfs_root *new_root;
- struct inode *inode;
- /* First find the desired set of subsystems */
- mutex_lock(&cgroup_mutex);
- ret = parse_cgroupfs_options(data, &opts);
- mutex_unlock(&cgroup_mutex);
- if (ret)
- goto out_err;
- /*
- * Allocate a new cgroup root. We may not need it if we're
- * reusing an existing hierarchy.
- */
- new_root = cgroup_root_from_opts(&opts);
- if (IS_ERR(new_root)) {
- ret = PTR_ERR(new_root);
- goto drop_modules;
- }
- opts.new_root = new_root;
- /* Locate an existing or new sb for this hierarchy */
- sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
- if (IS_ERR(sb)) {
- ret = PTR_ERR(sb);
- cgroup_drop_root(opts.new_root);
- goto drop_modules;
- }
- root = sb->s_fs_info;
- BUG_ON(!root);
- if (root == opts.new_root) {
- /* We used the new root structure, so this is a new hierarchy */
- struct list_head tmp_cg_links;
- struct cgroup *root_cgrp = &root->top_cgroup;
- struct cgroupfs_root *existing_root;
- const struct cred *cred;
- int i;
- struct hlist_node *node;
- struct css_set *cg;
- BUG_ON(sb->s_root != NULL);
- ret = cgroup_get_rootdir(sb);
- if (ret)
- goto drop_new_super;
- inode = sb->s_root->d_inode;
- mutex_lock(&inode->i_mutex);
- mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
- /* Check for name clashes with existing mounts */
- ret = -EBUSY;
- if (strlen(root->name))
- for_each_active_root(existing_root)
- if (!strcmp(existing_root->name, root->name))
- goto unlock_drop;
- /*
- * We're accessing css_set_count without locking
- * css_set_lock here, but that's OK - it can only be
- * increased by someone holding cgroup_lock, and
- * that's us. The worst that can happen is that we
- * have some link structures left over
- */
- ret = allocate_cg_links(css_set_count, &tmp_cg_links);
- if (ret)
- goto unlock_drop;
- ret = rebind_subsystems(root, root->subsys_mask);
- if (ret == -EBUSY) {
- free_cg_links(&tmp_cg_links);
- goto unlock_drop;
- }
- /*
- * There must be no failure case after here, since rebinding
- * takes care of subsystems' refcounts, which are explicitly
- * dropped in the failure exit path.
- */
- /* EBUSY should be the only error here */
- BUG_ON(ret);
- list_add(&root->root_list, &roots);
- root_count++;
- sb->s_root->d_fsdata = root_cgrp;
- root->top_cgroup.dentry = sb->s_root;
- /* Link the top cgroup in this hierarchy into all
- * the css_set objects */
- write_lock(&css_set_lock);
- hash_for_each(css_set_table, i, node, cg, hlist)
- link_css_set(&tmp_cg_links, cg, root_cgrp);
- write_unlock(&css_set_lock);
- free_cg_links(&tmp_cg_links);
- BUG_ON(!list_empty(&root_cgrp->sibling));
- BUG_ON(!list_empty(&root_cgrp->children));
- BUG_ON(root->number_of_cgroups != 1);
- cred = override_creds(&init_cred);
- cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
- revert_creds(cred);
- mutex_unlock(&cgroup_root_mutex);
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
- } else {
- /*
- * We re-used an existing hierarchy - the new root (if
- * any) is not needed
- */
- cgroup_drop_root(opts.new_root);
- /* no subsys rebinding, so refcounts don't change */
- drop_parsed_module_refcounts(opts.subsys_mask);
- }
- kfree(opts.release_agent);
- kfree(opts.name);
- return dget(sb->s_root);
- unlock_drop:
- mutex_unlock(&cgroup_root_mutex);
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
- drop_new_super:
- deactivate_locked_super(sb);
- drop_modules:
- drop_parsed_module_refcounts(opts.subsys_mask);
- out_err:
- kfree(opts.release_agent);
- kfree(opts.name);
- return ERR_PTR(ret);
- }
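- /*
-  * Illustrative only: the mount path above is what userspace exercises
-  * with, e.g. (mount point and hierarchy name are assumptions):
-  *
-  *	# mount -t cgroup -o none,name=myhier none /sys/fs/cgroup/myhier
-  */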
- static void cgroup_kill_sb(struct super_block *sb) {
- struct cgroupfs_root *root = sb->s_fs_info;
- struct cgroup *cgrp = &root->top_cgroup;
- int ret;
- struct cg_cgroup_link *link;
- struct cg_cgroup_link *saved_link;
- BUG_ON(!root);
- BUG_ON(root->number_of_cgroups != 1);
- BUG_ON(!list_empty(&cgrp->children));
- BUG_ON(!list_empty(&cgrp->sibling));
- mutex_lock(&cgroup_mutex);
- mutex_lock(&cgroup_root_mutex);
- /* Rebind all subsystems back to the default hierarchy */
- ret = rebind_subsystems(root, 0);
- /* Shouldn't be able to fail ... */
- BUG_ON(ret);
- /*
- * Release all the links from css_sets to this hierarchy's
- * root cgroup
- */
- write_lock(&css_set_lock);
- list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
- cgrp_link_list) {
- list_del(&link->cg_link_list);
- list_del(&link->cgrp_link_list);
- kfree(link);
- }
- write_unlock(&css_set_lock);
- if (!list_empty(&root->root_list)) {
- list_del(&root->root_list);
- root_count--;
- }
- mutex_unlock(&cgroup_root_mutex);
- mutex_unlock(&cgroup_mutex);
- simple_xattrs_free(&cgrp->xattrs);
- kill_litter_super(sb);
- cgroup_drop_root(root);
- }
- static struct file_system_type cgroup_fs_type = {
- .name = "cgroup",
- .mount = cgroup_mount,
- .kill_sb = cgroup_kill_sb,
- };
- static struct kobject *cgroup_kobj;
- /**
- * cgroup_path - generate the path of a cgroup
- * @cgrp: the cgroup in question
- * @buf: the buffer to write the path into
- * @buflen: the length of the buffer
- *
- * Called with cgroup_mutex held or else with an RCU-protected cgroup
- * reference. Writes path of cgroup into buf. Returns 0 on success,
- * -errno on error.
- */
- int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
- {
- char *start;
- struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
- cgroup_lock_is_held());
- if (!dentry || cgrp == dummytop) {
- /*
- * Inactive subsystems have no dentry for their root
- * cgroup
- */
- strcpy(buf, "/");
- return 0;
- }
- start = buf + buflen;
- *--start = '\0';
- for (;;) {
- int len = dentry->d_name.len;
- if ((start -= len) < buf)
- return -ENAMETOOLONG;
- memcpy(start, dentry->d_name.name, len);
- cgrp = cgrp->parent;
- if (!cgrp)
- break;
- dentry = rcu_dereference_check(cgrp->dentry,
- cgroup_lock_is_held());
- if (!cgrp->parent)
- continue;
- if (--start < buf)
- return -ENAMETOOLONG;
- *start = '/';
- }
- memmove(buf, start, buf + buflen - start);
- return 0;
- }
- EXPORT_SYMBOL_GPL(cgroup_path);
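- /*
-  * Minimal usage sketch (assumed caller context, not from this file):
-  * snapshot a cgroup's path under RCU protection, per the locking rule
-  * in the kernel-doc above.
-  *
-  *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
-  *
-  *	if (buf) {
-  *		rcu_read_lock();
-  *		if (!cgroup_path(cgrp, buf, PATH_MAX))
-  *			pr_info("cgroup path: %s\n", buf);
-  *		rcu_read_unlock();
-  *		kfree(buf);
-  *	}
-  */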
- /*
- * Control Group taskset
- */
- struct task_and_cgroup {
- struct task_struct *task;
- struct cgroup *cgrp;
- struct css_set *cg;
- };
- struct cgroup_taskset {
- struct task_and_cgroup single;
- struct flex_array *tc_array;
- int tc_array_len;
- int idx;
- struct cgroup *cur_cgrp;
- };
- /**
- * cgroup_taskset_first - reset taskset and return the first task
- * @tset: taskset of interest
- *
- * @tset iteration is initialized and the first task is returned.
- */
- struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
- {
- if (tset->tc_array) {
- tset->idx = 0;
- return cgroup_taskset_next(tset);
- } else {
- tset->cur_cgrp = tset->single.cgrp;
- return tset->single.task;
- }
- }
- EXPORT_SYMBOL_GPL(cgroup_taskset_first);
- /**
- * cgroup_taskset_next - iterate to the next task in taskset
- * @tset: taskset of interest
- *
- * Return the next task in @tset. Iteration must have been initialized
- * with cgroup_taskset_first().
- */
- struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
- {
- struct task_and_cgroup *tc;
- if (!tset->tc_array || tset->idx >= tset->tc_array_len)
- return NULL;
- tc = flex_array_get(tset->tc_array, tset->idx++);
- tset->cur_cgrp = tc->cgrp;
- return tc->task;
- }
- EXPORT_SYMBOL_GPL(cgroup_taskset_next);
- /**
- * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
- * @tset: taskset of interest
- *
- * Return the cgroup for the current (last returned) task of @tset. This
- * function must be preceded by either cgroup_taskset_first() or
- * cgroup_taskset_next().
- */
- struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
- {
- return tset->cur_cgrp;
- }
- EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
- /**
- * cgroup_taskset_size - return the number of tasks in taskset
- * @tset: taskset of interest
- */
- int cgroup_taskset_size(struct cgroup_taskset *tset)
- {
- return tset->tc_array ? tset->tc_array_len : 1;
- }
- EXPORT_SYMBOL_GPL(cgroup_taskset_size);
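- /*
-  * Illustrative only: a subsystem's ->can_attach() might vet every task
-  * in the set with the iterators above (the example_* names are
-  * assumptions, not real callbacks):
-  *
-  *	static int example_can_attach(struct cgroup *cgrp,
-  *				      struct cgroup_taskset *tset)
-  *	{
-  *		struct task_struct *task;
-  *
-  *		for (task = cgroup_taskset_first(tset); task;
-  *		     task = cgroup_taskset_next(tset))
-  *			if (example_task_forbidden(task))
-  *				return -EPERM;
-  *		return 0;
-  *	}
-  */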
- /*
- * cgroup_task_migrate - move a task from one cgroup to another.
- *
- * The caller must have already obtained @newcg, the task's new css_set,
- * via find_css_set(); this function itself therefore cannot fail and
- * does not sleep. Must be called with cgroup_mutex and threadgroup locked.
- */
- static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
- struct task_struct *tsk, struct css_set *newcg)
- {
- struct css_set *oldcg;
- /*
- * We are synchronized through threadgroup_lock() against PF_EXITING
- * setting such that we can't race against cgroup_exit() changing the
- * css_set to init_css_set and dropping the old one.
- */
- WARN_ON_ONCE(tsk->flags & PF_EXITING);
- oldcg = tsk->cgroups;
- task_lock(tsk);
- rcu_assign_pointer(tsk->cgroups, newcg);
- task_unlock(tsk);
- /* Update the css_set linked lists if we're using them */
- write_lock(&css_set_lock);
- if (!list_empty(&tsk->cg_list))
- list_move(&tsk->cg_list, &newcg->tasks);
- write_unlock(&css_set_lock);
- /*
- * We just gained a reference on oldcg by taking it from the task. As
- * trading it for newcg is protected by cgroup_mutex, we're safe to drop
- * it here; it will be freed under RCU.
- */
- set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
- put_css_set(oldcg);
- }
- /**
- * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
- * @cgrp: the cgroup the task is attaching to
- * @tsk: the task to be attached
- *
- * Call with cgroup_mutex and threadgroup locked. May take task_lock of
- * @tsk during call.
- */
- int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
- {
- int retval = 0;
- struct cgroup_subsys *ss, *failed_ss = NULL;
- struct cgroup *oldcgrp;
- struct cgroupfs_root *root = cgrp->root;
- struct cgroup_taskset tset = { };
- struct css_set *newcg;
- struct css_set *cg;
- /* @tsk either already exited or can't exit until the end */
- if (tsk->flags & PF_EXITING)
- return -ESRCH;
- /* Nothing to do if the task is already in that cgroup */
- oldcgrp = task_cgroup_from_root(tsk, root);
- if (cgrp == oldcgrp)
- return 0;
- tset.single.task = tsk;
- tset.single.cgrp = oldcgrp;
- for_each_subsys(root, ss) {
- if (ss->can_attach) {
- retval = ss->can_attach(cgrp, &tset);
- if (retval) {
- /*
- * Remember on which subsystem the can_attach()
- * failed, so that we only call cancel_attach()
- * against the subsystems whose can_attach()
- * succeeded. (See below)
- */
- failed_ss = ss;
- goto out;
- }
- }
- }
- newcg = find_css_set(tsk->cgroups, cgrp);
- if (!newcg) {
- retval = -ENOMEM;
- goto out;
- }
- task_lock(tsk);
- cg = tsk->cgroups;
- get_css_set(cg);
- task_unlock(tsk);
- cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
- for_each_subsys(root, ss) {
- if (ss->attach)
- ss->attach(cgrp, &tset);
- }
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
- /* put_css_set will not destroy cg until after an RCU grace period */
- put_css_set(cg);
- /*
- * wake up rmdir() waiter. the rmdir should fail since the cgroup
- * is no longer empty.
- */
- cgroup_wakeup_rmdir_waiter(cgrp);
- out:
- if (retval) {
- for_each_subsys(root, ss) {
- if (ss == failed_ss)
- /*
- * This subsystem was the one that failed the
- * can_attach() check earlier, so we don't need
- * to call cancel_attach() against it or any
- * remaining subsystems.
- */
- break;
- if (ss->cancel_attach)
- ss->cancel_attach(cgrp, &tset);
- }
- }
- return retval;
- }
- /**
- * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
- * @from: attach to all cgroups of a given task
- * @tsk: the task to be attached
- */
- int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
- {
- struct cgroupfs_root *root;
- int retval = 0;
- cgroup_lock();
- for_each_active_root(root) {
- struct cgroup *from_cg = task_cgroup_from_root(from, root);
- retval = cgroup_attach_task(from_cg, tsk);
- if (retval)
- break;
- }
- cgroup_unlock();
- return retval;
- }
- EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
- /**
- * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
- * @cgrp: the cgroup to attach to
- * @leader: the threadgroup leader task_struct of the group to be attached
- *
- * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
- * task_lock of each thread in leader's threadgroup individually in turn.
- */
- static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
- {
- int retval, i, group_size;
- struct cgroup_subsys *ss, *failed_ss = NULL;
- /* guaranteed to be initialized later, but the compiler needs this */
- struct cgroupfs_root *root = cgrp->root;
- /* threadgroup list cursor and array */
- struct task_struct *tsk;
- struct task_and_cgroup *tc;
- struct flex_array *group;
- struct cgroup_taskset tset = { };
- /*
- * step 0: in order to do expensive, possibly blocking operations for
- * every thread, we cannot iterate the thread group list, since it needs
- * RCU or tasklist locked. Instead, build an array of all threads in the
- * group - group_rwsem prevents new threads from appearing, and if
- * threads exit, this will just be an over-estimate.
- */
- group_size = get_nr_threads(leader);
- /* flex_array supports very large thread-groups better than kmalloc. */
- group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
- if (!group)
- return -ENOMEM;
- /* pre-allocate to guarantee space while iterating in rcu read-side. */
- retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
- if (retval)
- goto out_free_group_list;
- tsk = leader;
- i = 0;
- /*
- * Prevent freeing of tasks while we take a snapshot. Tasks that are
- * already PF_EXITING could be freed from underneath us unless we
- * take an rcu_read_lock.
- */
- rcu_read_lock();
- do {
- struct task_and_cgroup ent;
- /* @tsk either already exited or can't exit until the end */
- if (tsk->flags & PF_EXITING)
- continue;
- /* as per above, nr_threads may decrease, but not increase. */
- BUG_ON(i >= group_size);
- ent.task = tsk;
- ent.cgrp = task_cgroup_from_root(tsk, root);
- /* nothing to do if this task is already in the cgroup */
- if (ent.cgrp == cgrp)
- continue;
- /*
- * saying GFP_ATOMIC has no effect here because we did prealloc
- * earlier, but it's good form to communicate our expectations.
- */
- retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
- BUG_ON(retval != 0);
- i++;
- } while_each_thread(leader, tsk);
- rcu_read_unlock();
- /* remember the number of threads in the array for later. */
- group_size = i;
- tset.tc_array = group;
- tset.tc_array_len = group_size;
- /* methods shouldn't be called if no task is actually migrating */
- retval = 0;
- if (!group_size)
- goto out_free_group_list;
- /*
- * step 1: check that we can legitimately attach to the cgroup.
- */
- for_each_subsys(root, ss) {
- if (ss->can_attach) {
- retval = ss->can_attach(cgrp, &tset);
- if (retval) {
- failed_ss = ss;
- goto out_cancel_attach;
- }
- }
- }
- /*
- * step 2: make sure css_sets exist for all threads to be migrated.
- * we use find_css_set, which allocates a new one if necessary.
- */
- for (i = 0; i < group_size; i++) {
- tc = flex_array_get(group, i);
- tc->cg = find_css_set(tc->task->cgroups, cgrp);
- if (!tc->cg) {
- retval = -ENOMEM;
- goto out_put_css_set_refs;
- }
- }
- /*
- * step 3: now that we're guaranteed success wrt the css_sets,
- * proceed to move all tasks to the new cgroup. There are no
- * failure cases after here, so this is the commit point.
- */
- for (i = 0; i < group_size; i++) {
- tc = flex_array_get(group, i);
- cgroup_task_migrate(cgrp, tc->cgrp, tc->task, tc->cg);
- }
- /* nothing is sensitive to fork() after this point. */
- /*
- * step 4: do subsystem attach callbacks.
- */
- for_each_subsys(root, ss) {
- if (ss->attach)
- ss->attach(cgrp, &tset);
- }
- /*
- * step 5: success! and cleanup
- */
- cgroup_wakeup_rmdir_waiter(cgrp);
- retval = 0;
- out_put_css_set_refs:
- if (retval) {
- for (i = 0; i < group_size; i++) {
- tc = flex_array_get(group, i);
- if (!tc->cg)
- break;
- put_css_set(tc->cg);
- }
- }
- out_cancel_attach:
- if (retval) {
- for_each_subsys(root, ss) {
- if (ss == failed_ss)
- break;
- if (ss->cancel_attach)
- ss->cancel_attach(cgrp, &tset);
- }
- }
- out_free_group_list:
- flex_array_free(group);
- return retval;
- }
- static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
- {
- struct cgroup_subsys *ss;
- int ret;
- for_each_subsys(cgrp->root, ss) {
- if (ss->allow_attach) {
- ret = ss->allow_attach(cgrp, tset);
- if (ret)
- return ret;
- } else {
- return -EACCES;
- }
- }
- return 0;
- }
- int subsys_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
- {
- const struct cred *cred = current_cred(), *tcred;
- struct task_struct *task;
- if (capable(CAP_SYS_NICE))
- return 0;
- cgroup_taskset_for_each(task, cgrp, tset) {
- tcred = __task_cred(task);
- if (current != task && cred->euid != tcred->uid &&
- cred->euid != tcred->suid)
- return -EACCES;
- }
- return 0;
- }
- /*
- * Find the task_struct of the task to attach by vpid and pass it along to the
- * function to attach either it or all tasks in its threadgroup. Will lock
- * cgroup_mutex and threadgroup; may take task_lock of task.
- */
- static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
- {
- struct task_struct *tsk;
- const struct cred *cred = current_cred(), *tcred;
- int ret;
- if (!cgroup_lock_live_group(cgrp))
- return -ENODEV;
- retry_find_task:
- rcu_read_lock();
- if (pid) {
- tsk = find_task_by_vpid(pid);
- if (!tsk) {
- rcu_read_unlock();
- ret = -ESRCH;
- goto out_unlock_cgroup;
- }
- /*
- * even if we're attaching all tasks in the thread group, we
- * only need to check permissions on one of them.
- */
- tcred = __task_cred(tsk);
- if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
- !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->euid, tcred->suid)) {
- /*
- * if the default permission check fails, give each
- * cgroup a chance to extend the permission check
- */
- struct cgroup_taskset tset = { };
- tset.single.task = tsk;
- tset.single.cgrp = cgrp;
- ret = cgroup_allow_attach(cgrp, &tset);
- if (ret) {
- rcu_read_unlock();
- goto out_unlock_cgroup;
- }
- }
- } else
- tsk = current;
- if (threadgroup)
- tsk = tsk->group_leader;
- /*
- * Workqueue threads may acquire PF_THREAD_BOUND and become
- * trapped in a cpuset, or an RT worker may be born in a cgroup
- * with no rt_runtime allocated. Just say no.
- */
- if (tsk == kthreadd_task || (tsk->flags & PF_THREAD_BOUND)) {
- ret = -EINVAL;
- rcu_read_unlock();
- goto out_unlock_cgroup;
- }
- get_task_struct(tsk);
- rcu_read_unlock();
- threadgroup_lock(tsk);
- if (threadgroup) {
- if (!thread_group_leader(tsk)) {
- /*
- * A race with de_thread() from another thread's exec()
- * may strip us of our leadership. If that happens, there
- * is no choice but to throw this task away and try
- * again; this is
- * "double-double-toil-and-trouble-check locking".
- */
- threadgroup_unlock(tsk);
- put_task_struct(tsk);
- goto retry_find_task;
- }
- ret = cgroup_attach_proc(cgrp, tsk);
- } else
- ret = cgroup_attach_task(cgrp, tsk);
- threadgroup_unlock(tsk);
- put_task_struct(tsk);
- out_unlock_cgroup:
- cgroup_unlock();
- return ret;
- }
- static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
- {
- return attach_task_by_pid(cgrp, pid, false);
- }
- static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
- {
- return attach_task_by_pid(cgrp, tgid, true);
- }
- /**
- * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
- * @cgrp: the cgroup to be checked for liveness
- *
- * On success, returns true; the lock should be later released with
- * cgroup_unlock(). On failure returns false with no lock held.
- */
- bool cgroup_lock_live_group(struct cgroup *cgrp)
- {
- mutex_lock(&cgroup_mutex);
- if (cgroup_is_removed(cgrp)) {
- mutex_unlock(&cgroup_mutex);
- return false;
- }
- return true;
- }
- EXPORT_SYMBOL_GPL(cgroup_lock_live_group);
- static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
- {
- BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
- if (strlen(buffer) >= PATH_MAX)
- return -EINVAL;
- if (!cgroup_lock_live_group(cgrp))
- return -ENODEV;
- mutex_lock(&cgroup_root_mutex);
- strcpy(cgrp->root->release_agent_path, buffer);
- mutex_unlock(&cgroup_root_mutex);
- cgroup_unlock();
- return 0;
- }
- static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *seq)
- {
- if (!cgroup_lock_live_group(cgrp))
- return -ENODEV;
- seq_puts(seq, cgrp->root->release_agent_path);
- seq_putc(seq, '\n');
- cgroup_unlock();
- return 0;
- }
- /* A buffer size big enough for numbers or short strings */
- #define CGROUP_LOCAL_BUFFER_SIZE 64
- static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *unused_ppos)
- {
- char buffer[CGROUP_LOCAL_BUFFER_SIZE];
- int retval = 0;
- char *end;
- if (!nbytes)
- return -EINVAL;
- if (nbytes >= sizeof(buffer))
- return -E2BIG;
- if (copy_from_user(buffer, userbuf, nbytes))
- return -EFAULT;
- buffer[nbytes] = 0; /* nul-terminate */
- if (cft->write_u64) {
- u64 val = simple_strtoull(strstrip(buffer), &end, 0);
- if (*end)
- return -EINVAL;
- retval = cft->write_u64(cgrp, cft, val);
- } else {
- s64 val = simple_strtoll(strstrip(buffer), &end, 0);
- if (*end)
- return -EINVAL;
- retval = cft->write_s64(cgrp, cft, val);
- }
- if (!retval)
- retval = nbytes;
- return retval;
- }
- static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *unused_ppos)
- {
- char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
- int retval = 0;
- size_t max_bytes = cft->max_write_len;
- char *buffer = local_buffer;
- if (!max_bytes)
- max_bytes = sizeof(local_buffer) - 1;
- if (nbytes >= max_bytes)
- return -E2BIG;
- /* Allocate a dynamic buffer if we need one */
- if (nbytes >= sizeof(local_buffer)) {
- buffer = kmalloc(nbytes + 1, GFP_KERNEL);
- if (buffer == NULL)
- return -ENOMEM;
- }
- if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
- retval = -EFAULT;
- goto out;
- }
- buffer[nbytes] = 0; /* nul-terminate */
- retval = cft->write_string(cgrp, cft, strstrip(buffer));
- if (!retval)
- retval = nbytes;
- out:
- if (buffer != local_buffer)
- kfree(buffer);
- return retval;
- }
- static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
- {
- struct cftype *cft = __d_cft(file->f_dentry);
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
- if (cgroup_is_removed(cgrp))
- return -ENODEV;
- if (cft->write)
- return cft->write(cgrp, cft, file, buf, nbytes, ppos);
- if (cft->write_u64 || cft->write_s64)
- return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
- if (cft->write_string)
- return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
- if (cft->trigger) {
- int ret = cft->trigger(cgrp, (unsigned int)cft->private);
- return ret ? ret : nbytes;
- }
- return -EINVAL;
- }
- static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes,
- loff_t *ppos)
- {
- char tmp[CGROUP_LOCAL_BUFFER_SIZE];
- u64 val = cft->read_u64(cgrp, cft);
- int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
- return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
- }
- static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes,
- loff_t *ppos)
- {
- char tmp[CGROUP_LOCAL_BUFFER_SIZE];
- s64 val = cft->read_s64(cgrp, cft);
- int len = sprintf(tmp, "%lld\n", (long long) val);
- return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
- }
- static ssize_t cgroup_file_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
- {
- struct cftype *cft = __d_cft(file->f_dentry);
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
- if (cgroup_is_removed(cgrp))
- return -ENODEV;
- if (cft->read)
- return cft->read(cgrp, cft, file, buf, nbytes, ppos);
- if (cft->read_u64)
- return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
- if (cft->read_s64)
- return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
- return -EINVAL;
- }
- /*
- * seqfile ops/methods for returning structured data. Currently just
- * supports string->u64 maps, but can be extended in future.
- */
- struct cgroup_seqfile_state {
- struct cftype *cft;
- struct cgroup *cgroup;
- };
- static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
- {
- struct seq_file *sf = cb->state;
- return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
- }
- static int cgroup_seqfile_show(struct seq_file *m, void *arg)
- {
- struct cgroup_seqfile_state *state = m->private;
- struct cftype *cft = state->cft;
- if (cft->read_map) {
- struct cgroup_map_cb cb = {
- .fill = cgroup_map_add,
- .state = m,
- };
- return cft->read_map(state->cgroup, cft, &cb);
- }
- return cft->read_seq_string(state->cgroup, cft, m);
- }
- static int cgroup_seqfile_release(struct inode *inode, struct file *file)
- {
- struct seq_file *seq = file->private_data;
- kfree(seq->private);
- return single_release(inode, file);
- }
- static const struct file_operations cgroup_seqfile_operations = {
- .read = seq_read,
- .write = cgroup_file_write,
- .llseek = seq_lseek,
- .release = cgroup_seqfile_release,
- };
- static int cgroup_file_open(struct inode *inode, struct file *file)
- {
- int err;
- struct cftype *cft;
- err = generic_file_open(inode, file);
- if (err)
- return err;
- cft = __d_cft(file->f_dentry);
- if (cft->read_map || cft->read_seq_string) {
- struct cgroup_seqfile_state *state =
- kzalloc(sizeof(*state), GFP_USER);
- if (!state)
- return -ENOMEM;
- state->cft = cft;
- state->cgroup = __d_cgrp(file->f_dentry->d_parent);
- file->f_op = &cgroup_seqfile_operations;
- err = single_open(file, cgroup_seqfile_show, state);
- if (err < 0)
- kfree(state);
- } else if (cft->open)
- err = cft->open(inode, file);
- else
- err = 0;
- return err;
- }
- static int cgroup_file_release(struct inode *inode, struct file *file)
- {
- struct cftype *cft = __d_cft(file->f_dentry);
- if (cft->release)
- return cft->release(inode, file);
- return 0;
- }
- /*
- * cgroup_rename - Only allow simple rename of directories in place.
- */
- static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
- {
- if (!S_ISDIR(old_dentry->d_inode->i_mode))
- return -ENOTDIR;
- if (new_dentry->d_inode)
- return -EEXIST;
- if (old_dir != new_dir)
- return -EIO;
- return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
- }
- static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
- {
- if (S_ISDIR(dentry->d_inode->i_mode))
- return &__d_cgrp(dentry)->xattrs;
- else
- return &__d_cft(dentry)->xattrs;
- }
- static inline int xattr_enabled(struct dentry *dentry)
- {
- struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
- return test_bit(ROOT_XATTR, &root->flags);
- }
- static bool is_valid_xattr(const char *name)
- {
- if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
- !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
- return true;
- return false;
- }
- static int cgroup_setxattr(struct dentry *dentry, const char *name,
- const void *val, size_t size, int flags)
- {
- if (!xattr_enabled(dentry))
- return -EOPNOTSUPP;
- if (!is_valid_xattr(name))
- return -EINVAL;
- return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
- }
- static int cgroup_removexattr(struct dentry *dentry, const char *name)
- {
- if (!xattr_enabled(dentry))
- return -EOPNOTSUPP;
- if (!is_valid_xattr(name))
- return -EINVAL;
- return simple_xattr_remove(__d_xattrs(dentry), name);
- }
- static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
- void *buf, size_t size)
- {
- if (!xattr_enabled(dentry))
- return -EOPNOTSUPP;
- if (!is_valid_xattr(name))
- return -EINVAL;
- return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
- }
- static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
- {
- if (!xattr_enabled(dentry))
- return -EOPNOTSUPP;
- return simple_xattr_list(__d_xattrs(dentry), buf, size);
- }
- static const struct file_operations cgroup_file_operations = {
- .read = cgroup_file_read,
- .write = cgroup_file_write,
- .llseek = generic_file_llseek,
- .open = cgroup_file_open,
- .release = cgroup_file_release,
- };
- static const struct inode_operations cgroup_file_inode_operations = {
- .setxattr = cgroup_setxattr,
- .getxattr = cgroup_getxattr,
- .listxattr = cgroup_listxattr,
- .removexattr = cgroup_removexattr,
- };
- static const struct inode_operations cgroup_dir_inode_operations = {
- .lookup = cgroup_lookup,
- .mkdir = cgroup_mkdir,
- .rmdir = cgroup_rmdir,
- .rename = cgroup_rename,
- .setxattr = cgroup_setxattr,
- .getxattr = cgroup_getxattr,
- .listxattr = cgroup_listxattr,
- .removexattr = cgroup_removexattr,
- };
- static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
- {
- if (dentry->d_name.len > NAME_MAX)
- return ERR_PTR(-ENAMETOOLONG);
- d_add(dentry, NULL);
- return NULL;
- }
- /*
- * Check if a file is a control file
- */
- static inline struct cftype *__file_cft(struct file *file)
- {
- if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
- return ERR_PTR(-EINVAL);
- return __d_cft(file->f_dentry);
- }
- static int cgroup_create_file(struct dentry *dentry, umode_t mode,
- struct super_block *sb)
- {
- struct inode *inode;
- if (!dentry)
- return -ENOENT;
- if (dentry->d_inode)
- return -EEXIST;
- inode = cgroup_new_inode(mode, sb);
- if (!inode)
- return -ENOMEM;
- if (S_ISDIR(mode)) {
- inode->i_op = &cgroup_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- /* start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
- /* start with the directory inode held, so that we can
- * populate it without racing with another mkdir */
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
- } else if (S_ISREG(mode)) {
- inode->i_size = 0;
- inode->i_fop = &cgroup_file_operations;
- inode->i_op = &cgroup_file_inode_operations;
- }
- d_instantiate(dentry, inode);
- dget(dentry); /* Extra count - pin the dentry in core */
- return 0;
- }
- /*
- * cgroup_create_dir - create a directory for an object.
- * @cgrp: the cgroup we create the directory for. It must have a valid
- * ->parent field. And we are going to fill its ->dentry field.
- * @dentry: dentry of the new cgroup
- * @mode: mode to set on new directory.
- */
- static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
- umode_t mode)
- {
- struct dentry *parent;
- int error = 0;
- parent = cgrp->parent->dentry;
- error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
- if (!error) {
- dentry->d_fsdata = cgrp;
- inc_nlink(parent->d_inode);
- rcu_assign_pointer(cgrp->dentry, dentry);
- }
- return error;
- }
- /**
- * cgroup_file_mode - deduce file mode of a control file
- * @cft: the control file in question
- *
- * returns cft->mode if ->mode is not 0
- * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
- * returns S_IRUGO if it has only a read handler
- * returns S_IWUSR if it has only a write handler
- */
- static umode_t cgroup_file_mode(const struct cftype *cft)
- {
- umode_t mode = 0;
- if (cft->mode)
- return cft->mode;
- if (cft->read || cft->read_u64 || cft->read_s64 ||
- cft->read_map || cft->read_seq_string)
- mode |= S_IRUGO;
- if (cft->write || cft->write_u64 || cft->write_s64 ||
- cft->write_string || cft->trigger)
- mode |= S_IWUSR;
- return mode;
- }
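- /*
-  * For instance, a cftype with only .read_u64 set gets 0444 (S_IRUGO),
-  * while one with both .read_u64 and .write_u64 gets 0644
-  * (S_IRUGO | S_IWUSR).
-  */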
- static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype *cft)
- {
- struct dentry *dir = cgrp->dentry;
- struct cgroup *parent = __d_cgrp(dir);
- struct dentry *dentry;
- struct cfent *cfe;
- int error;
- umode_t mode;
- char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
- simple_xattrs_init(&cft->xattrs);
- /* does @cft->flags tell us to skip creation on @cgrp? */
- if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
- return 0;
- if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
- return 0;
- if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
- strcpy(name, subsys->name);
- strcat(name, ".");
- }
- strcat(name, cft->name);
- BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
- cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
- if (!cfe)
- return -ENOMEM;
- dentry = lookup_one_len(name, dir, strlen(name));
- if (IS_ERR(dentry)) {
- error = PTR_ERR(dentry);
- goto out;
- }
- mode = cgroup_file_mode(cft);
- error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
- if (!error) {
- cfe->type = (void *)cft;
- cfe->dentry = dentry;
- dentry->d_fsdata = cfe;
- list_add_tail(&cfe->node, &parent->files);
- cfe = NULL;
- }
- dput(dentry);
- out:
- kfree(cfe);
- return error;
- }
- static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype cfts[], bool is_add)
- {
- struct cftype *cft;
- int err, ret = 0;
- for (cft = cfts; cft->name[0] != '\0'; cft++) {
- if (is_add)
- err = cgroup_add_file(cgrp, subsys, cft);
- else
- err = cgroup_rm_file(cgrp, cft);
- if (err) {
- pr_warning("cgroup_addrm_files: failed to %s %s, err=%d\n",
- is_add ? "add" : "remove", cft->name, err);
- ret = err;
- }
- }
- return ret;
- }
- static DEFINE_MUTEX(cgroup_cft_mutex);
- static void cgroup_cfts_prepare(void)
- __acquires(&cgroup_cft_mutex) __acquires(&cgroup_mutex)
- {
- /*
- * Thanks to the entanglement with vfs inode locking, we can't walk
- * the existing cgroups under cgroup_mutex and create files.
- * Instead, we increment reference on all cgroups and build list of
- * them using @cgrp->cft_q_node. Grab cgroup_cft_mutex to ensure
- * exclusive access to the field.
- */
- mutex_lock(&cgroup_cft_mutex);
- mutex_lock(&cgroup_mutex);
- }
- static void cgroup_cfts_commit(struct cgroup_subsys *ss,
- struct cftype *cfts, bool is_add)
- __releases(&cgroup_mutex) __releases(&cgroup_cft_mutex)
- {
- LIST_HEAD(pending);
- struct cgroup *cgrp, *n;
- /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
- if (cfts && ss->root != &rootnode) {
- list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
- dget(cgrp->dentry);
- list_add_tail(&cgrp->cft_q_node, &pending);
- }
- }
- mutex_unlock(&cgroup_mutex);
- /*
- * All new cgroups will see @cfts update on @ss->cftsets. Add/rm
- * files for all cgroups which were created before.
- */
- list_for_each_entry_safe(cgrp, n, &pending, cft_q_node) {
- struct inode *inode = cgrp->dentry->d_inode;
- mutex_lock(&inode->i_mutex);
- mutex_lock(&cgroup_mutex);
- if (!cgroup_is_removed(cgrp))
- cgroup_addrm_files(cgrp, ss, cfts, is_add);
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
- list_del_init(&cgrp->cft_q_node);
- dput(cgrp->dentry);
- }
- mutex_unlock(&cgroup_cft_mutex);
- }
- /**
- * cgroup_add_cftypes - add an array of cftypes to a subsystem
- * @ss: target cgroup subsystem
- * @cfts: zero-length name terminated array of cftypes
- *
- * Register @cfts to @ss. Files described by @cfts are created for all
- * existing cgroups to which @ss is attached and all future cgroups will
- * have them too. This function can be called anytime whether @ss is
- * attached or not.
- *
- * Returns 0 on successful registration, -errno on failure. Note that this
- * function currently returns 0 as long as @cfts registration is successful
- * even if some file creation attempts on existing cgroups fail.
- */
- int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
- {
- struct cftype_set *set;
- set = kzalloc(sizeof(*set), GFP_KERNEL);
- if (!set)
- return -ENOMEM;
- cgroup_cfts_prepare();
- set->cfts = cfts;
- list_add_tail(&set->node, &ss->cftsets);
- cgroup_cfts_commit(ss, cfts, true);
- return 0;
- }
- EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
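- /*
-  * Registration sketch (illustrative; the example_* names are
-  * assumptions). The trailing empty entry is the zero-length-name
-  * terminator the kernel-doc above requires:
-  *
-  *	static struct cftype example_files[] = {
-  *		{
-  *			.name = "example.stat",
-  *			.read_u64 = example_read_u64,
-  *		},
-  *		{ }
-  *	};
-  *
-  *	ret = cgroup_add_cftypes(&example_subsys, example_files);
-  */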
- /**
- * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
- * @ss: target cgroup subsystem
- * @cfts: zero-length name terminated array of cftypes
- *
- * Unregister @cfts from @ss. Files described by @cfts are removed from
- * all existing cgroups to which @ss is attached and all future cgroups
- * won't have them either. This function can be called anytime whether @ss
- * is attached or not.
- *
- * Returns 0 on successful unregistration, -ENOENT if @cfts is not
- * registered with @ss.
- */
- int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
- {
- struct cftype_set *set;
- cgroup_cfts_prepare();
- list_for_each_entry(set, &ss->cftsets, node) {
- if (set->cfts == cfts) {
- list_del_init(&set->node);
- cgroup_cfts_commit(ss, cfts, false);
- return 0;
- }
- }
- cgroup_cfts_commit(ss, NULL, false);
- return -ENOENT;
- }
- /**
- * cgroup_task_count - count the number of tasks in a cgroup.
- * @cgrp: the cgroup in question
- *
- * Return the number of tasks in the cgroup.
- */
- int cgroup_task_count(const struct cgroup *cgrp)
- {
- int count = 0;
- struct cg_cgroup_link *link;
- read_lock(&css_set_lock);
- list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
- count += atomic_read(&link->cg->refcount);
- }
- read_unlock(&css_set_lock);
- return count;
- }
- /*
- * Advance a list_head iterator. The iterator should be positioned at
- * the start of a css_set
- */
- static void cgroup_advance_iter(struct cgroup *cgrp,
- struct cgroup_iter *it)
- {
- struct list_head *l = it->cg_link;
- struct cg_cgroup_link *link;
- struct css_set *cg;
- /* Advance to the next non-empty css_set */
- do {
- l = l->next;
- if (l == &cgrp->css_sets) {
- it->cg_link = NULL;
- return;
- }
- link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
- cg = link->cg;
- } while (list_empty(&cg->tasks));
- it->cg_link = l;
- it->task = cg->tasks.next;
- }
- /*
- * To reduce the fork() overhead for systems that are not actually
- * using their cgroups capability, we don't maintain the lists running
- * through each css_set to its tasks until we see the list actually
- * used - in other words after the first call to cgroup_iter_start().
- */
- static void cgroup_enable_task_cg_lists(void)
- {
- struct task_struct *p, *g;
- write_lock(&css_set_lock);
- use_task_css_set_links = 1;
- /*
- * We need tasklist_lock because RCU is not safe against
- * while_each_thread(). Besides, a forking task that has passed
- * cgroup_post_fork() without seeing use_task_css_set_links = 1
- * is not guaranteed to have its child immediately visible in the
- * tasklist if we walk through it with RCU.
- */
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- task_lock(p);
- /*
- * We must check whether the process is exiting; otherwise
- * we race with cgroup_exit() and the list entry is never
- * deleted even though the process has exited.
- */
- if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
- list_add(&p->cg_list, &p->cgroups->tasks);
- task_unlock(p);
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
- write_unlock(&css_set_lock);
- }
- void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
- __acquires(css_set_lock)
- {
- /*
- * The first time anyone tries to iterate across a cgroup,
- * we need to enable the list linking each css_set to its
- * tasks, and fix up all existing tasks.
- */
- if (!use_task_css_set_links)
- cgroup_enable_task_cg_lists();
- read_lock(&css_set_lock);
- it->cg_link = &cgrp->css_sets;
- cgroup_advance_iter(cgrp, it);
- }
- struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
- struct cgroup_iter *it)
- {
- struct task_struct *res;
- struct list_head *l = it->task;
- struct cg_cgroup_link *link;
- /* If the iterator cg is NULL, we have no tasks */
- if (!it->cg_link)
- return NULL;
- res = list_entry(l, struct task_struct, cg_list);
- /* Advance iterator to find next entry */
- l = l->next;
- link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
- if (l == &link->cg->tasks) {
- /* We reached the end of this task list - move on to
- * the next cg_cgroup_link */
- cgroup_advance_iter(cgrp, it);
- } else {
- it->task = l;
- }
- return res;
- }
- void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
- __releases(css_set_lock)
- {
- read_unlock(&css_set_lock);
- }
- static inline int started_after_time(struct task_struct *t1,
- struct timespec *time,
- struct task_struct *t2)
- {
- int start_diff = timespec_compare(&t1->start_time, time);
- if (start_diff > 0) {
- return 1;
- } else if (start_diff < 0) {
- return 0;
- } else {
- /*
- * Arbitrarily, if two processes started at the same
- * time, we'll say that the lower pointer value
- * started first. Note that t2 may have exited by now
- * so this may not be a valid pointer any longer, but
- * that's fine - it still serves to distinguish
- * between two tasks started (effectively) simultaneously.
- */
- return t1 > t2;
- }
- }
- /*
- * This function is a callback from heap_insert() and is used to order
- * the heap.
- * In this case we order the heap in descending task start time.
- */
- static inline int started_after(void *p1, void *p2)
- {
- struct task_struct *t1 = p1;
- struct task_struct *t2 = p2;
- return started_after_time(t1, &t2->start_time, t2);
- }
- /**
- * cgroup_scan_tasks - iterate through all the tasks in a cgroup
- * @scan: struct cgroup_scanner containing arguments for the scan
- *
- * Arguments include pointers to callback functions test_task() and
- * process_task().
- * Iterate through all the tasks in a cgroup, calling test_task() for each,
- * and if it returns true, call process_task() for it also.
- * The test_task pointer may be NULL, meaning always true (select all tasks).
- * Effectively duplicates cgroup_iter_{start,next,end}()
- * but does not lock css_set_lock for the call to process_task().
- * The struct cgroup_scanner may be embedded in any structure of the caller's
- * creation.
- * It is guaranteed that process_task() will act on every task that
- * is a member of the cgroup for the duration of this call. This
- * function may or may not call process_task() for tasks that exit
- * or move to a different cgroup during the call, or are forked or
- * move into the cgroup during the call.
- *
- * Note that test_task() may be called with locks held, and may in some
- * situations be called multiple times for the same task, so it should
- * be cheap.
- * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
- * pre-allocated and will be used for heap operations (and its "gt" member will
- * be overwritten), else a temporary heap will be used (allocation of which
- * may cause this function to fail).
- */
- int cgroup_scan_tasks(struct cgroup_scanner *scan)
- {
- int retval, i;
- struct cgroup_iter it;
- struct task_struct *p, *dropped;
- /* Never dereference latest_task, since it's not refcounted */
- struct task_struct *latest_task = NULL;
- struct ptr_heap tmp_heap;
- struct ptr_heap *heap;
- struct timespec latest_time = { 0, 0 };
- if (scan->heap) {
- /* The caller supplied our heap and pre-allocated its memory */
- heap = scan->heap;
- heap->gt = &started_after;
- } else {
- /* We need to allocate our own heap memory */
- heap = &tmp_heap;
- retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
- if (retval)
- /* cannot allocate the heap */
- return retval;
- }
- again:
- /*
- * Scan tasks in the cgroup, using the scanner's "test_task" callback
- * to determine which are of interest, and using the scanner's
- * "process_task" callback to process any of them that need an update.
- * Since we don't want to hold any locks during the task updates,
- * gather tasks to be processed in a heap structure.
- * The heap is sorted by descending task start time.
- * If the statically-sized heap fills up, we overflow tasks that
- * started later, and in future iterations only consider tasks that
- * started after the latest task in the previous pass. This
- * guarantees forward progress and that we don't miss any tasks.
- */
- heap->size = 0;
- cgroup_iter_start(scan->cg, &it);
- while ((p = cgroup_iter_next(scan->cg, &it))) {
- /*
- * Only affect tasks that qualify per the caller's callback,
- * if one was provided
- */
- if (scan->test_task && !scan->test_task(p, scan))
- continue;
- /*
- * Only process tasks that started after the last task
- * we processed
- */
- if (!started_after_time(p, &latest_time, latest_task))
- continue;
- dropped = heap_insert(heap, p);
- if (dropped == NULL) {
- /*
- * The new task was inserted; the heap wasn't
- * previously full
- */
- get_task_struct(p);
- } else if (dropped != p) {
- /*
- * The new task was inserted, and pushed out a
- * different task
- */
- get_task_struct(p);
- put_task_struct(dropped);
- }
- /*
- * Else the new task was newer than anything already in
- * the heap and wasn't inserted
- */
- }
- cgroup_iter_end(scan->cg, &it);
- if (heap->size) {
- for (i = 0; i < heap->size; i++) {
- struct task_struct *q = heap->ptrs[i];
- if (i == 0) {
- latest_time = q->start_time;
- latest_task = q;
- }
- /* Process the task per the caller's callback */
- scan->process_task(q, scan);
- put_task_struct(q);
- }
- /*
- * If we had to process any tasks at all, scan again
- * in case some of them were in the middle of forking
- * children that didn't get processed.
- * Not the most efficient way to do it, but it avoids
- * having to take callback_mutex in the fork path
- */
- goto again;
- }
- if (heap == &tmp_heap)
- heap_free(&tmp_heap);
- return 0;
- }
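- /*
-  * Caller sketch (illustrative; the example_* callbacks are
-  * assumptions). Per the kernel-doc above, .test_task may be NULL to
-  * select every task and .heap may be NULL to use a temporary heap:
-  *
-  *	struct cgroup_scanner scan = {
-  *		.cg		= cgrp,
-  *		.test_task	= example_test,
-  *		.process_task	= example_process,
-  *		.heap		= NULL,
-  *	};
-  *
-  *	ret = cgroup_scan_tasks(&scan);
-  */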
- /*
- * Stuff for reading the 'tasks'/'procs' files.
- *
- * Reading this file can return large amounts of data if a cgroup has
- * *lots* of attached tasks. So it may need several calls to read(),
- * but we cannot guarantee that the information we produce is correct
- * unless we produce it entirely atomically.
- *
- */
- /* which pidlist file are we talking about? */
- enum cgroup_filetype {
- CGROUP_FILE_PROCS,
- CGROUP_FILE_TASKS,
- };
- /*
- * A pidlist is a list of pids that virtually represents the contents of one
- * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
- * a pair (one each for procs, tasks) for each pid namespace that's relevant
- * to the cgroup.
- */
- struct cgroup_pidlist {
- /*
- * used to find which pidlist is wanted; doesn't change for as long
- * as this pidlist stays on its cgroup's list.
- */
- struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
- /* array of xids */
- pid_t *list;
- /* how many elements the above list has */
- int length;
- /* how many files are using the current array */
- int use_count;
- /* each of these stored in a list by its cgroup */
- struct list_head links;
- /* pointer to the cgroup we belong to, for list removal purposes */
- struct cgroup *owner;
- /* protects the other fields */
- struct rw_semaphore mutex;
- };
- /*
- * The following helpers "fix" the issue where there are more pids
- * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
- * TODO: replace with a kernel-wide solution to this problem
- */
- #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
- static void *pidlist_allocate(int count)
- {
- if (PIDLIST_TOO_LARGE(count))
- return vmalloc(count * sizeof(pid_t));
- else
- return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
- }
- static void pidlist_free(void *p)
- {
- if (is_vmalloc_addr(p))
- vfree(p);
- else
- kfree(p);
- }
- static void *pidlist_resize(void *p, int newcount)
- {
- void *newlist;
- /* note: if new alloc fails, old p will still be valid either way */
- if (is_vmalloc_addr(p)) {
- newlist = vmalloc(newcount * sizeof(pid_t));
- if (!newlist)
- return NULL;
- memcpy(newlist, p, newcount * sizeof(pid_t));
- vfree(p);
- } else {
- newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
- }
- return newlist;
- }
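- /*
- * Illustrative sizing (assuming 4 KiB pages and a 4-byte pid_t): the
- * PIDLIST_TOO_LARGE cutoff is 2 * 4096 = 8192 bytes, so lists of more
- * than 2048 pids are served by vmalloc() and smaller ones by kmalloc().
- * Note that pidlist_resize() copies only newcount entries, which is
- * safe because its sole caller, pidlist_uniq(), only ever shrinks the
- * array.
- */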
- /*
- * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
- * If the new stripped list is sufficiently smaller and there's enough memory
- * to allocate a new buffer, will let go of the unneeded memory. Returns the
- * number of unique elements.
- */
- /* is the size difference enough that we should re-allocate the array? */
- #define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
- static int pidlist_uniq(pid_t **p, int length)
- {
- int src, dest = 1;
- pid_t *list = *p;
- pid_t *newlist;
- /*
- * we presume the 0th element is unique, so src starts at 1. trivial
- * edge cases first; no work needs to be done for either
- */
- if (length == 0 || length == 1)
- return length;
- /* src and dest walk down the list; dest counts unique elements */
- for (src = 1; src < length; src++) {
- /* find next unique element */
- while (list[src] == list[src-1]) {
- src++;
- if (src == length)
- goto after;
- }
- /* dest always points to where the next unique element goes */
- list[dest] = list[src];
- dest++;
- }
- after:
- /*
- * if the length difference is large enough, we want to allocate a
- * smaller buffer to save memory. if this fails due to out of memory,
- * we'll just stay with what we've got.
- */
- if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
- newlist = pidlist_resize(list, dest);
- if (newlist)
- *p = newlist;
- }
- return dest;
- }
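- /*
- * Illustrative userspace analogue of the dedup walk above (a sketch,
- * not kernel code; like pidlist_uniq() it assumes a sorted input).
- * E.g. {3, 3, 5, 7, 7} compacts in place to {3, 5, 7} and returns 3:
- *
- *	static int uniq(int *list, int n)
- *	{
- *		int src, dest = 1;
- *
- *		if (n < 2)
- *			return n;
- *		for (src = 1; src < n; src++)
- *			if (list[src] != list[src - 1])
- *				list[dest++] = list[src];
- *		return dest;
- *	}
- */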
- static int cmppid(const void *a, const void *b)
- {
- return *(pid_t *)a - *(pid_t *)b;
- }
- /*
- * find the appropriate pidlist for our purpose (given procs vs tasks)
- * returns with the lock on that pidlist already held, and takes care
- * of the use count, or returns NULL with no locks held if we're out of
- * memory.
- */
- static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
- enum cgroup_filetype type)
- {
- struct cgroup_pidlist *l;
- /* don't need task_nsproxy() if we're looking at ourself */
- struct pid_namespace *ns = current->nsproxy->pid_ns;
- /*
- * We can't drop the pidlist_mutex before taking the l->mutex in case
- * the last ref-holder is trying to remove l from the list at the same
- * time. Holding the pidlist_mutex precludes somebody taking whichever
- * list we find out from under us - compare cgroup_release_pid_array().
- */
- mutex_lock(&cgrp->pidlist_mutex);
- list_for_each_entry(l, &cgrp->pidlists, links) {
- if (l->key.type == type && l->key.ns == ns) {
- /* make sure l doesn't vanish out from under us */
- down_write(&l->mutex);
- mutex_unlock(&cgrp->pidlist_mutex);
- return l;
- }
- }
- /* entry not found; create a new one */
- l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
- if (!l) {
- mutex_unlock(&cgrp->pidlist_mutex);
- return l;
- }
- init_rwsem(&l->mutex);
- down_write(&l->mutex);
- l->key.type = type;
- l->key.ns = get_pid_ns(ns);
- l->use_count = 0; /* don't increment here */
- l->list = NULL;
- l->owner = cgrp;
- list_add(&l->links, &cgrp->pidlists);
- mutex_unlock(&cgrp->pidlist_mutex);
- return l;
- }
- /*
- * Load a cgroup's pidarray with either procs' tgids or tasks' pids
- */
- static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
- struct cgroup_pidlist **lp)
- {
- pid_t *array;
- int length;
- int pid, n = 0; /* used for populating the array */
- struct cgroup_iter it;
- struct task_struct *tsk;
- struct cgroup_pidlist *l;
- /*
- * If cgroup gets more users after we read count, we won't have
- * enough space - tough. This race is indistinguishable to the
- * caller from the case that the additional cgroup users didn't
- * show up until sometime later on.
- */
- length = cgroup_task_count(cgrp);
- array = pidlist_allocate(length);
- if (!array)
- return -ENOMEM;
- /* now, populate the array */
- cgroup_iter_start(cgrp, &it);
- while ((tsk = cgroup_iter_next(cgrp, &it))) {
- if (unlikely(n == length))
- break;
- /* get tgid or pid for procs or tasks file respectively */
- if (type == CGROUP_FILE_PROCS)
- pid = task_tgid_vnr(tsk);
- else
- pid = task_pid_vnr(tsk);
- if (pid > 0) /* make sure to only use valid results */
- array[n++] = pid;
- }
- cgroup_iter_end(cgrp, &it);
- length = n;
- /* now sort & (if procs) strip out duplicates */
- sort(array, length, sizeof(pid_t), cmppid, NULL);
- if (type == CGROUP_FILE_PROCS)
- length = pidlist_uniq(&array, length);
- l = cgroup_pidlist_find(cgrp, type);
- if (!l) {
- pidlist_free(array);
- return -ENOMEM;
- }
- /* store array, freeing old if necessary - lock already held */
- pidlist_free(l->list);
- l->list = array;
- l->length = length;
- l->use_count++;
- up_write(&l->mutex);
- *lp = l;
- return 0;
- }
- /**
- * cgroupstats_build - build and fill cgroupstats
- * @stats: cgroupstats to fill information into
- * @dentry: A dentry entry belonging to the cgroup for which stats have
- * been requested.
- *
- * Build and fill cgroupstats so that taskstats can export it to user
- * space.
- */
- int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
- {
- int ret = -EINVAL;
- struct cgroup *cgrp;
- struct cgroup_iter it;
- struct task_struct *tsk;
- /*
- * Validate dentry by checking the superblock operations,
- * and make sure it's a directory.
- */
- if (dentry->d_sb->s_op != &cgroup_ops ||
- !S_ISDIR(dentry->d_inode->i_mode))
- goto err;
- ret = 0;
- cgrp = dentry->d_fsdata;
- cgroup_iter_start(cgrp, &it);
- while ((tsk = cgroup_iter_next(cgrp, &it))) {
- switch (tsk->state) {
- case TASK_RUNNING:
- stats->nr_running++;
- break;
- case TASK_INTERRUPTIBLE:
- stats->nr_sleeping++;
- break;
- case TASK_UNINTERRUPTIBLE:
- stats->nr_uninterruptible++;
- break;
- case TASK_STOPPED:
- stats->nr_stopped++;
- break;
- default:
- if (delayacct_is_task_waiting_on_io(tsk))
- stats->nr_io_wait++;
- break;
- }
- }
- cgroup_iter_end(cgrp, &it);
- err:
- return ret;
- }
- /*
- * seq_file methods for the tasks/procs files. The seq_file position is the
- * next pid to display; the seq_file iterator is a pointer to the pid
- * in the cgroup->l->list array.
- */
- static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
- {
- /*
- * Initially we receive a position value that corresponds to
- * one more than the last pid shown (or 0 on the first call or
- * after a seek to the start). Use a binary-search to find the
- * next pid to display, if any
- */
- struct cgroup_pidlist *l = s->private;
- int index = 0, pid = *pos;
- int *iter;
- down_read(&l->mutex);
- if (pid) {
- int end = l->length;
- while (index < end) {
- int mid = (index + end) / 2;
- if (l->list[mid] == pid) {
- index = mid;
- break;
- } else if (l->list[mid] <= pid)
- index = mid + 1;
- else
- end = mid;
- }
- }
- /* If we're off the end of the array, we're done */
- if (index >= l->length)
- return NULL;
- /* Update the abstract position to be the actual pid that we found */
- iter = l->list + index;
- *pos = *iter;
- return iter;
- }
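- /*
- * Worked example (hypothetical values): with l->list = {2, 5, 9} and a
- * resumed *pos of 6, the search above converges on index 2, so pid 9 is
- * shown next and *pos becomes 9; a *pos of 5 lands exactly on index 1
- * and resumes at pid 5 itself.
- */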
- static void cgroup_pidlist_stop(struct seq_file *s, void *v)
- {
- struct cgroup_pidlist *l = s->private;
- up_read(&l->mutex);
- }
- static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
- {
- struct cgroup_pidlist *l = s->private;
- pid_t *p = v;
- pid_t *end = l->list + l->length;
- /*
- * Advance to the next pid in the array. If this goes off the
- * end, we're done
- */
- p++;
- if (p >= end) {
- return NULL;
- } else {
- *pos = *p;
- return p;
- }
- }
- static int cgroup_pidlist_show(struct seq_file *s, void *v)
- {
- return seq_printf(s, "%d\n", *(int *)v);
- }
- /*
- * seq_operations functions for iterating on pidlists through seq_file -
- * independent of whether it's tasks or procs
- */
- static const struct seq_operations cgroup_pidlist_seq_operations = {
- .start = cgroup_pidlist_start,
- .stop = cgroup_pidlist_stop,
- .next = cgroup_pidlist_next,
- .show = cgroup_pidlist_show,
- };
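- /*
- * For one buffer fill, the seq_file core drives these callbacks roughly
- * as follows (a simplified sketch of the generic seq_read() loop, not
- * the exact implementation):
- *
- *	p = start(m, &pos);
- *	while (p) {
- *		show(m, p);
- *		p = next(m, p, &pos);
- *	}
- *	stop(m, p);
- *
- * so l->mutex is read-held across the whole traversal of one call.
- */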
- static void cgroup_release_pid_array(struct cgroup_pidlist *l)
- {
- /*
- * the case where we're the last user of this particular pidlist will
- * have us remove it from the cgroup's list, which entails taking the
- * mutex. since in cgroup_pidlist_find() the pidlist's lock depends on cgroup->
- * pidlist_mutex, we have to take pidlist_mutex first.
- */
- mutex_lock(&l->owner->pidlist_mutex);
- down_write(&l->mutex);
- BUG_ON(!l->use_count);
- if (!--l->use_count) {
- /* we're the last user if refcount is 0; remove and free */
- list_del(&l->links);
- mutex_unlock(&l->owner->pidlist_mutex);
- pidlist_free(l->list);
- put_pid_ns(l->key.ns);
- up_write(&l->mutex);
- kfree(l);
- return;
- }
- mutex_unlock(&l->owner->pidlist_mutex);
- up_write(&l->mutex);
- }
- static int cgroup_pidlist_release(struct inode *inode, struct file *file)
- {
- struct cgroup_pidlist *l;
- if (!(file->f_mode & FMODE_READ))
- return 0;
- /*
- * the seq_file will only be initialized if the file was opened for
- * reading; hence we check if it's not null only in that case.
- */
- l = ((struct seq_file *)file->private_data)->private;
- cgroup_release_pid_array(l);
- return seq_release(inode, file);
- }
- static const struct file_operations cgroup_pidlist_operations = {
- .read = seq_read,
- .llseek = seq_lseek,
- .write = cgroup_file_write,
- .release = cgroup_pidlist_release,
- };
- /*
- * The following functions handle opens on a file that displays a pidlist
- * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
- * in the cgroup.
- */
- /* helper function for the two below it */
- static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
- {
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
- struct cgroup_pidlist *l;
- int retval;
- /* Nothing to do for write-only files */
- if (!(file->f_mode & FMODE_READ))
- return 0;
- /* have the array populated */
- retval = pidlist_array_load(cgrp, type, &l);
- if (retval)
- return retval;
- /* configure file information */
- file->f_op = &cgroup_pidlist_operations;
- retval = seq_open(file, &cgroup_pidlist_seq_operations);
- if (retval) {
- cgroup_release_pid_array(l);
- return retval;
- }
- ((struct seq_file *)file->private_data)->private = l;
- return 0;
- }
- static int cgroup_tasks_open(struct inode *unused, struct file *file)
- {
- return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
- }
- static int cgroup_procs_open(struct inode *unused, struct file *file)
- {
- return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
- }
- static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
- struct cftype *cft)
- {
- return notify_on_release(cgrp);
- }
- static int cgroup_write_notify_on_release(struct cgroup *cgrp,
- struct cftype *cft,
- u64 val)
- {
- clear_bit(CGRP_RELEASABLE, &cgrp->flags);
- if (val)
- set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
- else
- clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
- return 0;
- }
- /*
- * Unregister event and free resources.
- *
- * Gets called from workqueue.
- */
- static void cgroup_event_remove(struct work_struct *work)
- {
- struct cgroup_event *event = container_of(work, struct cgroup_event,
- remove);
- struct cgroup *cgrp = event->cgrp;
- event->cft->unregister_event(cgrp, event->cft, event->eventfd);
- eventfd_ctx_put(event->eventfd);
- kfree(event);
- dput(cgrp->dentry);
- }
- /*
- * Gets called on POLLHUP on eventfd when user closes it.
- *
- * Called with wqh->lock held and interrupts disabled.
- */
- static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
- {
- struct cgroup_event *event = container_of(wait,
- struct cgroup_event, wait);
- struct cgroup *cgrp = event->cgrp;
- unsigned long flags = (unsigned long)key;
- if (flags & POLLHUP) {
- __remove_wait_queue(event->wqh, &event->wait);
- spin_lock(&cgrp->event_list_lock);
- list_del(&event->list);
- spin_unlock(&cgrp->event_list_lock);
- /*
- * We are in atomic context, but cgroup_event_remove() may
- * sleep, so we have to call it in workqueue.
- */
- schedule_work(&event->remove);
- }
- return 0;
- }
- static void cgroup_event_ptable_queue_proc(struct file *file,
- wait_queue_head_t *wqh, poll_table *pt)
- {
- struct cgroup_event *event = container_of(pt,
- struct cgroup_event, pt);
- event->wqh = wqh;
- add_wait_queue(wqh, &event->wait);
- }
- /*
- * Parse input and register new cgroup event handler.
- *
- * Input must be in format '<event_fd> <control_fd> <args>'.
- * Interpretation of args is defined by control file implementation.
- */
- static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
- {
- struct cgroup_event *event = NULL;
- struct cgroup *cgrp_cfile;
- unsigned int efd, cfd;
- struct file *efile = NULL;
- struct file *cfile = NULL;
- char *endp;
- int ret;
- efd = simple_strtoul(buffer, &endp, 10);
- if (*endp != ' ')
- return -EINVAL;
- buffer = endp + 1;
- cfd = simple_strtoul(buffer, &endp, 10);
- if ((*endp != ' ') && (*endp != '\0'))
- return -EINVAL;
- buffer = endp + 1;
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
- event->cgrp = cgrp;
- INIT_LIST_HEAD(&event->list);
- init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
- init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
- INIT_WORK(&event->remove, cgroup_event_remove);
- efile = eventfd_fget(efd);
- if (IS_ERR(efile)) {
- ret = PTR_ERR(efile);
- goto fail;
- }
- event->eventfd = eventfd_ctx_fileget(efile);
- if (IS_ERR(event->eventfd)) {
- ret = PTR_ERR(event->eventfd);
- goto fail;
- }
- cfile = fget(cfd);
- if (!cfile) {
- ret = -EBADF;
- goto fail;
- }
- /* the process needs read permission on the control file */
- /* AV: shouldn't we check that it's been opened for read instead? */
- ret = inode_permission(cfile->f_path.dentry->d_inode, MAY_READ);
- if (ret < 0)
- goto fail;
- event->cft = __file_cft(cfile);
- if (IS_ERR(event->cft)) {
- ret = PTR_ERR(event->cft);
- goto fail;
- }
- /*
- * The file to be monitored must be in the same cgroup as
- * cgroup.event_control is.
- */
- cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
- if (cgrp_cfile != cgrp) {
- ret = -EINVAL;
- goto fail;
- }
- if (!event->cft->register_event || !event->cft->unregister_event) {
- ret = -EINVAL;
- goto fail;
- }
- ret = event->cft->register_event(cgrp, event->cft,
- event->eventfd, buffer);
- if (ret)
- goto fail;
- if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
- event->cft->unregister_event(cgrp, event->cft, event->eventfd);
- ret = 0;
- goto fail;
- }
- /*
- * Events should be removed after rmdir of cgroup directory, but before
- * destroying subsystem state objects. Let's take reference to cgroup
- * directory dentry to do that.
- */
- dget(cgrp->dentry);
- spin_lock(&cgrp->event_list_lock);
- list_add(&event->list, &cgrp->event_list);
- spin_unlock(&cgrp->event_list_lock);
- fput(cfile);
- fput(efile);
- return 0;
- fail:
- if (cfile)
- fput(cfile);
- if (event && event->eventfd && !IS_ERR(event->eventfd))
- eventfd_ctx_put(event->eventfd);
- if (!IS_ERR_OR_NULL(efile))
- fput(efile);
- kfree(event);
- return ret;
- }
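- /*
- * Illustrative userspace usage (a sketch; the "memory.usage_in_bytes"
- * control file and the "4194304" threshold argument are assumptions
- * modeled on memcg's threshold events). The final read() blocks until
- * the registered event fires:
- *
- *	int efd = eventfd(0, 0);
- *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
- *	int ctl = open("cgroup.event_control", O_WRONLY);
- *	uint64_t ticks;
- *	char buf[64];
- *
- *	snprintf(buf, sizeof(buf), "%d %d 4194304", efd, cfd);
- *	write(ctl, buf, strlen(buf));
- *	read(efd, &ticks, sizeof(ticks));
- */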
- static u64 cgroup_clone_children_read(struct cgroup *cgrp,
- struct cftype *cft)
- {
- return clone_children(cgrp);
- }
- static int cgroup_clone_children_write(struct cgroup *cgrp,
- struct cftype *cft,
- u64 val)
- {
- if (val)
- set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
- else
- clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
- return 0;
- }
- /*
- * for the common functions, 'private' gives the type of file
- */
- /* for hysterical raisins, we can't put this on the older files */
- #define CGROUP_FILE_GENERIC_PREFIX "cgroup."
- static struct cftype files[] = {
- {
- .name = "tasks",
- .open = cgroup_tasks_open,
- .write_u64 = cgroup_tasks_write,
- .release = cgroup_pidlist_release,
- .mode = S_IRUGO | S_IWUSR,
- },
- {
- .name = CGROUP_FILE_GENERIC_PREFIX "procs",
- .open = cgroup_procs_open,
- .write_u64 = cgroup_procs_write,
- .release = cgroup_pidlist_release,
- .mode = S_IRUGO | S_IWUSR,
- },
- {
- .name = "notify_on_release",
- .read_u64 = cgroup_read_notify_on_release,
- .write_u64 = cgroup_write_notify_on_release,
- },
- {
- .name = CGROUP_FILE_GENERIC_PREFIX "event_control",
- .write_string = cgroup_write_event_control,
- .mode = S_IWUGO,
- },
- {
- .name = "cgroup.clone_children",
- .read_u64 = cgroup_clone_children_read,
- .write_u64 = cgroup_clone_children_write,
- },
- {
- .name = "release_agent",
- .flags = CFTYPE_ONLY_ON_ROOT,
- .read_seq_string = cgroup_release_agent_show,
- .write_string = cgroup_release_agent_write,
- .max_write_len = PATH_MAX,
- },
- { } /* terminate */
- };
- /**
- * cgroup_populate_dir - selectively create files in a directory
- * @cgrp: target cgroup
- * @base_files: true if the base files should be added
- * @subsys_mask: mask of the subsystem ids whose files should be added
- */
- static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
- unsigned long subsys_mask)
- {
- int err;
- struct cgroup_subsys *ss;
- if (base_files) {
- err = cgroup_addrm_files(cgrp, NULL, files, true);
- if (err < 0)
- return err;
- }
- /* process cftsets of each subsystem */
- for_each_subsys(cgrp->root, ss) {
- struct cftype_set *set;
- if (!test_bit(ss->subsys_id, &subsys_mask))
- continue;
- list_for_each_entry(set, &ss->cftsets, node)
- cgroup_addrm_files(cgrp, ss, set->cfts, true);
- }
- /* This cgroup is ready now */
- for_each_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
- /*
- * Update id->css pointer and make this css visible from
- * CSS ID functions. This pointer will be dereferenced
- * from RCU-read-side without locks.
- */
- if (css->id)
- rcu_assign_pointer(css->id->css, css);
- }
- return 0;
- }
- static void css_dput_fn(struct work_struct *work)
- {
- struct cgroup_subsys_state *css =
- container_of(work, struct cgroup_subsys_state, dput_work);
- struct dentry *dentry = css->cgroup->dentry;
- struct super_block *sb = dentry->d_sb;
- atomic_inc(&sb->s_active);
- dput(dentry);
- deactivate_super(sb);
- }
- static void init_cgroup_css(struct cgroup_subsys_state *css,
- struct cgroup_subsys *ss,
- struct cgroup *cgrp)
- {
- css->cgroup = cgrp;
- atomic_set(&css->refcnt, 1);
- css->flags = 0;
- css->id = NULL;
- if (cgrp == dummytop)
- set_bit(CSS_ROOT, &css->flags);
- BUG_ON(cgrp->subsys[ss->subsys_id]);
- cgrp->subsys[ss->subsys_id] = css;
- /*
- * If !clear_css_refs, css holds an extra ref to @cgrp->dentry
- * which is put on the last css_put(). dput() requires process
- * context, but css_put() may be called from contexts without it;
- * @css->dput_work is used to invoke dput() asynchronously from css_put().
- */
- INIT_WORK(&css->dput_work, css_dput_fn);
- if (ss->__DEPRECATED_clear_css_refs)
- set_bit(CSS_CLEAR_CSS_REFS, &css->flags);
- }
- /*
- * cgroup_create - create a cgroup
- * @parent: cgroup that will be parent of the new cgroup
- * @dentry: dentry of the new cgroup
- * @mode: mode to set on new inode
- *
- * Must be called with the mutex on the parent inode held
- */
- static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
- umode_t mode)
- {
- struct cgroup *cgrp;
- struct cgroupfs_root *root = parent->root;
- int err = 0;
- struct cgroup_subsys *ss;
- struct super_block *sb = root->sb;
- cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
- if (!cgrp)
- return -ENOMEM;
- /* Grab a reference on the superblock so the hierarchy doesn't
- * get deleted on unmount if there are child cgroups. This
- * can be done outside cgroup_mutex, since the sb can't
- * disappear while someone has an open control file on the
- * fs */
- atomic_inc(&sb->s_active);
- mutex_lock(&cgroup_mutex);
- init_cgroup_housekeeping(cgrp);
- cgrp->parent = parent;
- cgrp->root = parent->root;
- cgrp->top_cgroup = parent->top_cgroup;
- if (notify_on_release(parent))
- set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
- if (clone_children(parent))
- set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
- for_each_subsys(root, ss) {
- struct cgroup_subsys_state *css;
- css = ss->create(cgrp);
- if (IS_ERR(css)) {
- err = PTR_ERR(css);
- goto err_destroy;
- }
- init_cgroup_css(css, ss, cgrp);
- if (ss->use_id) {
- err = alloc_css_id(ss, parent, cgrp);
- if (err)
- goto err_destroy;
- }
- /* At error, ->destroy() callback has to free assigned ID. */
- if (clone_children(parent) && ss->post_clone)
- ss->post_clone(cgrp);
- if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
- parent->parent) {
- pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
- current->comm, current->pid, ss->name);
- if (!strcmp(ss->name, "memory"))
- pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
- ss->warned_broken_hierarchy = true;
- }
- }
- list_add(&cgrp->sibling, &cgrp->parent->children);
- root->number_of_cgroups++;
- err = cgroup_create_dir(cgrp, dentry, mode);
- if (err < 0)
- goto err_remove;
- /* If !clear_css_refs, each css holds a ref to the cgroup's dentry */
- for_each_subsys(root, ss)
- if (!ss->__DEPRECATED_clear_css_refs)
- dget(dentry);
- set_bit(CGRP_RELEASABLE, &parent->flags);
- /* The cgroup directory was pre-locked for us */
- BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
- list_add_tail(&cgrp->allcg_node, &root->allcg_list);
- err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
- /* If err < 0, we have a half-filled directory - oh well ;) */
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
- return 0;
- err_remove:
- list_del(&cgrp->sibling);
- root->number_of_cgroups--;
- err_destroy:
- for_each_subsys(root, ss) {
- if (cgrp->subsys[ss->subsys_id])
- ss->destroy(cgrp);
- }
- mutex_unlock(&cgroup_mutex);
- /* Release the reference count that we took on the superblock */
- deactivate_super(sb);
- kfree(cgrp);
- return err;
- }
- static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
- {
- struct cgroup *c_parent = dentry->d_parent->d_fsdata;
- /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable. */
- if (strchr(dentry->d_name.name, '\n'))
- return -EINVAL;
- /* the vfs holds inode->i_mutex already */
- return cgroup_create(c_parent, dentry, mode | S_IFDIR);
- }
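- /*
- * Illustrative trigger (hypothetical mount point): a plain mkdir(2)
- * on a mounted hierarchy arrives here through the VFS:
- *
- *	mkdir("/sys/fs/cgroup/cpu/mygroup", 0755);
- */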
- /*
- * Check the reference count on each subsystem. Since we already
- * established that there are no tasks in the cgroup, if the css refcount
- * is also 1, then there should be no outstanding references, so the
- * subsystem is safe to destroy. We scan across all subsystems rather than
- * using the per-hierarchy linked list of mounted subsystems since we can
- * be called via check_for_release() with no synchronization other than
- * RCU, and the subsystem linked list isn't RCU-safe.
- */
- static int cgroup_has_css_refs(struct cgroup *cgrp)
- {
- int i;
- /*
- * We won't need to lock the subsys array, because the subsystems
- * we're concerned about aren't going anywhere since our cgroup root
- * has a reference on them.
- */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- struct cgroup_subsys_state *css;
- /* Skip subsystems not present or not in this hierarchy */
- if (ss == NULL || ss->root != cgrp->root)
- continue;
- css = cgrp->subsys[ss->subsys_id];
- /*
- * When called from check_for_release() it's possible
- * that by this point the cgroup has been removed
- * and the css deleted. But a false-positive doesn't
- * matter, since it can only happen if the cgroup
- * has been deleted and hence no longer needs the
- * release agent to be called anyway.
- */
- if (css && css_refcnt(css) > 1)
- return 1;
- }
- return 0;
- }
- /*
- * Atomically mark all (or else none) of the cgroup's CSS objects as
- * CSS_REMOVED. Return true on success, or false if the cgroup has
- * busy subsystems. Call with cgroup_mutex held
- *
- * Depending on whether a subsys has __DEPRECATED_clear_css_refs set or
- * not, cgroup removal behaves differently.
- *
- * If clear is set, css refcnt for the subsystem should be zero before
- * cgroup removal can be committed. This is implemented by
- * CGRP_WAIT_ON_RMDIR and retry logic around ->pre_destroy(), which may be
- * called multiple times until all css refcnts reach zero and is allowed to
- * veto removal on any invocation. This behavior is deprecated and will be
- * removed as soon as the existing user (memcg) is updated.
- *
- * If clear is not set, each css holds an extra reference to the cgroup's
- * dentry and cgroup removal proceeds regardless of css refs.
- * ->pre_destroy() will be called at least once and is not allowed to fail.
- * On the last put of each css, whenever that may be, the extra dentry ref
- * is put so that dentry destruction happens only after all css's are
- * released.
- */
- static int cgroup_clear_css_refs(struct cgroup *cgrp)
- {
- struct cgroup_subsys *ss;
- unsigned long flags;
- bool failed = false;
- if (atomic_read(&cgrp->count) != 0)
- return false;
- local_irq_save(flags);
- /*
- * Block new css_tryget() by deactivating refcnt. If all refcnts
- * for subsystems w/ clear_css_refs set were 1 at the moment of
- * deactivation, we succeeded.
- */
- for_each_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
- WARN_ON(atomic_read(&css->refcnt) < 0);
- atomic_add(CSS_DEACT_BIAS, &css->refcnt);
- if (ss->__DEPRECATED_clear_css_refs)
- failed |= css_refcnt(css) != 1;
- }
- /*
- * If succeeded, set REMOVED and put all the base refs; otherwise,
- * restore refcnts to positive values. Either way, all in-progress
- * css_tryget() will be released.
- */
- for_each_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
- if (!failed) {
- set_bit(CSS_REMOVED, &css->flags);
- css_put(css);
- } else {
- atomic_sub(CSS_DEACT_BIAS, &css->refcnt);
- }
- }
- local_irq_restore(flags);
- return !failed;
- }
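- /*
- * Worked example of the bias trick above: a css holding only its base
- * reference has refcnt == 1; adding CSS_DEACT_BIAS drives the raw
- * counter negative while css_refcnt() masks the bias back out (still
- * reading 1). The cmpxchg in __css_tryget() compares against the
- * unbiased value, so it can no longer succeed and new tryget attempts
- * spin until CSS_REMOVED is set or, on failure, the bias is subtracted
- * again.
- */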
- /* Checks if all of the css_sets attached to a cgroup have a refcount of 0. */
- static int cgroup_css_sets_empty(struct cgroup *cgrp)
- {
- struct cg_cgroup_link *link;
- int retval = 1;
- read_lock(&css_set_lock);
- list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
- struct css_set *cg = link->cg;
- if (cg && (atomic_read(&cg->refcount) > 0)) {
- retval = 0;
- break;
- }
- }
- read_unlock(&css_set_lock);
- return retval;
- }
- static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
- {
- struct cgroup *cgrp = dentry->d_fsdata;
- struct dentry *d;
- struct cgroup *parent;
- DEFINE_WAIT(wait);
- struct cgroup_event *event, *tmp;
- int ret;
- /* the vfs holds both inode->i_mutex already */
- again:
- mutex_lock(&cgroup_mutex);
- if (!cgroup_css_sets_empty(cgrp)) {
- mutex_unlock(&cgroup_mutex);
- return -EBUSY;
- }
- if (!list_empty(&cgrp->children)) {
- mutex_unlock(&cgroup_mutex);
- return -EBUSY;
- }
- mutex_unlock(&cgroup_mutex);
- /*
- * In general, a subsystem has no css->refcnt after pre_destroy(). But
- * in racy cases, a subsystem may have to take a css->refcnt after
- * pre_destroy(), which makes rmdir return with -EBUSY. This can make
- * rmdir return -EBUSY too often. To avoid that, we use a waitqueue
- * for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for synchronizing rmdir
- * and the subsystem's reference count handling. See the css_get/put,
- * css_tryget() and cgroup_wakeup_rmdir_waiter() implementations.
- */
- set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- /*
- * Call pre_destroy handlers of subsys. Notify subsystems
- * that rmdir() request comes.
- */
- ret = cgroup_call_pre_destroy(cgrp);
- if (ret) {
- clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- return ret;
- }
- mutex_lock(&cgroup_mutex);
- parent = cgrp->parent;
- if (!cgroup_css_sets_empty(cgrp) || !list_empty(&cgrp->children)) {
- clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- mutex_unlock(&cgroup_mutex);
- return -EBUSY;
- }
- prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
- if (!cgroup_clear_css_refs(cgrp)) {
- mutex_unlock(&cgroup_mutex);
- /*
- * Because someone may call cgroup_wakeup_rmdir_waiter() before
- * prepare_to_wait(), we need to check this flag.
- */
- if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
- schedule();
- finish_wait(&cgroup_rmdir_waitq, &wait);
- clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- if (signal_pending(current))
- return -EINTR;
- goto again;
- }
- /* No css_tryget() can succeed after this point. */
- finish_wait(&cgroup_rmdir_waitq, &wait);
- clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- raw_spin_lock(&release_list_lock);
- set_bit(CGRP_REMOVED, &cgrp->flags);
- if (!list_empty(&cgrp->release_list))
- list_del_init(&cgrp->release_list);
- raw_spin_unlock(&release_list_lock);
- /* delete this cgroup from parent->children */
- list_del_init(&cgrp->sibling);
- list_del_init(&cgrp->allcg_node);
- d = dget(cgrp->dentry);
- cgroup_d_remove_dir(d);
- dput(d);
- check_for_release(parent);
- /*
- * Unregister events and notify userspace.
- * Notify userspace about cgroup removing only after rmdir of cgroup
- * directory to avoid race between userspace and kernelspace
- */
- spin_lock(&cgrp->event_list_lock);
- list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
- list_del(&event->list);
- remove_wait_queue(event->wqh, &event->wait);
- eventfd_signal(event->eventfd, 1);
- schedule_work(&event->remove);
- }
- spin_unlock(&cgrp->event_list_lock);
- mutex_unlock(&cgroup_mutex);
- return 0;
- }
- static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
- {
- INIT_LIST_HEAD(&ss->cftsets);
- /*
- * base_cftset is embedded in subsys itself, no need to worry about
- * deregistration.
- */
- if (ss->base_cftypes) {
- ss->base_cftset.cfts = ss->base_cftypes;
- list_add_tail(&ss->base_cftset.node, &ss->cftsets);
- }
- }
- static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
- {
- struct cgroup_subsys_state *css;
- printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
- /* init base cftset */
- cgroup_init_cftsets(ss);
- /* Create the top cgroup state for this subsystem */
- list_add(&ss->sibling, &rootnode.subsys_list);
- ss->root = &rootnode;
- css = ss->create(dummytop);
- /* We don't handle early failures gracefully */
- BUG_ON(IS_ERR(css));
- init_cgroup_css(css, ss, dummytop);
- /* Update the init_css_set to contain a subsys
- * pointer to this state - since the subsystem is
- * newly registered, all tasks and hence the
- * init_css_set is in the subsystem's top cgroup. */
- init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
- need_forkexit_callback |= ss->fork || ss->exit;
- /* At system boot, before all subsystems have been
- * registered, no tasks have been forked, so we don't
- * need to invoke fork callbacks here. */
- BUG_ON(!list_empty(&init_task.tasks));
- ss->active = 1;
- /* this function shouldn't be used with modular subsystems, since they
- * need to register a subsys_id, among other things */
- BUG_ON(ss->module);
- }
- /**
- * cgroup_load_subsys: load and register a modular subsystem at runtime
- * @ss: the subsystem to load
- *
- * This function should be called in a modular subsystem's initcall. If the
- * subsystem is built as a module, it will be assigned a new subsys_id and set
- * up for use. If the subsystem is built-in anyway, work is delegated to the
- * simpler cgroup_init_subsys.
- */
- int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
- {
- int i;
- struct cgroup_subsys_state *css;
- struct hlist_node *node, *tmp;
- struct css_set *cg;
- unsigned long key;
- /* check name and function validity */
- if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
- ss->create == NULL || ss->destroy == NULL)
- return -EINVAL;
- /*
- * we don't support callbacks in modular subsystems. this check is
- * before the ss->module check for consistency; a subsystem that could
- * be a module should still have no callbacks even if the user isn't
- * compiling it as one.
- */
- if (ss->fork || ss->exit)
- return -EINVAL;
- /*
- * an optionally modular subsystem is built-in: we want to do nothing,
- * since cgroup_init_subsys will have already taken care of it.
- */
- if (ss->module == NULL) {
- /* a few sanity checks */
- BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
- BUG_ON(subsys[ss->subsys_id] != ss);
- return 0;
- }
- /* init base cftset */
- cgroup_init_cftsets(ss);
- /*
- * need to register a subsys id before anything else - for example,
- * init_cgroup_css needs it.
- */
- mutex_lock(&cgroup_mutex);
- /* find the first empty slot in the array */
- for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
- if (subsys[i] == NULL)
- break;
- }
- if (i == CGROUP_SUBSYS_COUNT) {
- /* maximum number of subsystems already registered! */
- mutex_unlock(&cgroup_mutex);
- return -EBUSY;
- }
- /* assign ourselves the subsys_id */
- ss->subsys_id = i;
- subsys[i] = ss;
- /*
- * no ss->create seems to need anything important in the ss struct, so
- * this can happen first (i.e. before the rootnode attachment).
- */
- css = ss->create(dummytop);
- if (IS_ERR(css)) {
- /* failure case - need to deassign the subsys[] slot. */
- subsys[i] = NULL;
- mutex_unlock(&cgroup_mutex);
- return PTR_ERR(css);
- }
- list_add(&ss->sibling, &rootnode.subsys_list);
- ss->root = &rootnode;
- /* our new subsystem will be attached to the dummy hierarchy. */
- init_cgroup_css(css, ss, dummytop);
- /* init_idr must be after init_cgroup_css because it sets css->id. */
- if (ss->use_id) {
- int ret = cgroup_init_idr(ss, css);
- if (ret) {
- dummytop->subsys[ss->subsys_id] = NULL;
- ss->destroy(dummytop);
- subsys[i] = NULL;
- mutex_unlock(&cgroup_mutex);
- return ret;
- }
- }
- /*
- * Now we need to entangle the css into the existing css_sets. unlike
- * in cgroup_init_subsys, there are now multiple css_sets, so each one
- * will need a new pointer to it; done by iterating the css_set_table.
- * furthermore, modifying the existing css_sets will corrupt the hash
- * table state, so each changed css_set will need its hash recomputed.
- * this is all done under the css_set_lock.
- */
- write_lock(&css_set_lock);
- hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
- /* skip entries that we already rehashed */
- if (cg->subsys[ss->subsys_id])
- continue;
- /* remove existing entry */
- hash_del(&cg->hlist);
- /* set new value */
- cg->subsys[ss->subsys_id] = css;
- /* recompute hash and restore entry */
- key = css_set_hash(cg->subsys);
- hash_add(css_set_table, node, key);
- }
- write_unlock(&css_set_lock);
- ss->active = 1;
- /* success! */
- mutex_unlock(&cgroup_mutex);
- return 0;
- }
- EXPORT_SYMBOL_GPL(cgroup_load_subsys);
- /**
- * cgroup_unload_subsys: unload a modular subsystem
- * @ss: the subsystem to unload
- *
- * This function should be called in a modular subsystem's exitcall. When this
- * function is invoked, the refcount on the subsystem's module will be 0, so
- * the subsystem will not be attached to any hierarchy.
- */
- void cgroup_unload_subsys(struct cgroup_subsys *ss)
- {
- struct cg_cgroup_link *link;
- BUG_ON(ss->module == NULL);
- /*
- * we shouldn't be called if the subsystem is in use, and the use of
- * try_module_get in parse_cgroupfs_options should ensure that it
- * doesn't start being used while we're killing it off.
- */
- BUG_ON(ss->root != &rootnode);
- mutex_lock(&cgroup_mutex);
- /* deassign the subsys_id */
- BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
- subsys[ss->subsys_id] = NULL;
- /* remove subsystem from rootnode's list of subsystems */
- list_del_init(&ss->sibling);
- /*
- * disentangle the css from all css_sets attached to the dummytop. as
- * in loading, we need to pay our respects to the hashtable gods.
- */
- write_lock(&css_set_lock);
- list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
- struct css_set *cg = link->cg;
- unsigned long key;
- hash_del(&cg->hlist);
- BUG_ON(!cg->subsys[ss->subsys_id]);
- cg->subsys[ss->subsys_id] = NULL;
- key = css_set_hash(cg->subsys);
- hash_add(css_set_table, &cg->hlist, key);
- }
- write_unlock(&css_set_lock);
- /*
- * remove subsystem's css from the dummytop and free it - need to free
- * before marking as null because ss->destroy needs the cgrp->subsys
- * pointer to find its state. note that this also takes care of
- * freeing the css_id.
- */
- ss->destroy(dummytop);
- dummytop->subsys[ss->subsys_id] = NULL;
- mutex_unlock(&cgroup_mutex);
- }
- EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
- /**
- * cgroup_init_early - cgroup initialization at system boot
- *
- * Initialize cgroups at system boot, and initialize any
- * subsystems that request early init.
- */
- int __init cgroup_init_early(void)
- {
- int i;
- atomic_set(&init_css_set.refcount, 1);
- INIT_LIST_HEAD(&init_css_set.cg_links);
- INIT_LIST_HEAD(&init_css_set.tasks);
- INIT_HLIST_NODE(&init_css_set.hlist);
- css_set_count = 1;
- init_cgroup_root(&rootnode);
- root_count = 1;
- init_task.cgroups = &init_css_set;
- init_css_set_link.cg = &init_css_set;
- init_css_set_link.cgrp = dummytop;
- list_add(&init_css_set_link.cgrp_link_list,
- &rootnode.top_cgroup.css_sets);
- list_add(&init_css_set_link.cg_link_list,
- &init_css_set.cg_links);
- /* at bootup time, we don't worry about modular subsystems */
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- BUG_ON(!ss->name);
- BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
- BUG_ON(!ss->create);
- BUG_ON(!ss->destroy);
- if (ss->subsys_id != i) {
- printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
- ss->name, ss->subsys_id);
- BUG();
- }
- if (ss->early_init)
- cgroup_init_subsys(ss);
- }
- return 0;
- }
- /**
- * cgroup_init - cgroup initialization
- *
- * Register cgroup filesystem and /proc file, and initialize
- * any subsystems that didn't request early init.
- */
- int __init cgroup_init(void)
- {
- int err;
- int i;
- unsigned long key;
- err = bdi_init(&cgroup_backing_dev_info);
- if (err)
- return err;
- /* at bootup time, we don't worry about modular subsystems */
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (!ss->early_init)
- cgroup_init_subsys(ss);
- if (ss->use_id)
- cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
- }
- /* Add init_css_set to the hash table */
- key = css_set_hash(init_css_set.subsys);
- hash_add(css_set_table, &init_css_set.hlist, key);
- BUG_ON(!init_root_id(&rootnode));
- cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
- if (!cgroup_kobj) {
- err = -ENOMEM;
- goto out;
- }
- err = register_filesystem(&cgroup_fs_type);
- if (err < 0) {
- kobject_put(cgroup_kobj);
- goto out;
- }
- proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
- out:
- if (err)
- bdi_destroy(&cgroup_backing_dev_info);
- return err;
- }
- /*
- * proc_cgroup_show()
- * - Print task's cgroup paths into seq_file, one line for each hierarchy
- * - Used for /proc/<pid>/cgroup.
- * - No need to task_lock(tsk) on this tsk->cgroup reference, as it
- * doesn't really matter if tsk->cgroup changes after we read it,
- * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
- * anyway. No need to check that tsk->cgroup != NULL, thanks to
- * the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
- * cgroup to top_cgroup.
- */
- /* TODO: Use a proper seq_file iterator */
- static int proc_cgroup_show(struct seq_file *m, void *v)
- {
- struct pid *pid;
- struct task_struct *tsk;
- char *buf;
- int retval;
- struct cgroupfs_root *root;
- retval = -ENOMEM;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- goto out;
- retval = -ESRCH;
- pid = m->private;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- if (!tsk)
- goto out_free;
- retval = 0;
- mutex_lock(&cgroup_mutex);
- for_each_active_root(root) {
- struct cgroup_subsys *ss;
- struct cgroup *cgrp;
- int count = 0;
- seq_printf(m, "%d:", root->hierarchy_id);
- for_each_subsys(root, ss)
- seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
- if (strlen(root->name))
- seq_printf(m, "%sname=%s", count ? "," : "",
- root->name);
- seq_putc(m, ':');
- cgrp = task_cgroup_from_root(tsk, root);
- retval = cgroup_path(cgrp, buf, PAGE_SIZE);
- if (retval < 0)
- goto out_unlock;
- seq_puts(m, buf);
- seq_putc(m, '\n');
- }
- out_unlock:
- mutex_unlock(&cgroup_mutex);
- put_task_struct(tsk);
- out_free:
- kfree(buf);
- out:
- return retval;
- }
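- /*
- * Example /proc/<pid>/cgroup output (hypothetical hierarchy ids,
- * controllers and paths), one line per active hierarchy:
- *
- *	2:cpu,cpuacct:/user
- *	1:name=systemd:/user/1000.user/c1.session
- */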
- static int cgroup_open(struct inode *inode, struct file *file)
- {
- struct pid *pid = PROC_I(inode)->pid;
- return single_open(file, proc_cgroup_show, pid);
- }
- const struct file_operations proc_cgroup_operations = {
- .open = cgroup_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
- /* Display information about each subsystem and each hierarchy */
- static int proc_cgroupstats_show(struct seq_file *m, void *v)
- {
- int i;
- seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
- /*
- * ideally we don't want subsystems moving around while we do this.
- * cgroup_mutex is also necessary to guarantee an atomic snapshot of
- * subsys/hierarchy state.
- */
- mutex_lock(&cgroup_mutex);
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss == NULL)
- continue;
- seq_printf(m, "%s\t%d\t%d\t%d\n",
- ss->name, ss->root->hierarchy_id,
- ss->root->number_of_cgroups, !ss->disabled);
- }
- mutex_unlock(&cgroup_mutex);
- return 0;
- }
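- /*
- * Example /proc/cgroups output (hypothetical values):
- *
- *	#subsys_name	hierarchy	num_cgroups	enabled
- *	cpuset		1		4		1
- *	cpu		2		12		1
- */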
- static int cgroupstats_open(struct inode *inode, struct file *file)
- {
- return single_open(file, proc_cgroupstats_show, NULL);
- }
- static const struct file_operations proc_cgroupstats_operations = {
- .open = cgroupstats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
- /**
- * cgroup_fork - attach a newly forked task to its parent's cgroup.
- * @child: pointer to task_struct of the newly forked child process.
- *
- * Description: A task inherits its parent's cgroup at fork().
- *
- * A pointer to the shared css_set was automatically copied in
- * fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU or cgroup_mutex, so
- * might no longer be a valid cgroup pointer. cgroup_attach_task() might
- * have already changed current->cgroups, allowing the previously
- * referenced css_set to be removed and freed.
- *
- * At the point that cgroup_fork() is called, 'current' is the parent
- * task, and the passed argument 'child' points to the child task.
- */
- void cgroup_fork(struct task_struct *child)
- {
- task_lock(current);
- child->cgroups = current->cgroups;
- get_css_set(child->cgroups);
- task_unlock(current);
- INIT_LIST_HEAD(&child->cg_list);
- }
- /**
- * cgroup_post_fork - called on a new task after adding it to the task list
- * @child: the task in question
- *
- * Adds the task to the list running through its css_set if necessary and
- * calls the subsystem fork() callbacks. Has to be after the task is
- * visible on the task list in case we race with the first call to
- * cgroup_iter_start() - to guarantee that the new task ends up on its
- * list.
- */
- void cgroup_post_fork(struct task_struct *child)
- {
- int i;
- /*
- * use_task_css_set_links is set to 1 before we walk the tasklist
- * under the tasklist_lock and we read it here after we added the child
- * to the tasklist under the tasklist_lock as well. If the child wasn't
- * yet in the tasklist when we walked through it from
- * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
- * should be visible now due to the paired locking and barriers implied
- * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
- * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
- * lock on fork.
- */
- if (use_task_css_set_links) {
- write_lock(&css_set_lock);
- task_lock(child);
- if (list_empty(&child->cg_list))
- list_add(&child->cg_list, &child->cgroups->tasks);
- task_unlock(child);
- write_unlock(&css_set_lock);
- }
- /*
- * Call ss->fork(). This must happen after @child is linked on
- * css_set; otherwise, @child might change state between ->fork()
- * and addition to css_set.
- */
- if (need_forkexit_callback) {
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss->fork)
- ss->fork(child);
- }
- }
- }
- /**
- * cgroup_exit - detach cgroup from exiting task
- * @tsk: pointer to task_struct of exiting process
- * @run_callbacks: run exit callbacks?
- *
- * Description: Detach cgroup from @tsk and release it.
- *
- * Note that cgroups marked notify_on_release force every task in
- * them to take the global cgroup_mutex mutex when exiting.
- * This could impact scaling on very large systems. Be reluctant to
- * use notify_on_release cgroups where very high task exit scaling
- * is required on large systems.
- *
- * the_top_cgroup_hack:
- *
- * Set the exiting task's cgroup to the root cgroup (top_cgroup).
- *
- * We call cgroup_exit() while the task is still competent to
- * handle notify_on_release(), then leave the task attached to the
- * root cgroup in each hierarchy for the remainder of its exit.
- *
- * To do this properly, we would increment the reference count on
- * top_cgroup, and near the very end of the kernel/exit.c do_exit()
- * code we would add a second cgroup function call, to drop that
- * reference. This would just create an unnecessary hot spot on
- * the top_cgroup reference count, to no avail.
- *
- * Normally, holding a reference to a cgroup without bumping its
- * count is unsafe. The cgroup could go away, or someone could
- * attach us to a different cgroup, decrementing the count on
- * the first cgroup that we never incremented. But in this case,
- * top_cgroup isn't going away, and either task has PF_EXITING set,
- * which wards off any cgroup_attach_task() attempts, or task is a failed
- * fork, never visible to cgroup_attach_task.
- */
- void cgroup_exit(struct task_struct *tsk, int run_callbacks)
- {
- struct css_set *cg;
- int i;
- /*
- * Unlink from the css_set task list if necessary.
- * Optimistically check cg_list before taking
- * css_set_lock
- */
- if (!list_empty(&tsk->cg_list)) {
- write_lock(&css_set_lock);
- if (!list_empty(&tsk->cg_list))
- list_del_init(&tsk->cg_list);
- write_unlock(&css_set_lock);
- }
- /* Reassign the task to the init_css_set. */
- task_lock(tsk);
- cg = tsk->cgroups;
- tsk->cgroups = &init_css_set;
- if (run_callbacks && need_forkexit_callback) {
- /*
- * modular subsystems can't use callbacks, so no need to lock
- * the subsys array
- */
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss->exit) {
- struct cgroup *old_cgrp =
- rcu_dereference_raw(cg->subsys[i])->cgroup;
- struct cgroup *cgrp = task_cgroup(tsk, i);
- ss->exit(cgrp, old_cgrp, tsk);
- }
- }
- }
- task_unlock(tsk);
- if (cg)
- put_css_set(cg);
- }
- /**
- * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
- * @cgrp: the cgroup in question
- * @task: the task in question
- *
- * See if @cgrp is a descendant of @task's cgroup in the appropriate
- * hierarchy.
- *
- * If we are sending in dummytop, then presumably we are creating
- * the top cgroup in the subsystem.
- *
- * Called only by the ns (nsproxy) cgroup.
- */
- int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
- {
- int ret;
- struct cgroup *target;
- if (cgrp == dummytop)
- return 1;
- target = task_cgroup_from_root(task, cgrp->root);
- while (cgrp != target && cgrp != cgrp->top_cgroup)
- cgrp = cgrp->parent;
- ret = (cgrp == target);
- return ret;
- }
- static void check_for_release(struct cgroup *cgrp)
- {
- /* All of these checks rely on RCU to keep the cgroup
- * structure alive */
- if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
- && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
- /* Control Group is currently removable. If it's not
- * already queued for a userspace notification, queue
- * it now */
- int need_schedule_work = 0;
- raw_spin_lock(&release_list_lock);
- if (!cgroup_is_removed(cgrp) &&
- list_empty(&cgrp->release_list)) {
- list_add(&cgrp->release_list, &release_list);
- need_schedule_work = 1;
- }
- raw_spin_unlock(&release_list_lock);
- if (need_schedule_work)
- schedule_work(&release_agent_work);
- }
- }
- /* Caller must verify that the css is not for root cgroup */
- void __css_get(struct cgroup_subsys_state *css, int count)
- {
- atomic_add(count, &css->refcnt);
- set_bit(CGRP_RELEASABLE, &css->cgroup->flags);
- }
- EXPORT_SYMBOL_GPL(__css_get);
- /* Caller must verify that the css is not for root cgroup */
- bool __css_tryget(struct cgroup_subsys_state *css)
- {
- do {
- int v = css_refcnt(css);
- if (atomic_cmpxchg(&css->refcnt, v, v + 1) == v)
- return true;
- cpu_relax();
- } while (!test_bit(CSS_REMOVED, &css->flags));
- return false;
- }
- EXPORT_SYMBOL_GPL(__css_tryget);
- /* Caller must verify that the css is not for root cgroup */
- void __css_put(struct cgroup_subsys_state *css)
- {
- struct cgroup *cgrp = css->cgroup;
- int v;
- rcu_read_lock();
- v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
- switch (v) {
- case 1:
- check_for_release(cgrp);
- cgroup_wakeup_rmdir_waiter(cgrp);
- break;
- case 0:
- if (!test_bit(CSS_CLEAR_CSS_REFS, &css->flags))
- schedule_work(&css->dput_work);
- break;
- }
- rcu_read_unlock();
- }
- EXPORT_SYMBOL_GPL(__css_put);
- /*
- * Notify userspace when a cgroup is released, by running the
- * configured release agent with the name of the cgroup (path
- * relative to the root of cgroup file system) as the argument.
- *
- * Most likely, this user command will try to rmdir this cgroup.
- *
- * This races with the possibility that some other task will be
- * attached to this cgroup before it is removed, or that some other
- * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
- * The presumed 'rmdir' will fail quietly if this cgroup is no longer
- * unused, and this cgroup will be reprieved from its death sentence,
- * to continue to serve a useful existence. Next time it's released,
- * we will get notified again, if it still has 'notify_on_release' set.
- *
- * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
- * means only wait until the task is successfully execve()'d. The
- * separate release agent task is forked by call_usermodehelper(),
- * then control in this thread returns here, without waiting for the
- * release agent task. We don't bother to wait because the caller of
- * this routine has no use for the exit status of the release agent
- * task, so no sense holding our caller up for that.
- */
- static void cgroup_release_agent(struct work_struct *work)
- {
- BUG_ON(work != &release_agent_work);
- mutex_lock(&cgroup_mutex);
- raw_spin_lock(&release_list_lock);
- while (!list_empty(&release_list)) {
- char *argv[3], *envp[3];
- int i;
- char *pathbuf = NULL, *agentbuf = NULL;
- struct cgroup *cgrp = list_entry(release_list.next,
- struct cgroup,
- release_list);
- list_del_init(&cgrp->release_list);
- raw_spin_unlock(&release_list_lock);
- pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!pathbuf)
- goto continue_free;
- if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
- goto continue_free;
- agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
- if (!agentbuf)
- goto continue_free;
- i = 0;
- argv[i++] = agentbuf;
- argv[i++] = pathbuf;
- argv[i] = NULL;
- i = 0;
- /* minimal command environment */
- envp[i++] = "HOME=/";
- envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
- envp[i] = NULL;
- /* Drop the lock while we invoke the usermode helper,
- * since the exec could involve hitting disk and hence
- * be a slow process */
- mutex_unlock(&cgroup_mutex);
- call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
- mutex_lock(&cgroup_mutex);
- continue_free:
- kfree(pathbuf);
- kfree(agentbuf);
- raw_spin_lock(&release_list_lock);
- }
- raw_spin_unlock(&release_list_lock);
- mutex_unlock(&cgroup_mutex);
- }
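- /*
- * Illustrative invocation (hypothetical paths): with release_agent set
- * to "/sbin/cgroup-release" and a released cgroup at "/mygroup", the
- * helper is spawned as:
- *
- *	argv = { "/sbin/cgroup-release", "/mygroup", NULL }
- *	envp = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }
- */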
- static int __init cgroup_disable(char *str)
- {
- int i;
- char *token;
- while ((token = strsep(&str, ",")) != NULL) {
- if (!*token)
- continue;
- /*
- * cgroup_disable, being at boot time, can't know about module
- * subsystems, so we don't worry about them.
- */
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (!strcmp(token, ss->name)) {
- ss->disabled = 1;
- printk(KERN_INFO "Disabling %s control group"
- " subsystem\n", ss->name);
- break;
- }
- }
- }
- return 1;
- }
- __setup("cgroup_disable=", cgroup_disable);
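- /*
- * Example boot-time usage (hypothetical controller choice):
- *
- *	cgroup_disable=memory,cpuset
- *
- * marks the named built-in controllers disabled so that no hierarchy
- * can bind them later.
- */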
- /*
- * Functions for CSS ID.
- */
- /*
- * To get an ID other than 0, this should be called when !cgroup_is_removed().
- */
- unsigned short css_id(struct cgroup_subsys_state *css)
- {
- struct css_id *cssid;
- /*
- * This css_id() can return a correct value when someone holds a refcnt
- * on this css or it is called under rcu_read_lock(). Once css->id is
- * allocated, it's unchanged until freed.
- */
- cssid = rcu_dereference_check(css->id, css_refcnt(css));
- if (cssid)
- return cssid->id;
- return 0;
- }
- EXPORT_SYMBOL_GPL(css_id);
- unsigned short css_depth(struct cgroup_subsys_state *css)
- {
- struct css_id *cssid;
- cssid = rcu_dereference_check(css->id, css_refcnt(css));
- if (cssid)
- return cssid->depth;
- return 0;
- }
- EXPORT_SYMBOL_GPL(css_depth);
- /**
- * css_is_ancestor - test "root" css is an ancestor of "child"
- * @child: the css to be tested.
- * @root: the css supposed to be an ancestor of the child.
- *
- * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, the caller must hold rcu_read_lock().
- * But, considering usual usage, the csses should remain valid objects after
- * the test. Assuming that the caller will act on the child if this returns
- * true, the caller must hold a reference count on "child".
- * If "child" is a valid object and this returns true, "root" is valid, too.
- */
- bool css_is_ancestor(struct cgroup_subsys_state *child,
- const struct cgroup_subsys_state *root)
- {
- struct css_id *child_id;
- struct css_id *root_id;
- child_id = rcu_dereference(child->id);
- if (!child_id)
- return false;
- root_id = rcu_dereference(root->id);
- if (!root_id)
- return false;
- if (child_id->depth < root_id->depth)
- return false;
- if (child_id->stack[root_id->depth] != root_id->id)
- return false;
- return true;
- }
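- /*
- * Worked example (hypothetical ids): take a "root" css at depth 1 with
- * id 4 and stack {1, 4}, and a "child" css at depth 2 with stack
- * {1, 4, 9}. Then child_id->depth (2) >= root_id->depth (1) and
- * child_id->stack[1] == 4 == root_id->id, so the child is a descendant
- * and css_is_ancestor() returns true.
- */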
- void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
- {
- struct css_id *id = css->id;
- /* When this is called before css_id initialization, id can be NULL */
- if (!id)
- return;
- BUG_ON(!ss->use_id);
- rcu_assign_pointer(id->css, NULL);
- rcu_assign_pointer(css->id, NULL);
- spin_lock(&ss->id_lock);
- idr_remove(&ss->idr, id->id);
- spin_unlock(&ss->id_lock);
- kfree_rcu(id, rcu_head);
- }
- EXPORT_SYMBOL_GPL(free_css_id);
- /*
- * This is called by init or create(). Calls to this function are
- * always serialized (by cgroup_mutex at create()).
- */
- static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
- {
- struct css_id *newid;
- int myid, error, size;
- BUG_ON(!ss->use_id);
- size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
- newid = kzalloc(size, GFP_KERNEL);
- if (!newid)
- return ERR_PTR(-ENOMEM);
- /* get id */
- if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
- error = -ENOMEM;
- goto err_out;
- }
- spin_lock(&ss->id_lock);
- /* Don't use 0; allocate an ID in the range 1-65535 */
- error = idr_get_new_above(&ss->idr, newid, 1, &myid);
- spin_unlock(&ss->id_lock);
- /* Returns an error when there is no free space for a new ID. */
- if (error) {
- error = -ENOSPC;
- goto err_out;
- }
- if (myid > CSS_ID_MAX)
- goto remove_idr;
- newid->id = myid;
- newid->depth = depth;
- return newid;
- remove_idr:
- error = -ENOSPC;
- spin_lock(&ss->id_lock);
- idr_remove(&ss->idr, myid);
- spin_unlock(&ss->id_lock);
- err_out:
- kfree(newid);
- return ERR_PTR(error);
- }
- static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
- struct cgroup_subsys_state *rootcss)
- {
- struct css_id *newid;
- spin_lock_init(&ss->id_lock);
- idr_init(&ss->idr);
- newid = get_new_cssid(ss, 0);
- if (IS_ERR(newid))
- return PTR_ERR(newid);
- newid->stack[0] = newid->id;
- newid->css = rootcss;
- rootcss->id = newid;
- return 0;
- }
- static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
- struct cgroup *child)
- {
- int subsys_id, i, depth = 0;
- struct cgroup_subsys_state *parent_css, *child_css;
- struct css_id *child_id, *parent_id;
- subsys_id = ss->subsys_id;
- parent_css = parent->subsys[subsys_id];
- child_css = child->subsys[subsys_id];
- parent_id = parent_css->id;
- depth = parent_id->depth + 1;
- child_id = get_new_cssid(ss, depth);
- if (IS_ERR(child_id))
- return PTR_ERR(child_id);
- for (i = 0; i < depth; i++)
- child_id->stack[i] = parent_id->stack[i];
- child_id->stack[depth] = child_id->id;
- /*
- * child_id->css pointer will be set after this cgroup is available
- * see cgroup_populate_dir()
- */
- rcu_assign_pointer(child_css->id, child_id);
- return 0;
- }
- /**
- * css_lookup - lookup css by id
- * @ss: cgroup subsys to be looked into.
- * @id: the id
- *
- * Returns a pointer to the cgroup_subsys_state if a valid one exists for
- * @id, NULL otherwise. Must be called under rcu_read_lock().
- */
- struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
- {
- struct css_id *cssid = NULL;
- BUG_ON(!ss->use_id);
- cssid = idr_find(&ss->idr, id);
- if (unlikely(!cssid))
- return NULL;
- return rcu_dereference(cssid->css);
- }
- EXPORT_SYMBOL_GPL(css_lookup);
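Typical consumption, sketched: a controller stores the bare integer id
somewhere compact and later resolves it back to a live css (memcg's swap
records worked this way). do_something() is a placeholder:

    static void visit_by_id(struct cgroup_subsys *ss, int saved_id)
    {
            struct cgroup_subsys_state *css;

            rcu_read_lock();
            css = css_lookup(ss, saved_id);   /* NULL if the id was freed */
            if (css)
                    do_something(css);        /* hypothetical; pin the css
                                               * if it must outlive the
                                               * RCU read section */
            rcu_read_unlock();
    }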
- /**
- * css_get_next - look up the next css under the specified hierarchy.
- * @ss: pointer to the subsystem.
- * @id: current position of the iteration.
- * @root: pointer to a css; the tree under it is searched.
- * @foundid: position of the found object.
- *
- * Search for the next css under the hierarchy rooted at @root. Must be
- * called under rcu_read_lock(). Returns NULL when the end is reached.
- */
- struct cgroup_subsys_state *
- css_get_next(struct cgroup_subsys *ss, int id,
- struct cgroup_subsys_state *root, int *foundid)
- {
- struct cgroup_subsys_state *ret = NULL;
- struct css_id *tmp;
- int tmpid;
- int rootid = css_id(root);
- int depth = css_depth(root);
- if (!rootid)
- return NULL;
- BUG_ON(!ss->use_id);
- WARN_ON_ONCE(!rcu_read_lock_held());
- /* fill start point for scan */
- tmpid = id;
- while (1) {
- /*
- * scan the next entry from the idr tree; idr_get_next() updates
- * tmpid to the id of the entry it returns.
- */
- tmp = idr_get_next(&ss->idr, &tmpid);
- if (!tmp)
- break;
- if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
- ret = rcu_dereference(tmp->css);
- if (ret) {
- *foundid = tmpid;
- break;
- }
- }
- /* continue to scan from next id */
- tmpid = tmpid + 1;
- }
- return ret;
- }
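Put together, the intended iteration loop looks like this (a sketch; memcg's
hierarchical reclaim walked memcgs in roughly this way). The scan restarts
one past the last id found until the idr runs out:

    static void for_each_css_under(struct cgroup_subsys *ss,
                                   struct cgroup_subsys_state *root,
                                   void (*visit)(struct cgroup_subsys_state *))
    {
            struct cgroup_subsys_state *css;
            int id = 1, found;              /* css_ids start at 1 */

            rcu_read_lock();
            while ((css = css_get_next(ss, id, root, &found)) != NULL) {
                    visit(css);             /* hypothetical callback */
                    id = found + 1;         /* resume after the last hit */
            }
            rcu_read_unlock();
    }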
- /*
- * Get the corresponding css from a file opened on a cgroupfs directory.
- */
- struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
- {
- struct cgroup *cgrp;
- struct inode *inode;
- struct cgroup_subsys_state *css;
- inode = f->f_dentry->d_inode;
- /* check in cgroup filesystem dir */
- if (inode->i_op != &cgroup_dir_inode_operations)
- return ERR_PTR(-EBADF);
- if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
- return ERR_PTR(-EINVAL);
- /* get cgroup */
- cgrp = __d_cgrp(f->f_dentry);
- css = cgrp->subsys[id];
- return css ? css : ERR_PTR(-ENOENT);
- }
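An fd-based API can use this to let userspace scope a request to a cgroup by
handing in an fd for its cgroupfs directory; perf's cgroup event mode did so.
Roughly (the helper name is invented, and a real caller must take a reference
on the css before dropping the file reference):

    static struct cgroup_subsys_state *css_from_user_fd(int fd, int subsys_id)
    {
            struct file *file = fget(fd);
            struct cgroup_subsys_state *css;

            if (!file)
                    return ERR_PTR(-EBADF);
            css = cgroup_css_from_dir(file, subsys_id);
            /* hypothetical: pin css here, before fput(), in real code */
            fput(file);
            return css;
    }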
- #ifdef CONFIG_CGROUP_DEBUG
- static struct cgroup_subsys_state *debug_create(struct cgroup *cont)
- {
- struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
- if (!css)
- return ERR_PTR(-ENOMEM);
- return css;
- }
- static void debug_destroy(struct cgroup *cont)
- {
- kfree(cont->subsys[debug_subsys_id]);
- }
- static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
- {
- return atomic_read(&cont->count);
- }
- static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
- {
- return cgroup_task_count(cont);
- }
- static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
- {
- return (u64)(unsigned long)current->cgroups;
- }
- static u64 current_css_set_refcount_read(struct cgroup *cont,
- struct cftype *cft)
- {
- u64 count;
- rcu_read_lock();
- count = atomic_read(&current->cgroups->refcount);
- rcu_read_unlock();
- return count;
- }
- static int current_css_set_cg_links_read(struct cgroup *cont,
- struct cftype *cft,
- struct seq_file *seq)
- {
- struct cg_cgroup_link *link;
- struct css_set *cg;
- read_lock(&css_set_lock);
- rcu_read_lock();
- cg = rcu_dereference(current->cgroups);
- list_for_each_entry(link, &cg->cg_links, cg_link_list) {
- struct cgroup *c = link->cgrp;
- const char *name;
- if (c->dentry)
- name = c->dentry->d_name.name;
- else
- name = "?";
- seq_printf(seq, "Root %d group %s\n",
- c->root->hierarchy_id, name);
- }
- rcu_read_unlock();
- read_unlock(&css_set_lock);
- return 0;
- }
- #define MAX_TASKS_SHOWN_PER_CSS 25
- static int cgroup_css_links_read(struct cgroup *cont,
- struct cftype *cft,
- struct seq_file *seq)
- {
- struct cg_cgroup_link *link;
- read_lock(&css_set_lock);
- list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
- struct css_set *cg = link->cg;
- struct task_struct *task;
- int count = 0;
- seq_printf(seq, "css_set %pK\n", cg);
- list_for_each_entry(task, &cg->tasks, cg_list) {
- if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
- seq_puts(seq, " ...\n");
- break;
- } else {
- seq_printf(seq, " task %d\n",
- task_pid_vnr(task));
- }
- }
- }
- read_unlock(&css_set_lock);
- return 0;
- }
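Reading the resulting cgroup_css_links file prints one block per css_set
attached to the cgroup, capped at MAX_TASKS_SHOWN_PER_CSS tasks each.
Hypothetical output (the %pK pointer is subject to kptr_restrict):

    css_set ffff8800abcdef00
      task 412
      task 413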
- static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
- {
- return test_bit(CGRP_RELEASABLE, &cgrp->flags);
- }
- static struct cftype debug_files[] = {
- {
- .name = "cgroup_refcount",
- .read_u64 = cgroup_refcount_read,
- },
- {
- .name = "taskcount",
- .read_u64 = debug_taskcount_read,
- },
- {
- .name = "current_css_set",
- .read_u64 = current_css_set_read,
- },
- {
- .name = "current_css_set_refcount",
- .read_u64 = current_css_set_refcount_read,
- },
- {
- .name = "current_css_set_cg_links",
- .read_seq_string = current_css_set_cg_links_read,
- },
- {
- .name = "cgroup_css_links",
- .read_seq_string = cgroup_css_links_read,
- },
- {
- .name = "releasable",
- .read_u64 = releasable_read,
- },
- { } /* terminate */
- };
- struct cgroup_subsys debug_subsys = {
- .name = "debug",
- .create = debug_create,
- .destroy = debug_destroy,
- .subsys_id = debug_subsys_id,
- .base_cftypes = debug_files,
- };
- #endif /* CONFIG_CGROUP_DEBUG */