core.c

  1. /*
  2. * Performance events core code:
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7. * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8. *
  9. * For licensing details see kernel-base/COPYING
  10. */
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/cpu.h>
  14. #include <linux/smp.h>
  15. #include <linux/idr.h>
  16. #include <linux/file.h>
  17. #include <linux/poll.h>
  18. #include <linux/slab.h>
  19. #include <linux/hash.h>
  20. #include <linux/sysfs.h>
  21. #include <linux/dcache.h>
  22. #include <linux/percpu.h>
  23. #include <linux/ptrace.h>
  24. #include <linux/reboot.h>
  25. #include <linux/vmstat.h>
  26. #include <linux/device.h>
  27. #include <linux/vmalloc.h>
  28. #include <linux/hardirq.h>
  29. #include <linux/rculist.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/syscalls.h>
  32. #include <linux/anon_inodes.h>
  33. #include <linux/kernel_stat.h>
  34. #include <linux/perf_event.h>
  35. #include <linux/ftrace_event.h>
  36. #include <linux/hw_breakpoint.h>
  37. #include <asm/irq_regs.h>
  38. struct remote_function_call {
  39. struct task_struct *p;
  40. int (*func)(void *info);
  41. void *info;
  42. int ret;
  43. };
  44. static void remote_function(void *data)
  45. {
  46. struct remote_function_call *tfc = data;
  47. struct task_struct *p = tfc->p;
  48. if (p) {
  49. tfc->ret = -EAGAIN;
  50. if (task_cpu(p) != smp_processor_id() || !task_curr(p))
  51. return;
  52. }
  53. tfc->ret = tfc->func(tfc->info);
  54. }
  55. /**
  56. * task_function_call - call a function on the cpu on which a task runs
  57. * @p: the task to evaluate
  58. * @func: the function to be called
  59. * @info: the function call argument
  60. *
  61. * Calls the function @func when the task is currently running. This might
  62. * be on the current CPU, which just calls the function directly
  63. *
  64. * returns: @func return value, or
  65. * -ESRCH - when the process isn't running
  66. * -EAGAIN - when the process moved away
  67. */
  68. static int
  69. task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
  70. {
  71. struct remote_function_call data = {
  72. .p = p,
  73. .func = func,
  74. .info = info,
  75. .ret = -ESRCH, /* No such (running) process */
  76. };
  77. if (task_curr(p))
  78. smp_call_function_single(task_cpu(p), remote_function, &data, 1);
  79. return data.ret;
  80. }
  81. /**
  82. * cpu_function_call - call a function on the cpu
  83. * @func: the function to be called
  84. * @info: the function call argument
  85. *
  86. * Calls the function @func on the remote cpu.
  87. *
  88. * returns: @func return value or -ENXIO when the cpu is offline
  89. */
  90. static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
  91. {
  92. struct remote_function_call data = {
  93. .p = NULL,
  94. .func = func,
  95. .info = info,
  96. .ret = -ENXIO, /* No such CPU */
  97. };
  98. smp_call_function_single(cpu, remote_function, &data, 1);
  99. return data.ret;
  100. }
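/*
 * Illustrative sketch: how a caller typically uses task_function_call().
 * The struct, callback and caller names below are hypothetical; real
 * callers (e.g. the install/enable paths later in this file) follow the
 * same pattern of packing arguments into a struct and retrying while the
 * target task keeps migrating.
 */
#if 0
struct example_args {
	struct perf_event *event;
	int value;
};

static int example_cb(void *info)
{
	struct example_args *args = info;

	/* Runs on the CPU where the target task is current. */
	return args->value;
}

static int example_caller(struct task_struct *p, struct perf_event *event)
{
	struct example_args args = { .event = event, .value = 0 };
	int ret;

	do {
		ret = task_function_call(p, example_cb, &args);
	} while (ret == -EAGAIN);

	return ret;
}
#endif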
  101. #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
  102. PERF_FLAG_FD_OUTPUT |\
  103. PERF_FLAG_PID_CGROUP)
  104. enum event_type_t {
  105. EVENT_FLEXIBLE = 0x1,
  106. EVENT_PINNED = 0x2,
  107. EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
  108. };
  109. /*
  110. * perf_sched_events : >0 events exist
  111. * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  112. */
  113. struct jump_label_key perf_sched_events __read_mostly;
  114. static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
  115. static atomic_t nr_mmap_events __read_mostly;
  116. static atomic_t nr_comm_events __read_mostly;
  117. static atomic_t nr_task_events __read_mostly;
  118. static LIST_HEAD(pmus);
  119. static DEFINE_MUTEX(pmus_lock);
  120. static struct srcu_struct pmus_srcu;
  121. /*
  122. * perf event paranoia level:
  123. * -1 - not paranoid at all
  124. * 0 - disallow raw tracepoint access for unpriv
  125. * 1 - disallow cpu events for unpriv
  126. * 2 - disallow kernel profiling for unpriv
  127. */
  128. int sysctl_perf_event_paranoid __read_mostly = 1;
  129. /* Minimum for 512 kiB + 1 user control page */
  130. int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
  131. /*
  132. * max perf event sample rate
  133. */
  134. #define DEFAULT_MAX_SAMPLE_RATE 100000
  135. int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
  136. static int max_samples_per_tick __read_mostly =
  137. DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
  138. int perf_proc_update_handler(struct ctl_table *table, int write,
  139. void __user *buffer, size_t *lenp,
  140. loff_t *ppos)
  141. {
  142. int ret = proc_dointvec(table, write, buffer, lenp, ppos);
  143. if (ret || !write)
  144. return ret;
  145. max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
  146. return 0;
  147. }
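/*
 * Worked example of the arithmetic above, assuming HZ == 1000: with the
 * default sysctl_perf_event_sample_rate of 100000 samples/sec,
 * max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100, i.e. at most
 * 100 samples may be taken per timer tick. Writing a new rate through
 * the sysctl re-runs the same division in perf_proc_update_handler().
 */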
  148. static atomic64_t perf_event_id;
  149. static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
  150. enum event_type_t event_type);
  151. static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
  152. enum event_type_t event_type,
  153. struct task_struct *task);
  154. static void update_context_time(struct perf_event_context *ctx);
  155. static u64 perf_event_time(struct perf_event *event);
  156. void __weak perf_event_print_debug(void) { }
  157. extern __weak const char *perf_pmu_name(void)
  158. {
  159. return "pmu";
  160. }
  161. static inline u64 perf_clock(void)
  162. {
  163. return local_clock();
  164. }
  165. static inline struct perf_cpu_context *
  166. __get_cpu_context(struct perf_event_context *ctx)
  167. {
  168. return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
  169. }
  170. static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
  171. struct perf_event_context *ctx)
  172. {
  173. raw_spin_lock(&cpuctx->ctx.lock);
  174. if (ctx)
  175. raw_spin_lock(&ctx->lock);
  176. }
  177. static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
  178. struct perf_event_context *ctx)
  179. {
  180. if (ctx)
  181. raw_spin_unlock(&ctx->lock);
  182. raw_spin_unlock(&cpuctx->ctx.lock);
  183. }
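/*
 * Illustrative sketch (hypothetical caller): the helpers above keep the
 * lock order fixed -- the per-CPU context lock is always taken before the
 * task context lock, and the task lock is skipped when there is none.
 */
#if 0
static void example_locked_section(struct perf_cpu_context *cpuctx)
{
	/* task_ctx may be NULL when no task context is scheduled here */
	struct perf_event_context *ctx = cpuctx->task_ctx;

	perf_ctx_lock(cpuctx, ctx);
	/* both cpuctx->ctx and ctx (if any) are stable in this section */
	perf_ctx_unlock(cpuctx, ctx);
}
#endif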
  184. #ifdef CONFIG_CGROUP_PERF
  185. /*
  186. * Must ensure cgroup is pinned (css_get) before calling
  187. * this function. In other words, we cannot call this function
  188. * if there is no cgroup event for the current CPU context.
  189. */
  190. static inline struct perf_cgroup *
  191. perf_cgroup_from_task(struct task_struct *task)
  192. {
  193. return container_of(task_subsys_state(task, perf_subsys_id),
  194. struct perf_cgroup, css);
  195. }
  196. static inline bool
  197. perf_cgroup_match(struct perf_event *event)
  198. {
  199. struct perf_event_context *ctx = event->ctx;
  200. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  201. return !event->cgrp || event->cgrp == cpuctx->cgrp;
  202. }
  203. static inline void perf_get_cgroup(struct perf_event *event)
  204. {
  205. css_get(&event->cgrp->css);
  206. }
  207. static inline void perf_put_cgroup(struct perf_event *event)
  208. {
  209. css_put(&event->cgrp->css);
  210. }
  211. static inline void perf_detach_cgroup(struct perf_event *event)
  212. {
  213. perf_put_cgroup(event);
  214. event->cgrp = NULL;
  215. }
  216. static inline int is_cgroup_event(struct perf_event *event)
  217. {
  218. return event->cgrp != NULL;
  219. }
  220. static inline u64 perf_cgroup_event_time(struct perf_event *event)
  221. {
  222. struct perf_cgroup_info *t;
  223. t = per_cpu_ptr(event->cgrp->info, event->cpu);
  224. return t->time;
  225. }
  226. static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
  227. {
  228. struct perf_cgroup_info *info;
  229. u64 now;
  230. now = perf_clock();
  231. info = this_cpu_ptr(cgrp->info);
  232. info->time += now - info->timestamp;
  233. info->timestamp = now;
  234. }
  235. static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
  236. {
  237. struct perf_cgroup *cgrp_out = cpuctx->cgrp;
  238. if (cgrp_out)
  239. __update_cgrp_time(cgrp_out);
  240. }
  241. static inline void update_cgrp_time_from_event(struct perf_event *event)
  242. {
  243. struct perf_cgroup *cgrp;
  244. /*
  245. * ensure we access cgroup data only when needed and
  246. * when we know the cgroup is pinned (css_get)
  247. */
  248. if (!is_cgroup_event(event))
  249. return;
  250. cgrp = perf_cgroup_from_task(current);
  251. /*
  252. * Do not update time when cgroup is not active
  253. */
  254. if (cgrp == event->cgrp)
  255. __update_cgrp_time(event->cgrp);
  256. }
  257. static inline void
  258. perf_cgroup_set_timestamp(struct task_struct *task,
  259. struct perf_event_context *ctx)
  260. {
  261. struct perf_cgroup *cgrp;
  262. struct perf_cgroup_info *info;
  263. /*
  264. * ctx->lock held by caller
  265. * ensure we do not access cgroup data
  266. * unless we have the cgroup pinned (css_get)
  267. */
  268. if (!task || !ctx->nr_cgroups)
  269. return;
  270. cgrp = perf_cgroup_from_task(task);
  271. info = this_cpu_ptr(cgrp->info);
  272. info->timestamp = ctx->timestamp;
  273. }
  274. #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
  275. #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
  276. /*
  277. * reschedule events based on the cgroup constraint of task.
  278. *
  279. * mode SWOUT : schedule out everything
  280. * mode SWIN : schedule in based on cgroup for next
  281. */
  282. void perf_cgroup_switch(struct task_struct *task, int mode)
  283. {
  284. struct perf_cpu_context *cpuctx;
  285. struct pmu *pmu;
  286. unsigned long flags;
  287. /*
  288. * disable interrupts to avoid getting nr_cgroups
  289. * changes via __perf_event_disable(). Also
  290. * avoids preemption.
  291. */
  292. local_irq_save(flags);
  293. /*
  294. * we reschedule only in the presence of cgroup
  295. * constrained events.
  296. */
  297. rcu_read_lock();
  298. list_for_each_entry_rcu(pmu, &pmus, entry) {
  299. cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  300. /*
  301. * perf_cgroup_events says at least one
  302. * context on this CPU has cgroup events.
  303. *
  304. * ctx->nr_cgroups reports the number of cgroup
  305. * events for a context.
  306. */
  307. if (cpuctx->ctx.nr_cgroups > 0) {
  308. perf_ctx_lock(cpuctx, cpuctx->task_ctx);
  309. perf_pmu_disable(cpuctx->ctx.pmu);
  310. if (mode & PERF_CGROUP_SWOUT) {
  311. cpu_ctx_sched_out(cpuctx, EVENT_ALL);
  312. /*
  313. * must not be done before ctxswout due
  314. * to event_filter_match() in event_sched_out()
  315. */
  316. cpuctx->cgrp = NULL;
  317. }
  318. if (mode & PERF_CGROUP_SWIN) {
  319. WARN_ON_ONCE(cpuctx->cgrp);
  320. /* set cgrp before ctxsw in to
  321. * allow event_filter_match() to not
  322. * have to pass task around
  323. */
  324. cpuctx->cgrp = perf_cgroup_from_task(task);
  325. cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
  326. }
  327. perf_pmu_enable(cpuctx->ctx.pmu);
  328. perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
  329. }
  330. }
  331. rcu_read_unlock();
  332. local_irq_restore(flags);
  333. }
  334. static inline void perf_cgroup_sched_out(struct task_struct *task)
  335. {
  336. perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
  337. }
  338. static inline void perf_cgroup_sched_in(struct task_struct *task)
  339. {
  340. perf_cgroup_switch(task, PERF_CGROUP_SWIN);
  341. }
  342. static inline int perf_cgroup_connect(int fd, struct perf_event *event,
  343. struct perf_event_attr *attr,
  344. struct perf_event *group_leader)
  345. {
  346. struct perf_cgroup *cgrp;
  347. struct cgroup_subsys_state *css;
  348. struct file *file;
  349. int ret = 0, fput_needed;
  350. file = fget_light(fd, &fput_needed);
  351. if (!file)
  352. return -EBADF;
  353. css = cgroup_css_from_dir(file, perf_subsys_id);
  354. if (IS_ERR(css)) {
  355. ret = PTR_ERR(css);
  356. goto out;
  357. }
  358. cgrp = container_of(css, struct perf_cgroup, css);
  359. event->cgrp = cgrp;
  360. /* must be done before we fput() the file */
  361. perf_get_cgroup(event);
  362. /*
  363. * all events in a group must monitor
  364. * the same cgroup because a task belongs
  365. * to only one perf cgroup at a time
  366. */
  367. if (group_leader && group_leader->cgrp != cgrp) {
  368. perf_detach_cgroup(event);
  369. ret = -EINVAL;
  370. }
  371. out:
  372. fput_light(file, fput_needed);
  373. return ret;
  374. }
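/*
 * User-space usage sketch (not kernel code, headers omitted): this path
 * is reached when perf_event_open() is called with PERF_FLAG_PID_CGROUP.
 * The "pid" argument then carries an open fd for a cgroup directory and
 * the event must name a real CPU. The cgroup path below is an assumption
 * for illustration.
 */
#if 0
static int open_cycles_for_cgroup(const char *cgrp_path, int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};
	int cgrp_fd = open(cgrp_path, O_RDONLY);	/* e.g. ".../perf_event/mygroup" */

	if (cgrp_fd < 0)
		return -1;
	/* pid slot carries the cgroup fd; group_fd of -1 means no group */
	return syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
		       PERF_FLAG_PID_CGROUP);
}
#endif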
  375. static inline void
  376. perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
  377. {
  378. struct perf_cgroup_info *t;
  379. t = per_cpu_ptr(event->cgrp->info, event->cpu);
  380. event->shadow_ctx_time = now - t->timestamp;
  381. }
  382. static inline void
  383. perf_cgroup_defer_enabled(struct perf_event *event)
  384. {
  385. /*
  386. * when the current task's perf cgroup does not match
  387. * the event's, we need to remember to call the
  388. * perf_mark_enable() function the first time a task with
  389. * a matching perf cgroup is scheduled in.
  390. */
  391. if (is_cgroup_event(event) && !perf_cgroup_match(event))
  392. event->cgrp_defer_enabled = 1;
  393. }
  394. static inline void
  395. perf_cgroup_mark_enabled(struct perf_event *event,
  396. struct perf_event_context *ctx)
  397. {
  398. struct perf_event *sub;
  399. u64 tstamp = perf_event_time(event);
  400. if (!event->cgrp_defer_enabled)
  401. return;
  402. event->cgrp_defer_enabled = 0;
  403. event->tstamp_enabled = tstamp - event->total_time_enabled;
  404. list_for_each_entry(sub, &event->sibling_list, group_entry) {
  405. if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
  406. sub->tstamp_enabled = tstamp - sub->total_time_enabled;
  407. sub->cgrp_defer_enabled = 0;
  408. }
  409. }
  410. }
  411. #else /* !CONFIG_CGROUP_PERF */
  412. static inline bool
  413. perf_cgroup_match(struct perf_event *event)
  414. {
  415. return true;
  416. }
  417. static inline void perf_detach_cgroup(struct perf_event *event)
  418. {}
  419. static inline int is_cgroup_event(struct perf_event *event)
  420. {
  421. return 0;
  422. }
  423. static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
  424. {
  425. return 0;
  426. }
  427. static inline void update_cgrp_time_from_event(struct perf_event *event)
  428. {
  429. }
  430. static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
  431. {
  432. }
  433. static inline void perf_cgroup_sched_out(struct task_struct *task)
  434. {
  435. }
  436. static inline void perf_cgroup_sched_in(struct task_struct *task)
  437. {
  438. }
  439. static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
  440. struct perf_event_attr *attr,
  441. struct perf_event *group_leader)
  442. {
  443. return -EINVAL;
  444. }
  445. static inline void
  446. perf_cgroup_set_timestamp(struct task_struct *task,
  447. struct perf_event_context *ctx)
  448. {
  449. }
  450. void
  451. perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
  452. {
  453. }
  454. static inline void
  455. perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
  456. {
  457. }
  458. static inline u64 perf_cgroup_event_time(struct perf_event *event)
  459. {
  460. return 0;
  461. }
  462. static inline void
  463. perf_cgroup_defer_enabled(struct perf_event *event)
  464. {
  465. }
  466. static inline void
  467. perf_cgroup_mark_enabled(struct perf_event *event,
  468. struct perf_event_context *ctx)
  469. {
  470. }
  471. #endif
  472. void perf_pmu_disable(struct pmu *pmu)
  473. {
  474. int *count = this_cpu_ptr(pmu->pmu_disable_count);
  475. if (!(*count)++)
  476. pmu->pmu_disable(pmu);
  477. }
  478. void perf_pmu_enable(struct pmu *pmu)
  479. {
  480. int *count = this_cpu_ptr(pmu->pmu_disable_count);
  481. if (!--(*count))
  482. pmu->pmu_enable(pmu);
  483. }
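/*
 * Illustrative sketch: perf_pmu_disable()/perf_pmu_enable() nest through
 * the per-CPU pmu_disable_count, so only the outermost pair touches the
 * hardware. The caller below is hypothetical.
 */
#if 0
static void example_pmu_update(struct pmu *pmu)
{
	perf_pmu_disable(pmu);	/* count 0 -> 1: pmu->pmu_disable() runs */
	perf_pmu_disable(pmu);	/* count 1 -> 2: no hardware access      */
	/* ... reprogram events ... */
	perf_pmu_enable(pmu);	/* count 2 -> 1: no hardware access      */
	perf_pmu_enable(pmu);	/* count 1 -> 0: pmu->pmu_enable() runs  */
}
#endif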
  484. static DEFINE_PER_CPU(struct list_head, rotation_list);
  485. /*
  486. * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
  487. * because they're strictly cpu affine and rotate_start is called with IRQs
  488. * disabled, while rotate_context is called from IRQ context.
  489. */
  490. static void perf_pmu_rotate_start(struct pmu *pmu)
  491. {
  492. struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
  493. struct list_head *head = &__get_cpu_var(rotation_list);
  494. WARN_ON(!irqs_disabled());
  495. if (list_empty(&cpuctx->rotation_list))
  496. list_add(&cpuctx->rotation_list, head);
  497. }
  498. static void get_ctx(struct perf_event_context *ctx)
  499. {
  500. WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
  501. }
  502. static void put_ctx(struct perf_event_context *ctx)
  503. {
  504. if (atomic_dec_and_test(&ctx->refcount)) {
  505. if (ctx->parent_ctx)
  506. put_ctx(ctx->parent_ctx);
  507. if (ctx->task)
  508. put_task_struct(ctx->task);
  509. kfree_rcu(ctx, rcu_head);
  510. }
  511. }
  512. static void unclone_ctx(struct perf_event_context *ctx)
  513. {
  514. if (ctx->parent_ctx) {
  515. put_ctx(ctx->parent_ctx);
  516. ctx->parent_ctx = NULL;
  517. }
  518. }
  519. static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
  520. {
  521. /*
  522. * only top level events have the pid namespace they were created in
  523. */
  524. if (event->parent)
  525. event = event->parent;
  526. return task_tgid_nr_ns(p, event->ns);
  527. }
  528. static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
  529. {
  530. /*
  531. * only top level events have the pid namespace they were created in
  532. */
  533. if (event->parent)
  534. event = event->parent;
  535. return task_pid_nr_ns(p, event->ns);
  536. }
  537. /*
  538. * If we inherit events we want to return the parent event id
  539. * to userspace.
  540. */
  541. static u64 primary_event_id(struct perf_event *event)
  542. {
  543. u64 id = event->id;
  544. if (event->parent)
  545. id = event->parent->id;
  546. return id;
  547. }
  548. /*
  549. * Get the perf_event_context for a task and lock it.
  550. * This has to cope with the fact that until it is locked,
  551. * the context could get moved to another task.
  552. */
  553. static struct perf_event_context *
  554. perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
  555. {
  556. struct perf_event_context *ctx;
  557. rcu_read_lock();
  558. retry:
  559. ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
  560. if (ctx) {
  561. /*
  562. * If this context is a clone of another, it might
  563. * get swapped for another underneath us by
  564. * perf_event_task_sched_out, though the
  565. * rcu_read_lock() protects us from any context
  566. * getting freed. Lock the context and check if it
  567. * got swapped before we could get the lock, and retry
  568. * if so. If we locked the right context, then it
  569. * can't get swapped on us any more.
  570. */
  571. raw_spin_lock_irqsave(&ctx->lock, *flags);
  572. if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
  573. raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  574. goto retry;
  575. }
  576. if (!atomic_inc_not_zero(&ctx->refcount)) {
  577. raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  578. ctx = NULL;
  579. }
  580. }
  581. rcu_read_unlock();
  582. return ctx;
  583. }
  584. /*
  585. * Get the context for a task and increment its pin_count so it
  586. * can't get swapped to another task. This also increments its
  587. * reference count so that the context can't get freed.
  588. */
  589. static struct perf_event_context *
  590. perf_pin_task_context(struct task_struct *task, int ctxn)
  591. {
  592. struct perf_event_context *ctx;
  593. unsigned long flags;
  594. ctx = perf_lock_task_context(task, ctxn, &flags);
  595. if (ctx) {
  596. ++ctx->pin_count;
  597. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  598. }
  599. return ctx;
  600. }
  601. static void perf_unpin_context(struct perf_event_context *ctx)
  602. {
  603. unsigned long flags;
  604. raw_spin_lock_irqsave(&ctx->lock, flags);
  605. --ctx->pin_count;
  606. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  607. }
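/*
 * Illustrative sketch (hypothetical caller): the pin/unpin pair keeps a
 * task context from being swapped to another task while it is inspected,
 * and the reference taken in perf_lock_task_context() keeps it from being
 * freed until the final put_ctx().
 */
#if 0
static void example_inspect_ctx(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;

	ctx = perf_pin_task_context(task, ctxn);
	if (!ctx)
		return;
	/* ctx is stable here: neither swapped nor freed */
	perf_unpin_context(ctx);
	put_ctx(ctx);
}
#endif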
  608. /*
  609. * Update the record of the current time in a context.
  610. */
  611. static void update_context_time(struct perf_event_context *ctx)
  612. {
  613. u64 now = perf_clock();
  614. ctx->time += now - ctx->timestamp;
  615. ctx->timestamp = now;
  616. }
  617. static u64 perf_event_time(struct perf_event *event)
  618. {
  619. struct perf_event_context *ctx = event->ctx;
  620. if (is_cgroup_event(event))
  621. return perf_cgroup_event_time(event);
  622. return ctx ? ctx->time : 0;
  623. }
  624. /*
  625. * Update the total_time_enabled and total_time_running fields for an event.
  626. */
  627. static void update_event_times(struct perf_event *event)
  628. {
  629. struct perf_event_context *ctx = event->ctx;
  630. u64 run_end;
  631. if (event->state < PERF_EVENT_STATE_INACTIVE ||
  632. event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
  633. return;
  634. /*
  635. * in cgroup mode, time_enabled represents
  636. * the time the event was enabled AND active
  637. * tasks were in the monitored cgroup. This is
  638. * independent of the activity of the context as
  639. * there may be a mix of cgroup and non-cgroup events.
  640. *
  641. * That is why we treat cgroup events differently
  642. * here.
  643. */
  644. if (is_cgroup_event(event))
  645. run_end = perf_event_time(event);
  646. else if (ctx->is_active)
  647. run_end = ctx->time;
  648. else
  649. run_end = event->tstamp_stopped;
  650. event->total_time_enabled = run_end - event->tstamp_enabled;
  651. if (event->state == PERF_EVENT_STATE_INACTIVE)
  652. run_end = event->tstamp_stopped;
  653. else
  654. run_end = perf_event_time(event);
  655. event->total_time_running = run_end - event->tstamp_running;
  656. }
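/*
 * Worked example for the bookkeeping above (times in ns, non-cgroup event
 * that ran once without interruption): an event with tstamp_enabled = 100
 * in a context whose ctx->time is now 400 gets total_time_enabled =
 * 400 - 100 = 300. If it went INACTIVE at tstamp_stopped = 350 after
 * tstamp_running = 150, total_time_running = 350 - 150 = 200.
 */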
  657. /*
  658. * Update total_time_enabled and total_time_running for all events in a group.
  659. */
  660. static void update_group_times(struct perf_event *leader)
  661. {
  662. struct perf_event *event;
  663. update_event_times(leader);
  664. list_for_each_entry(event, &leader->sibling_list, group_entry)
  665. update_event_times(event);
  666. }
  667. static struct list_head *
  668. ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
  669. {
  670. if (event->attr.pinned)
  671. return &ctx->pinned_groups;
  672. else
  673. return &ctx->flexible_groups;
  674. }
  675. /*
  676. * Add an event to the lists for its context.
  677. * Must be called with ctx->mutex and ctx->lock held.
  678. */
  679. static void
  680. list_add_event(struct perf_event *event, struct perf_event_context *ctx)
  681. {
  682. WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
  683. event->attach_state |= PERF_ATTACH_CONTEXT;
  684. /*
  685. * If we're a stand alone event or group leader, we go to the context
  686. * list, group events are kept attached to the group so that
  687. * perf_group_detach can, at all times, locate all siblings.
  688. */
  689. if (event->group_leader == event) {
  690. struct list_head *list;
  691. if (is_software_event(event))
  692. event->group_flags |= PERF_GROUP_SOFTWARE;
  693. list = ctx_group_list(event, ctx);
  694. list_add_tail(&event->group_entry, list);
  695. }
  696. if (is_cgroup_event(event))
  697. ctx->nr_cgroups++;
  698. list_add_rcu(&event->event_entry, &ctx->event_list);
  699. if (!ctx->nr_events)
  700. perf_pmu_rotate_start(ctx->pmu);
  701. ctx->nr_events++;
  702. if (event->attr.inherit_stat)
  703. ctx->nr_stat++;
  704. }
  705. /*
  706. * Called at perf_event creation and when events are attached/detached from a
  707. * group.
  708. */
  709. static void perf_event__read_size(struct perf_event *event)
  710. {
  711. int entry = sizeof(u64); /* value */
  712. int size = 0;
  713. int nr = 1;
  714. if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  715. size += sizeof(u64);
  716. if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  717. size += sizeof(u64);
  718. if (event->attr.read_format & PERF_FORMAT_ID)
  719. entry += sizeof(u64);
  720. if (event->attr.read_format & PERF_FORMAT_GROUP) {
  721. nr += event->group_leader->nr_siblings;
  722. size += sizeof(u64);
  723. }
  724. size += entry * nr;
  725. event->read_size = size;
  726. }
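/*
 * Worked example of the sizing above: a group leader with two siblings and
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID |
 * PERF_FORMAT_GROUP yields
 *
 *	size  = 8 (nr, from PERF_FORMAT_GROUP) + 8 (time_enabled)
 *	entry = 8 (value) + 8 (id) = 16
 *	nr    = 1 + 2 siblings = 3
 *
 * so read_size = 16 + 16 * 3 = 64 bytes for the whole read() buffer.
 */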
  727. static void perf_event__header_size(struct perf_event *event)
  728. {
  729. struct perf_sample_data *data;
  730. u64 sample_type = event->attr.sample_type;
  731. u16 size = 0;
  732. perf_event__read_size(event);
  733. if (sample_type & PERF_SAMPLE_IP)
  734. size += sizeof(data->ip);
  735. if (sample_type & PERF_SAMPLE_ADDR)
  736. size += sizeof(data->addr);
  737. if (sample_type & PERF_SAMPLE_PERIOD)
  738. size += sizeof(data->period);
  739. if (sample_type & PERF_SAMPLE_READ)
  740. size += event->read_size;
  741. event->header_size = size;
  742. }
  743. static void perf_event__id_header_size(struct perf_event *event)
  744. {
  745. struct perf_sample_data *data;
  746. u64 sample_type = event->attr.sample_type;
  747. u16 size = 0;
  748. if (sample_type & PERF_SAMPLE_TID)
  749. size += sizeof(data->tid_entry);
  750. if (sample_type & PERF_SAMPLE_TIME)
  751. size += sizeof(data->time);
  752. if (sample_type & PERF_SAMPLE_ID)
  753. size += sizeof(data->id);
  754. if (sample_type & PERF_SAMPLE_STREAM_ID)
  755. size += sizeof(data->stream_id);
  756. if (sample_type & PERF_SAMPLE_CPU)
  757. size += sizeof(data->cpu_entry);
  758. event->id_header_size = size;
  759. }
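/*
 * Worked example: id_header_size covers the identifying fields appended to
 * records (e.g. when sample_id_all is set). With sample_type =
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU the tail is
 * { u32 pid, tid; } + u64 time + { u32 cpu, res; } = 8 + 8 + 8 = 24 bytes.
 */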
  760. static void perf_group_attach(struct perf_event *event)
  761. {
  762. struct perf_event *group_leader = event->group_leader, *pos;
  763. /*
  764. * We can have double attach due to group movement in perf_event_open.
  765. */
  766. if (event->attach_state & PERF_ATTACH_GROUP)
  767. return;
  768. event->attach_state |= PERF_ATTACH_GROUP;
  769. if (group_leader == event)
  770. return;
  771. if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
  772. !is_software_event(event))
  773. group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
  774. list_add_tail(&event->group_entry, &group_leader->sibling_list);
  775. group_leader->nr_siblings++;
  776. perf_event__header_size(group_leader);
  777. list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
  778. perf_event__header_size(pos);
  779. }
  780. /*
  781. * Remove an event from the lists for its context.
  782. * Must be called with ctx->mutex and ctx->lock held.
  783. */
  784. static void
  785. list_del_event(struct perf_event *event, struct perf_event_context *ctx)
  786. {
  787. struct perf_cpu_context *cpuctx;
  788. /*
  789. * We can have double detach due to exit/hot-unplug + close.
  790. */
  791. if (!(event->attach_state & PERF_ATTACH_CONTEXT))
  792. return;
  793. event->attach_state &= ~PERF_ATTACH_CONTEXT;
  794. if (is_cgroup_event(event)) {
  795. ctx->nr_cgroups--;
  796. cpuctx = __get_cpu_context(ctx);
  797. /*
  798. * if there are no more cgroup events
  799. * then clear cgrp to avoid a stale pointer
  800. * in update_cgrp_time_from_cpuctx()
  801. */
  802. if (!ctx->nr_cgroups)
  803. cpuctx->cgrp = NULL;
  804. }
  805. ctx->nr_events--;
  806. if (event->attr.inherit_stat)
  807. ctx->nr_stat--;
  808. list_del_rcu(&event->event_entry);
  809. if (event->group_leader == event)
  810. list_del_init(&event->group_entry);
  811. update_group_times(event);
  812. /*
  813. * If event was in error state, then keep it
  814. * that way, otherwise bogus counts will be
  815. * returned on read(). The only way to get out
  816. * of error state is by explicit re-enabling
  817. * of the event
  818. */
  819. if (event->state > PERF_EVENT_STATE_OFF)
  820. event->state = PERF_EVENT_STATE_OFF;
  821. }
  822. static void perf_group_detach(struct perf_event *event)
  823. {
  824. struct perf_event *sibling, *tmp;
  825. struct list_head *list = NULL;
  826. /*
  827. * We can have double detach due to exit/hot-unplug + close.
  828. */
  829. if (!(event->attach_state & PERF_ATTACH_GROUP))
  830. return;
  831. event->attach_state &= ~PERF_ATTACH_GROUP;
  832. /*
  833. * If this is a sibling, remove it from its group.
  834. */
  835. if (event->group_leader != event) {
  836. list_del_init(&event->group_entry);
  837. event->group_leader->nr_siblings--;
  838. goto out;
  839. }
  840. if (!list_empty(&event->group_entry))
  841. list = &event->group_entry;
  842. /*
  843. * If this was a group event with sibling events then
  844. * upgrade the siblings to singleton events by adding them
  845. * to whatever list we are on.
  846. */
  847. list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
  848. if (list)
  849. list_move_tail(&sibling->group_entry, list);
  850. sibling->group_leader = sibling;
  851. /* Inherit group flags from the previous leader */
  852. sibling->group_flags = event->group_flags;
  853. }
  854. out:
  855. perf_event__header_size(event->group_leader);
  856. list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
  857. perf_event__header_size(tmp);
  858. }
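/*
 * An event matches this CPU if it is not bound to another CPU and
 * its cgroup (if any) matches the current task's.
 */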
  859. static inline int
  860. event_filter_match(struct perf_event *event)
  861. {
  862. return (event->cpu == -1 || event->cpu == smp_processor_id())
  863. && perf_cgroup_match(event);
  864. }
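/*
 * Take one event off the PMU: update its timestamps, drop it to
 * INACTIVE (or OFF if a disable was pending) and fix up the
 * active/exclusive accounting in the cpu context.
 */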
  865. static void
  866. event_sched_out(struct perf_event *event,
  867. struct perf_cpu_context *cpuctx,
  868. struct perf_event_context *ctx)
  869. {
  870. u64 tstamp = perf_event_time(event);
  871. u64 delta;
  872. /*
  873. * An event which could not be activated because of
  874. * filter mismatch still needs to have its timings
875. * maintained, otherwise bogus information is returned
  876. * via read() for time_enabled, time_running:
  877. */
  878. if (event->state == PERF_EVENT_STATE_INACTIVE
  879. && !event_filter_match(event)) {
  880. delta = tstamp - event->tstamp_stopped;
  881. event->tstamp_running += delta;
  882. event->tstamp_stopped = tstamp;
  883. }
  884. if (event->state != PERF_EVENT_STATE_ACTIVE)
  885. return;
  886. event->state = PERF_EVENT_STATE_INACTIVE;
  887. if (event->pending_disable) {
  888. event->pending_disable = 0;
  889. event->state = PERF_EVENT_STATE_OFF;
  890. }
  891. event->tstamp_stopped = tstamp;
  892. event->pmu->del(event, 0);
  893. event->oncpu = -1;
  894. if (!is_software_event(event))
  895. cpuctx->active_oncpu--;
  896. ctx->nr_active--;
  897. if (event->attr.exclusive || !cpuctx->active_oncpu)
  898. cpuctx->exclusive = 0;
  899. }
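/*
 * Schedule out an entire group: the leader first, then each sibling;
 * clear the exclusive flag if the active group owned it.
 */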
  900. static void
  901. group_sched_out(struct perf_event *group_event,
  902. struct perf_cpu_context *cpuctx,
  903. struct perf_event_context *ctx)
  904. {
  905. struct perf_event *event;
  906. int state = group_event->state;
  907. event_sched_out(group_event, cpuctx, ctx);
  908. /*
  909. * Schedule out siblings (if any):
  910. */
  911. list_for_each_entry(event, &group_event->sibling_list, group_entry)
  912. event_sched_out(event, cpuctx, ctx);
  913. if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
  914. cpuctx->exclusive = 0;
  915. }
  916. /*
  917. * Cross CPU call to remove a performance event
  918. *
  919. * We disable the event on the hardware level first. After that we
  920. * remove it from the context list.
  921. */
  922. static int __perf_remove_from_context(void *info)
  923. {
  924. struct perf_event *event = info;
  925. struct perf_event_context *ctx = event->ctx;
  926. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  927. raw_spin_lock(&ctx->lock);
  928. event_sched_out(event, cpuctx, ctx);
  929. list_del_event(event, ctx);
  930. raw_spin_unlock(&ctx->lock);
  931. return 0;
  932. }
  933. /*
  934. * Remove the event from a task's (or a CPU's) list of events.
  935. *
936. * CPU events are removed with an smp call. For task events we only
  937. * call when the task is on a CPU.
  938. *
  939. * If event->ctx is a cloned context, callers must make sure that
  940. * every task struct that event->ctx->task could possibly point to
  941. * remains valid. This is OK when called from perf_release since
  942. * that only calls us on the top-level context, which can't be a clone.
  943. * When called from perf_event_exit_task, it's OK because the
  944. * context has been detached from its task.
  945. */
  946. static void perf_remove_from_context(struct perf_event *event)
  947. {
  948. struct perf_event_context *ctx = event->ctx;
  949. struct task_struct *task = ctx->task;
  950. lockdep_assert_held(&ctx->mutex);
  951. if (!task) {
  952. /*
  953. * Per cpu events are removed via an smp call and
  954. * the removal is always successful.
  955. */
  956. cpu_function_call(event->cpu, __perf_remove_from_context, event);
  957. return;
  958. }
  959. retry:
  960. if (!task_function_call(task, __perf_remove_from_context, event))
  961. return;
  962. raw_spin_lock_irq(&ctx->lock);
  963. /*
  964. * If we failed to find a running task, but find the context active now
  965. * that we've acquired the ctx->lock, retry.
  966. */
  967. if (ctx->is_active) {
  968. raw_spin_unlock_irq(&ctx->lock);
  969. goto retry;
  970. }
  971. /*
972. * Since the task isn't running, it's safe to remove the event; our
973. * holding the ctx->lock ensures the task won't get scheduled in.
  974. */
  975. list_del_event(event, ctx);
  976. raw_spin_unlock_irq(&ctx->lock);
  977. }
  978. /*
  979. * Cross CPU call to disable a performance event
  980. */
  981. static int __perf_event_disable(void *info)
  982. {
  983. struct perf_event *event = info;
  984. struct perf_event_context *ctx = event->ctx;
  985. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  986. /*
  987. * If this is a per-task event, need to check whether this
  988. * event's task is the current task on this cpu.
  989. *
  990. * Can trigger due to concurrent perf_event_context_sched_out()
  991. * flipping contexts around.
  992. */
  993. if (ctx->task && cpuctx->task_ctx != ctx)
  994. return -EINVAL;
  995. raw_spin_lock(&ctx->lock);
  996. /*
  997. * If the event is on, turn it off.
  998. * If it is in error state, leave it in error state.
  999. */
  1000. if (event->state >= PERF_EVENT_STATE_INACTIVE) {
  1001. update_context_time(ctx);
  1002. update_cgrp_time_from_event(event);
  1003. update_group_times(event);
  1004. if (event == event->group_leader)
  1005. group_sched_out(event, cpuctx, ctx);
  1006. else
  1007. event_sched_out(event, cpuctx, ctx);
  1008. event->state = PERF_EVENT_STATE_OFF;
  1009. }
  1010. raw_spin_unlock(&ctx->lock);
  1011. return 0;
  1012. }
  1013. /*
1014. * Disable an event.
  1015. *
  1016. * If event->ctx is a cloned context, callers must make sure that
  1017. * every task struct that event->ctx->task could possibly point to
1018. * remains valid. This condition is satisfied when called through
  1019. * perf_event_for_each_child or perf_event_for_each because they
  1020. * hold the top-level event's child_mutex, so any descendant that
  1021. * goes to exit will block in sync_child_event.
  1022. * When called from perf_pending_event it's OK because event->ctx
  1023. * is the current context on this CPU and preemption is disabled,
  1024. * hence we can't get into perf_event_task_sched_out for this context.
  1025. */
  1026. void perf_event_disable(struct perf_event *event)
  1027. {
  1028. struct perf_event_context *ctx = event->ctx;
  1029. struct task_struct *task = ctx->task;
  1030. if (!task) {
  1031. /*
  1032. * Disable the event on the cpu that it's on
  1033. */
  1034. cpu_function_call(event->cpu, __perf_event_disable, event);
  1035. return;
  1036. }
  1037. retry:
  1038. if (!task_function_call(task, __perf_event_disable, event))
  1039. return;
  1040. raw_spin_lock_irq(&ctx->lock);
  1041. /*
  1042. * If the event is still active, we need to retry the cross-call.
  1043. */
  1044. if (event->state == PERF_EVENT_STATE_ACTIVE) {
  1045. raw_spin_unlock_irq(&ctx->lock);
  1046. /*
  1047. * Reload the task pointer, it might have been changed by
  1048. * a concurrent perf_event_context_sched_out().
  1049. */
  1050. task = ctx->task;
  1051. goto retry;
  1052. }
  1053. /*
  1054. * Since we have the lock this context can't be scheduled
  1055. * in, so we can change the state safely.
  1056. */
  1057. if (event->state == PERF_EVENT_STATE_INACTIVE) {
  1058. update_group_times(event);
  1059. event->state = PERF_EVENT_STATE_OFF;
  1060. }
  1061. raw_spin_unlock_irq(&ctx->lock);
  1062. }
  1063. static void perf_set_shadow_time(struct perf_event *event,
  1064. struct perf_event_context *ctx,
  1065. u64 tstamp)
  1066. {
  1067. /*
  1068. * use the correct time source for the time snapshot
  1069. *
  1070. * We could get by without this by leveraging the
  1071. * fact that to get to this function, the caller
  1072. * has most likely already called update_context_time()
1073. * and update_cgrp_time_xx() and thus both timestamps
1074. * are identical (or very close). Given that tstamp is
  1075. * already adjusted for cgroup, we could say that:
  1076. * tstamp - ctx->timestamp
  1077. * is equivalent to
  1078. * tstamp - cgrp->timestamp.
  1079. *
  1080. * Then, in perf_output_read(), the calculation would
  1081. * work with no changes because:
  1082. * - event is guaranteed scheduled in
  1083. * - no scheduled out in between
  1084. * - thus the timestamp would be the same
  1085. *
  1086. * But this is a bit hairy.
  1087. *
  1088. * So instead, we have an explicit cgroup call to remain
1089. * within the same time source all along. We believe it
  1090. * is cleaner and simpler to understand.
  1091. */
  1092. if (is_cgroup_event(event))
  1093. perf_cgroup_set_shadow_time(event, tstamp);
  1094. else
  1095. event->shadow_ctx_time = tstamp - ctx->timestamp;
  1096. }
  1097. #define MAX_INTERRUPTS (~0ULL)
  1098. static void perf_log_throttle(struct perf_event *event, int enable);
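/*
 * Put a single event on the PMU via pmu->add(). The ACTIVE state is
 * made visible before the hardware is touched, and -EAGAIN is
 * returned if pmu->add() fails; events in OFF (or ERROR) state are
 * silently skipped.
 */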
  1099. static int
  1100. event_sched_in(struct perf_event *event,
  1101. struct perf_cpu_context *cpuctx,
  1102. struct perf_event_context *ctx)
  1103. {
  1104. u64 tstamp = perf_event_time(event);
  1105. if (event->state <= PERF_EVENT_STATE_OFF)
  1106. return 0;
  1107. event->state = PERF_EVENT_STATE_ACTIVE;
  1108. event->oncpu = smp_processor_id();
  1109. /*
1110. * Unthrottle events: since we just got scheduled in we might have
1111. * missed several ticks already, and for a heavily scheduling task
1112. * there is little guarantee it'll get a tick in a timely manner.
  1113. */
  1114. if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
  1115. perf_log_throttle(event, 1);
  1116. event->hw.interrupts = 0;
  1117. }
  1118. /*
  1119. * The new state must be visible before we turn it on in the hardware:
  1120. */
  1121. smp_wmb();
  1122. if (event->pmu->add(event, PERF_EF_START)) {
  1123. event->state = PERF_EVENT_STATE_INACTIVE;
  1124. event->oncpu = -1;
  1125. return -EAGAIN;
  1126. }
  1127. event->tstamp_running += tstamp - event->tstamp_stopped;
  1128. perf_set_shadow_time(event, ctx, tstamp);
  1129. if (!is_software_event(event))
  1130. cpuctx->active_oncpu++;
  1131. ctx->nr_active++;
  1132. if (event->attr.exclusive)
  1133. cpuctx->exclusive = 1;
  1134. return 0;
  1135. }
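/*
 * Schedule in a whole group as one unit, using the PMU transaction
 * interface (start_txn/commit_txn/cancel_txn). On partial failure
 * everything is unwound, and the never-scheduled siblings get their
 * timestamps simulated so group timings stay consistent.
 */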
  1136. static int
  1137. group_sched_in(struct perf_event *group_event,
  1138. struct perf_cpu_context *cpuctx,
  1139. struct perf_event_context *ctx)
  1140. {
  1141. struct perf_event *event, *partial_group = NULL;
  1142. struct pmu *pmu = group_event->pmu;
  1143. u64 now = ctx->time;
  1144. bool simulate = false;
  1145. if (group_event->state == PERF_EVENT_STATE_OFF)
  1146. return 0;
  1147. pmu->start_txn(pmu);
  1148. if (event_sched_in(group_event, cpuctx, ctx)) {
  1149. pmu->cancel_txn(pmu);
  1150. return -EAGAIN;
  1151. }
  1152. /*
  1153. * Schedule in siblings as one group (if any):
  1154. */
  1155. list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  1156. if (event_sched_in(event, cpuctx, ctx)) {
  1157. partial_group = event;
  1158. goto group_error;
  1159. }
  1160. }
  1161. if (!pmu->commit_txn(pmu))
  1162. return 0;
  1163. group_error:
  1164. /*
  1165. * Groups can be scheduled in as one unit only, so undo any
  1166. * partial group before returning:
  1167. * The events up to the failed event are scheduled out normally,
  1168. * tstamp_stopped will be updated.
  1169. *
  1170. * The failed events and the remaining siblings need to have
1171. * their timings updated as if they had gone through event_sched_in()
  1172. * and event_sched_out(). This is required to get consistent timings
  1173. * across the group. This also takes care of the case where the group
  1174. * could never be scheduled by ensuring tstamp_stopped is set to mark
  1175. * the time the event was actually stopped, such that time delta
  1176. * calculation in update_event_times() is correct.
  1177. */
  1178. list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  1179. if (event == partial_group)
  1180. simulate = true;
  1181. if (simulate) {
  1182. event->tstamp_running += now - event->tstamp_stopped;
  1183. event->tstamp_stopped = now;
  1184. } else {
  1185. event_sched_out(event, cpuctx, ctx);
  1186. }
  1187. }
  1188. event_sched_out(group_event, cpuctx, ctx);
  1189. pmu->cancel_txn(pmu);
  1190. return -EAGAIN;
  1191. }
  1192. /*
  1193. * Work out whether we can put this event group on the CPU now.
  1194. */
  1195. static int group_can_go_on(struct perf_event *event,
  1196. struct perf_cpu_context *cpuctx,
  1197. int can_add_hw)
  1198. {
  1199. /*
  1200. * Groups consisting entirely of software events can always go on.
  1201. */
  1202. if (event->group_flags & PERF_GROUP_SOFTWARE)
  1203. return 1;
  1204. /*
  1205. * If an exclusive group is already on, no other hardware
  1206. * events can go on.
  1207. */
  1208. if (cpuctx->exclusive)
  1209. return 0;
  1210. /*
  1211. * If this group is exclusive and there are already
  1212. * events on the CPU, it can't go on.
  1213. */
  1214. if (event->attr.exclusive && cpuctx->active_oncpu)
  1215. return 0;
  1216. /*
  1217. * Otherwise, try to add it if all previous groups were able
  1218. * to go on.
  1219. */
  1220. return can_add_hw;
  1221. }
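/*
 * Add the event to the context's lists and to its group, and
 * initialise all of its timestamps to the current event time.
 */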
  1222. static void add_event_to_ctx(struct perf_event *event,
  1223. struct perf_event_context *ctx)
  1224. {
  1225. u64 tstamp = perf_event_time(event);
  1226. list_add_event(event, ctx);
  1227. perf_group_attach(event);
  1228. event->tstamp_enabled = tstamp;
  1229. event->tstamp_running = tstamp;
  1230. event->tstamp_stopped = tstamp;
  1231. }
  1232. static void perf_event_context_sched_in(struct perf_event_context *ctx,
  1233. struct task_struct *tsk);
  1234. /*
  1235. * Cross CPU call to install and enable a performance event
  1236. *
  1237. * Must be called with ctx->mutex held
  1238. */
  1239. static int __perf_install_in_context(void *info)
  1240. {
  1241. struct perf_event *event = info;
  1242. struct perf_event_context *ctx = event->ctx;
  1243. struct perf_event *leader = event->group_leader;
  1244. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  1245. int err;
  1246. /*
  1247. * In case we're installing a new context to an already running task,
  1248. * could also happen before perf_event_task_sched_in() on architectures
  1249. * which do context switches with IRQs enabled.
  1250. */
  1251. if (ctx->task && !cpuctx->task_ctx)
  1252. perf_event_context_sched_in(ctx, ctx->task);
  1253. raw_spin_lock(&ctx->lock);
  1254. ctx->is_active = 1;
  1255. update_context_time(ctx);
  1256. /*
  1257. * update cgrp time only if current cgrp
  1258. * matches event->cgrp. Must be done before
  1259. * calling add_event_to_ctx()
  1260. */
  1261. update_cgrp_time_from_event(event);
  1262. add_event_to_ctx(event, ctx);
  1263. if (!event_filter_match(event))
  1264. goto unlock;
  1265. /*
  1266. * Don't put the event on if it is disabled or if
  1267. * it is in a group and the group isn't on.
  1268. */
  1269. if (event->state != PERF_EVENT_STATE_INACTIVE ||
  1270. (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
  1271. goto unlock;
  1272. /*
  1273. * An exclusive event can't go on if there are already active
  1274. * hardware events, and no hardware event can go on if there
  1275. * is already an exclusive event on.
  1276. */
  1277. if (!group_can_go_on(event, cpuctx, 1))
  1278. err = -EEXIST;
  1279. else
  1280. err = event_sched_in(event, cpuctx, ctx);
  1281. if (err) {
  1282. /*
  1283. * This event couldn't go on. If it is in a group
  1284. * then we have to pull the whole group off.
  1285. * If the event group is pinned then put it in error state.
  1286. */
  1287. if (leader != event)
  1288. group_sched_out(leader, cpuctx, ctx);
  1289. if (leader->attr.pinned) {
  1290. update_group_times(leader);
  1291. leader->state = PERF_EVENT_STATE_ERROR;
  1292. }
  1293. }
  1294. unlock:
  1295. raw_spin_unlock(&ctx->lock);
  1296. return 0;
  1297. }
  1298. /*
  1299. * Attach a performance event to a context
  1300. *
  1301. * First we add the event to the list with the hardware enable bit
  1302. * in event->hw_config cleared.
  1303. *
1304. * If the event is attached to a task which is on a CPU we use an smp
  1305. * call to enable it in the task context. The task might have been
  1306. * scheduled away, but we check this in the smp call again.
  1307. */
  1308. static void
  1309. perf_install_in_context(struct perf_event_context *ctx,
  1310. struct perf_event *event,
  1311. int cpu)
  1312. {
  1313. struct task_struct *task = ctx->task;
  1314. lockdep_assert_held(&ctx->mutex);
  1315. event->ctx = ctx;
  1316. if (!task) {
  1317. /*
  1318. * Per cpu events are installed via an smp call and
  1319. * the install is always successful.
  1320. */
  1321. cpu_function_call(cpu, __perf_install_in_context, event);
  1322. return;
  1323. }
  1324. retry:
  1325. if (!task_function_call(task, __perf_install_in_context, event))
  1326. return;
  1327. raw_spin_lock_irq(&ctx->lock);
  1328. /*
  1329. * If we failed to find a running task, but find the context active now
  1330. * that we've acquired the ctx->lock, retry.
  1331. */
  1332. if (ctx->is_active) {
  1333. raw_spin_unlock_irq(&ctx->lock);
  1334. goto retry;
  1335. }
  1336. /*
  1337. * Since the task isn't running, its safe to add the event, us holding
  1338. * the ctx->lock ensures the task won't get scheduled in.
  1339. */
  1340. add_event_to_ctx(event, ctx);
  1341. raw_spin_unlock_irq(&ctx->lock);
  1342. }
  1343. /*
1344. * Put an event into inactive state and update time fields.
  1345. * Enabling the leader of a group effectively enables all
  1346. * the group members that aren't explicitly disabled, so we
  1347. * have to update their ->tstamp_enabled also.
  1348. * Note: this works for group members as well as group leaders
  1349. * since the non-leader members' sibling_lists will be empty.
  1350. */
  1351. static void __perf_event_mark_enabled(struct perf_event *event,
  1352. struct perf_event_context *ctx)
  1353. {
  1354. struct perf_event *sub;
  1355. u64 tstamp = perf_event_time(event);
  1356. event->state = PERF_EVENT_STATE_INACTIVE;
  1357. event->tstamp_enabled = tstamp - event->total_time_enabled;
  1358. list_for_each_entry(sub, &event->sibling_list, group_entry) {
  1359. if (sub->state >= PERF_EVENT_STATE_INACTIVE)
  1360. sub->tstamp_enabled = tstamp - sub->total_time_enabled;
  1361. }
  1362. }
  1363. /*
  1364. * Cross CPU call to enable a performance event
  1365. */
  1366. static int __perf_event_enable(void *info)
  1367. {
  1368. struct perf_event *event = info;
  1369. struct perf_event_context *ctx = event->ctx;
  1370. struct perf_event *leader = event->group_leader;
  1371. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  1372. int err;
  1373. if (WARN_ON_ONCE(!ctx->is_active))
  1374. return -EINVAL;
  1375. raw_spin_lock(&ctx->lock);
  1376. update_context_time(ctx);
  1377. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  1378. goto unlock;
  1379. /*
  1380. * set current task's cgroup time reference point
  1381. */
  1382. perf_cgroup_set_timestamp(current, ctx);
  1383. __perf_event_mark_enabled(event, ctx);
  1384. if (!event_filter_match(event)) {
  1385. if (is_cgroup_event(event))
  1386. perf_cgroup_defer_enabled(event);
  1387. goto unlock;
  1388. }
  1389. /*
  1390. * If the event is in a group and isn't the group leader,
  1391. * then don't put it on unless the group is on.
  1392. */
  1393. if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
  1394. goto unlock;
  1395. if (!group_can_go_on(event, cpuctx, 1)) {
  1396. err = -EEXIST;
  1397. } else {
  1398. if (event == leader)
  1399. err = group_sched_in(event, cpuctx, ctx);
  1400. else
  1401. err = event_sched_in(event, cpuctx, ctx);
  1402. }
  1403. if (err) {
  1404. /*
  1405. * If this event can't go on and it's part of a
  1406. * group, then the whole group has to come off.
  1407. */
  1408. if (leader != event)
  1409. group_sched_out(leader, cpuctx, ctx);
  1410. if (leader->attr.pinned) {
  1411. update_group_times(leader);
  1412. leader->state = PERF_EVENT_STATE_ERROR;
  1413. }
  1414. }
  1415. unlock:
  1416. raw_spin_unlock(&ctx->lock);
  1417. return 0;
  1418. }
  1419. /*
1420. * Enable an event.
  1421. *
  1422. * If event->ctx is a cloned context, callers must make sure that
  1423. * every task struct that event->ctx->task could possibly point to
  1424. * remains valid. This condition is satisfied when called through
  1425. * perf_event_for_each_child or perf_event_for_each as described
  1426. * for perf_event_disable.
  1427. */
  1428. void perf_event_enable(struct perf_event *event)
  1429. {
  1430. struct perf_event_context *ctx = event->ctx;
  1431. struct task_struct *task = ctx->task;
  1432. if (!task) {
  1433. /*
  1434. * Enable the event on the cpu that it's on
  1435. */
  1436. cpu_function_call(event->cpu, __perf_event_enable, event);
  1437. return;
  1438. }
  1439. raw_spin_lock_irq(&ctx->lock);
  1440. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  1441. goto out;
  1442. /*
  1443. * If the event is in error state, clear that first.
  1444. * That way, if we see the event in error state below, we
  1445. * know that it has gone back into error state, as distinct
  1446. * from the task having been scheduled away before the
  1447. * cross-call arrived.
  1448. */
  1449. if (event->state == PERF_EVENT_STATE_ERROR)
  1450. event->state = PERF_EVENT_STATE_OFF;
  1451. retry:
  1452. if (!ctx->is_active) {
  1453. __perf_event_mark_enabled(event, ctx);
  1454. goto out;
  1455. }
  1456. raw_spin_unlock_irq(&ctx->lock);
  1457. if (!task_function_call(task, __perf_event_enable, event))
  1458. return;
  1459. raw_spin_lock_irq(&ctx->lock);
  1460. /*
  1461. * If the context is active and the event is still off,
  1462. * we need to retry the cross-call.
  1463. */
  1464. if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
  1465. /*
  1466. * task could have been flipped by a concurrent
  1467. * perf_event_context_sched_out()
  1468. */
  1469. task = ctx->task;
  1470. goto retry;
  1471. }
  1472. out:
  1473. raw_spin_unlock_irq(&ctx->lock);
  1474. }
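/*
 * Bump the event's overflow limit by @refresh and (re-)enable it.
 * Not supported on inherited or non-sampling events.
 */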
  1475. static int perf_event_refresh(struct perf_event *event, int refresh)
  1476. {
  1477. /*
  1478. * not supported on inherited events
  1479. */
  1480. if (event->attr.inherit || !is_sampling_event(event))
  1481. return -EINVAL;
  1482. atomic_add(refresh, &event->event_limit);
  1483. perf_event_enable(event);
  1484. return 0;
  1485. }
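/*
 * Schedule out the pinned and/or flexible groups of a context, as
 * selected by @event_type, after updating the context time.
 */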
  1486. static void ctx_sched_out(struct perf_event_context *ctx,
  1487. struct perf_cpu_context *cpuctx,
  1488. enum event_type_t event_type)
  1489. {
  1490. struct perf_event *event;
  1491. ctx->is_active = 0;
  1492. if (likely(!ctx->nr_events))
  1493. return;
  1494. update_context_time(ctx);
  1495. update_cgrp_time_from_cpuctx(cpuctx);
  1496. if (!ctx->nr_active)
  1497. return;
  1498. perf_pmu_disable(ctx->pmu);
  1499. if (event_type & EVENT_PINNED) {
  1500. list_for_each_entry(event, &ctx->pinned_groups, group_entry)
  1501. group_sched_out(event, cpuctx, ctx);
  1502. }
  1503. if (event_type & EVENT_FLEXIBLE) {
  1504. list_for_each_entry(event, &ctx->flexible_groups, group_entry)
  1505. group_sched_out(event, cpuctx, ctx);
  1506. }
  1507. perf_pmu_enable(ctx->pmu);
  1508. }
  1509. /*
  1510. * Test whether two contexts are equivalent, i.e. whether they
  1511. * have both been cloned from the same version of the same context
  1512. * and they both have the same number of enabled events.
  1513. * If the number of enabled events is the same, then the set
  1514. * of enabled events should be the same, because these are both
  1515. * inherited contexts, therefore we can't access individual events
  1516. * in them directly with an fd; we can only enable/disable all
  1517. * events via prctl, or enable/disable all events in a family
  1518. * via ioctl, which will have the same effect on both contexts.
  1519. */
  1520. static int context_equiv(struct perf_event_context *ctx1,
  1521. struct perf_event_context *ctx2)
  1522. {
  1523. return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
  1524. && ctx1->parent_gen == ctx2->parent_gen
  1525. && !ctx1->pin_count && !ctx2->pin_count;
  1526. }
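/*
 * For inherit_stat events, swap the count and time totals with the
 * counterpart event in the cloned context, so per-task statistics
 * follow the task when the two contexts are flipped on switch.
 */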
  1527. static void __perf_event_sync_stat(struct perf_event *event,
  1528. struct perf_event *next_event)
  1529. {
  1530. u64 value;
  1531. if (!event->attr.inherit_stat)
  1532. return;
  1533. /*
  1534. * Update the event value, we cannot use perf_event_read()
  1535. * because we're in the middle of a context switch and have IRQs
  1536. * disabled, which upsets smp_call_function_single(), however
  1537. * we know the event must be on the current CPU, therefore we
  1538. * don't need to use it.
  1539. */
  1540. switch (event->state) {
  1541. case PERF_EVENT_STATE_ACTIVE:
  1542. event->pmu->read(event);
  1543. /* fall-through */
  1544. case PERF_EVENT_STATE_INACTIVE:
  1545. update_event_times(event);
  1546. break;
  1547. default:
  1548. break;
  1549. }
  1550. /*
  1551. * In order to keep per-task stats reliable we need to flip the event
  1552. * values when we flip the contexts.
  1553. */
  1554. value = local64_read(&next_event->count);
  1555. value = local64_xchg(&event->count, value);
  1556. local64_set(&next_event->count, value);
  1557. swap(event->total_time_enabled, next_event->total_time_enabled);
  1558. swap(event->total_time_running, next_event->total_time_running);
  1559. /*
  1560. * Since we swizzled the values, update the user visible data too.
  1561. */
  1562. perf_event_update_userpage(event);
  1563. perf_event_update_userpage(next_event);
  1564. }
  1565. #define list_next_entry(pos, member) \
  1566. list_entry(pos->member.next, typeof(*pos), member)
  1567. static void perf_event_sync_stat(struct perf_event_context *ctx,
  1568. struct perf_event_context *next_ctx)
  1569. {
  1570. struct perf_event *event, *next_event;
  1571. if (!ctx->nr_stat)
  1572. return;
  1573. update_context_time(ctx);
  1574. event = list_first_entry(&ctx->event_list,
  1575. struct perf_event, event_entry);
  1576. next_event = list_first_entry(&next_ctx->event_list,
  1577. struct perf_event, event_entry);
  1578. while (&event->event_entry != &ctx->event_list &&
  1579. &next_event->event_entry != &next_ctx->event_list) {
  1580. __perf_event_sync_stat(event, next_event);
  1581. event = list_next_entry(event, event_entry);
  1582. next_event = list_next_entry(next_event, event_entry);
  1583. }
  1584. }
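/*
 * A task is being scheduled out. If the outgoing and incoming tasks
 * have equivalent (cloned) contexts, simply swap the context pointers
 * between the two tasks; otherwise schedule all the events out.
 */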
  1585. static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  1586. struct task_struct *next)
  1587. {
  1588. struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
  1589. struct perf_event_context *next_ctx;
  1590. struct perf_event_context *parent;
  1591. struct perf_cpu_context *cpuctx;
  1592. int do_switch = 1;
  1593. if (likely(!ctx))
  1594. return;
  1595. cpuctx = __get_cpu_context(ctx);
  1596. if (!cpuctx->task_ctx)
  1597. return;
  1598. rcu_read_lock();
  1599. parent = rcu_dereference(ctx->parent_ctx);
  1600. next_ctx = next->perf_event_ctxp[ctxn];
  1601. if (parent && next_ctx &&
  1602. rcu_dereference(next_ctx->parent_ctx) == parent) {
  1603. /*
  1604. * Looks like the two contexts are clones, so we might be
  1605. * able to optimize the context switch. We lock both
  1606. * contexts and check that they are clones under the
  1607. * lock (including re-checking that neither has been
  1608. * uncloned in the meantime). It doesn't matter which
  1609. * order we take the locks because no other cpu could
  1610. * be trying to lock both of these tasks.
  1611. */
  1612. raw_spin_lock(&ctx->lock);
  1613. raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
  1614. if (context_equiv(ctx, next_ctx)) {
  1615. /*
  1616. * XXX do we need a memory barrier of sorts
  1617. * wrt to rcu_dereference() of perf_event_ctxp
  1618. */
  1619. task->perf_event_ctxp[ctxn] = next_ctx;
  1620. next->perf_event_ctxp[ctxn] = ctx;
  1621. ctx->task = next;
  1622. next_ctx->task = task;
  1623. do_switch = 0;
  1624. perf_event_sync_stat(ctx, next_ctx);
  1625. }
  1626. raw_spin_unlock(&next_ctx->lock);
  1627. raw_spin_unlock(&ctx->lock);
  1628. }
  1629. rcu_read_unlock();
  1630. if (do_switch) {
  1631. raw_spin_lock(&ctx->lock);
  1632. ctx_sched_out(ctx, cpuctx, EVENT_ALL);
  1633. cpuctx->task_ctx = NULL;
  1634. raw_spin_unlock(&ctx->lock);
  1635. }
  1636. }
  1637. #define for_each_task_context_nr(ctxn) \
  1638. for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
  1639. /*
  1640. * Called from scheduler to remove the events of the current task,
  1641. * with interrupts disabled.
  1642. *
  1643. * We stop each event and update the event value in event->count.
  1644. *
  1645. * This does not protect us against NMI, but disable()
  1646. * sets the disabled bit in the control field of event _before_
1647. * accessing the event control register. If an NMI hits, then it will
  1648. * not restart the event.
  1649. */
  1650. void __perf_event_task_sched_out(struct task_struct *task,
  1651. struct task_struct *next)
  1652. {
  1653. int ctxn;
  1654. for_each_task_context_nr(ctxn)
  1655. perf_event_context_sched_out(task, ctxn, next);
  1656. /*
  1657. * if cgroup events exist on this CPU, then we need
  1658. * to check if we have to switch out PMU state.
1659. * cgroup events are system-wide mode only
  1660. */
  1661. if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
  1662. perf_cgroup_sched_out(task);
  1663. }
  1664. static void task_ctx_sched_out(struct perf_event_context *ctx)
  1665. {
  1666. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  1667. if (!cpuctx->task_ctx)
  1668. return;
  1669. if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  1670. return;
  1671. ctx_sched_out(ctx, cpuctx, EVENT_ALL);
  1672. cpuctx->task_ctx = NULL;
  1673. }
  1674. /*
  1675. * Called with IRQs disabled
  1676. */
  1677. static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
  1678. enum event_type_t event_type)
  1679. {
  1680. ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
  1681. }
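/*
 * Schedule in all pinned groups. A pinned group that cannot get on
 * the PMU is put into ERROR state and will not be retried.
 */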
  1682. static void
  1683. ctx_pinned_sched_in(struct perf_event_context *ctx,
  1684. struct perf_cpu_context *cpuctx)
  1685. {
  1686. struct perf_event *event;
  1687. list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  1688. if (event->state <= PERF_EVENT_STATE_OFF)
  1689. continue;
  1690. if (!event_filter_match(event))
  1691. continue;
  1692. /* may need to reset tstamp_enabled */
  1693. if (is_cgroup_event(event))
  1694. perf_cgroup_mark_enabled(event, ctx);
  1695. if (group_can_go_on(event, cpuctx, 1))
  1696. group_sched_in(event, cpuctx, ctx);
  1697. /*
  1698. * If this pinned group hasn't been scheduled,
  1699. * put it in error state.
  1700. */
  1701. if (event->state == PERF_EVENT_STATE_INACTIVE) {
  1702. update_group_times(event);
  1703. event->state = PERF_EVENT_STATE_ERROR;
  1704. }
  1705. }
  1706. }
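/*
 * Schedule in flexible groups until the PMU is full; the rest stay
 * INACTIVE and will get their turn on a later rotation.
 */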
  1707. static void
  1708. ctx_flexible_sched_in(struct perf_event_context *ctx,
  1709. struct perf_cpu_context *cpuctx)
  1710. {
  1711. struct perf_event *event;
  1712. int can_add_hw = 1;
  1713. list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  1714. /* Ignore events in OFF or ERROR state */
  1715. if (event->state <= PERF_EVENT_STATE_OFF)
  1716. continue;
  1717. /*
  1718. * Listen to the 'cpu' scheduling filter constraint
  1719. * of events:
  1720. */
  1721. if (!event_filter_match(event))
  1722. continue;
  1723. /* may need to reset tstamp_enabled */
  1724. if (is_cgroup_event(event))
  1725. perf_cgroup_mark_enabled(event, ctx);
  1726. if (group_can_go_on(event, cpuctx, can_add_hw)) {
  1727. if (group_sched_in(event, cpuctx, ctx))
  1728. can_add_hw = 0;
  1729. }
  1730. }
  1731. }
  1732. static void
  1733. ctx_sched_in(struct perf_event_context *ctx,
  1734. struct perf_cpu_context *cpuctx,
  1735. enum event_type_t event_type,
  1736. struct task_struct *task)
  1737. {
  1738. u64 now;
  1739. ctx->is_active = 1;
  1740. if (likely(!ctx->nr_events))
  1741. return;
  1742. now = perf_clock();
  1743. ctx->timestamp = now;
  1744. perf_cgroup_set_timestamp(task, ctx);
  1745. /*
  1746. * First go through the list and put on any pinned groups
  1747. * in order to give them the best chance of going on.
  1748. */
  1749. if (event_type & EVENT_PINNED)
  1750. ctx_pinned_sched_in(ctx, cpuctx);
  1751. /* Then walk through the lower prio flexible groups */
  1752. if (event_type & EVENT_FLEXIBLE)
  1753. ctx_flexible_sched_in(ctx, cpuctx);
  1754. }
  1755. static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
  1756. enum event_type_t event_type,
  1757. struct task_struct *task)
  1758. {
  1759. struct perf_event_context *ctx = &cpuctx->ctx;
  1760. ctx_sched_in(ctx, cpuctx, event_type, task);
  1761. }
  1762. static void perf_event_context_sched_in(struct perf_event_context *ctx,
  1763. struct task_struct *task)
  1764. {
  1765. struct perf_cpu_context *cpuctx;
  1766. cpuctx = __get_cpu_context(ctx);
  1767. if (cpuctx->task_ctx == ctx)
  1768. return;
  1769. perf_ctx_lock(cpuctx, ctx);
  1770. perf_pmu_disable(ctx->pmu);
  1771. /*
  1772. * We want to keep the following priority order:
  1773. * cpu pinned (that don't need to move), task pinned,
  1774. * cpu flexible, task flexible.
  1775. */
  1776. cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
  1777. ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
  1778. cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
  1779. ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
  1780. cpuctx->task_ctx = ctx;
  1781. perf_pmu_enable(ctx->pmu);
  1782. perf_ctx_unlock(cpuctx, ctx);
  1783. /*
  1784. * Since these rotations are per-cpu, we need to ensure the
  1785. * cpu-context we got scheduled on is actually rotating.
  1786. */
  1787. perf_pmu_rotate_start(ctx->pmu);
  1788. }
  1789. /*
  1790. * Called from scheduler to add the events of the current task
  1791. * with interrupts disabled.
  1792. *
  1793. * We restore the event value and then enable it.
  1794. *
  1795. * This does not protect us against NMI, but enable()
  1796. * sets the enabled bit in the control field of event _before_
1797. * accessing the event control register. If an NMI hits, then it will
  1798. * keep the event running.
  1799. */
  1800. void __perf_event_task_sched_in(struct task_struct *task)
  1801. {
  1802. struct perf_event_context *ctx;
  1803. int ctxn;
  1804. for_each_task_context_nr(ctxn) {
  1805. ctx = task->perf_event_ctxp[ctxn];
  1806. if (likely(!ctx))
  1807. continue;
  1808. perf_event_context_sched_in(ctx, task);
  1809. }
  1810. /*
  1811. * if cgroup events exist on this CPU, then we need
  1812. * to check if we have to switch in PMU state.
1813. * cgroup events are system-wide mode only
  1814. */
  1815. if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
  1816. perf_cgroup_sched_in(task);
  1817. }
  1818. static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
  1819. {
  1820. u64 frequency = event->attr.sample_freq;
  1821. u64 sec = NSEC_PER_SEC;
  1822. u64 divisor, dividend;
  1823. int count_fls, nsec_fls, frequency_fls, sec_fls;
  1824. count_fls = fls64(count);
  1825. nsec_fls = fls64(nsec);
  1826. frequency_fls = fls64(frequency);
  1827. sec_fls = 30;
  1828. /*
  1829. * We got @count in @nsec, with a target of sample_freq HZ
  1830. * the target period becomes:
  1831. *
  1832. * @count * 10^9
  1833. * period = -------------------
  1834. * @nsec * sample_freq
  1835. *
  1836. */
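/*
 * For example (roughly): 1,000,000 events observed in 10ms with a
 * target sample_freq of 1000 HZ yields
 * 1e6 * 1e9 / (1e7 * 1e3) = 100,000 events per sample period.
 */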
  1837. /*
  1838. * Reduce accuracy by one bit such that @a and @b converge
  1839. * to a similar magnitude.
  1840. */
  1841. #define REDUCE_FLS(a, b) \
  1842. do { \
  1843. if (a##_fls > b##_fls) { \
  1844. a >>= 1; \
  1845. a##_fls--; \
  1846. } else { \
  1847. b >>= 1; \
  1848. b##_fls--; \
  1849. } \
  1850. } while (0)
  1851. /*
  1852. * Reduce accuracy until either term fits in a u64, then proceed with
  1853. * the other, so that finally we can do a u64/u64 division.
  1854. */
  1855. while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
  1856. REDUCE_FLS(nsec, frequency);
  1857. REDUCE_FLS(sec, count);
  1858. }
  1859. if (count_fls + sec_fls > 64) {
  1860. divisor = nsec * frequency;
  1861. while (count_fls + sec_fls > 64) {
  1862. REDUCE_FLS(count, sec);
  1863. divisor >>= 1;
  1864. }
  1865. dividend = count * sec;
  1866. } else {
  1867. dividend = count * sec;
  1868. while (nsec_fls + frequency_fls > 64) {
  1869. REDUCE_FLS(nsec, frequency);
  1870. dividend >>= 1;
  1871. }
  1872. divisor = nsec * frequency;
  1873. }
  1874. if (!divisor)
  1875. return dividend;
  1876. return div64_u64(dividend, divisor);
  1877. }
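/*
 * Nudge hwc->sample_period towards the period that would hit
 * attr.sample_freq, low-pass filtered by ~1/8 per step. If
 * period_left has drifted far past the new period, restart the
 * event so the change takes effect promptly.
 */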
  1878. static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
  1879. {
  1880. struct hw_perf_event *hwc = &event->hw;
  1881. s64 period, sample_period;
  1882. s64 delta;
  1883. period = perf_calculate_period(event, nsec, count);
  1884. delta = (s64)(period - hwc->sample_period);
  1885. delta = (delta + 7) / 8; /* low pass filter */
  1886. sample_period = hwc->sample_period + delta;
  1887. if (!sample_period)
  1888. sample_period = 1;
  1889. hwc->sample_period = sample_period;
  1890. if (local64_read(&hwc->period_left) > 8*sample_period) {
  1891. event->pmu->stop(event, PERF_EF_UPDATE);
  1892. local64_set(&hwc->period_left, 0);
  1893. event->pmu->start(event, PERF_EF_RELOAD);
  1894. }
  1895. }
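/*
 * Per-tick pass over a context's active events: unthrottle anything
 * that hit MAX_INTERRUPTS, and for frequency-based events recompute
 * the period from the count delta since the last tick.
 */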
  1896. static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
  1897. {
  1898. struct perf_event *event;
  1899. struct hw_perf_event *hwc;
  1900. u64 interrupts, now;
  1901. s64 delta;
  1902. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  1903. if (event->state != PERF_EVENT_STATE_ACTIVE)
  1904. continue;
  1905. if (!event_filter_match(event))
  1906. continue;
  1907. hwc = &event->hw;
  1908. interrupts = hwc->interrupts;
  1909. hwc->interrupts = 0;
  1910. /*
  1911. * unthrottle events on the tick
  1912. */
  1913. if (interrupts == MAX_INTERRUPTS) {
  1914. perf_log_throttle(event, 1);
  1915. event->pmu->start(event, 0);
  1916. }
  1917. if (!event->attr.freq || !event->attr.sample_freq)
  1918. continue;
  1919. event->pmu->read(event);
  1920. now = local64_read(&event->count);
  1921. delta = now - hwc->freq_count_stamp;
  1922. hwc->freq_count_stamp = now;
  1923. if (delta > 0)
  1924. perf_adjust_period(event, period, delta);
  1925. }
  1926. }
  1927. /*
  1928. * Round-robin a context's events:
  1929. */
  1930. static void rotate_ctx(struct perf_event_context *ctx)
  1931. {
  1932. /*
1933. * Rotate the first entry of the non-pinned groups to the end of the
1934. * list. Rotation might be disabled by the inheritance code.
  1935. */
  1936. if (!ctx->rotate_disable)
  1937. list_rotate_left(&ctx->flexible_groups);
  1938. }
  1939. /*
  1940. * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
  1941. * because they're strictly cpu affine and rotate_start is called with IRQs
  1942. * disabled, while rotate_context is called from IRQ context.
  1943. */
  1944. static void perf_rotate_context(struct perf_cpu_context *cpuctx)
  1945. {
  1946. u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
  1947. struct perf_event_context *ctx = NULL;
  1948. int rotate = 0, remove = 1;
  1949. if (cpuctx->ctx.nr_events) {
  1950. remove = 0;
  1951. if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
  1952. rotate = 1;
  1953. }
  1954. ctx = cpuctx->task_ctx;
  1955. if (ctx && ctx->nr_events) {
  1956. remove = 0;
  1957. if (ctx->nr_events != ctx->nr_active)
  1958. rotate = 1;
  1959. }
  1960. perf_ctx_lock(cpuctx, cpuctx->task_ctx);
  1961. perf_pmu_disable(cpuctx->ctx.pmu);
  1962. perf_ctx_adjust_freq(&cpuctx->ctx, interval);
  1963. if (ctx)
  1964. perf_ctx_adjust_freq(ctx, interval);
  1965. if (!rotate)
  1966. goto done;
  1967. cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
  1968. if (ctx)
  1969. ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
  1970. rotate_ctx(&cpuctx->ctx);
  1971. if (ctx)
  1972. rotate_ctx(ctx);
  1973. cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
  1974. if (ctx)
  1975. ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
  1976. done:
  1977. if (remove)
  1978. list_del_init(&cpuctx->rotation_list);
  1979. perf_pmu_enable(cpuctx->ctx.pmu);
  1980. perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
  1981. }
  1982. void perf_event_task_tick(void)
  1983. {
  1984. struct list_head *head = &__get_cpu_var(rotation_list);
  1985. struct perf_cpu_context *cpuctx, *tmp;
  1986. WARN_ON(!irqs_disabled());
  1987. list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
  1988. if (cpuctx->jiffies_interval == 1 ||
  1989. !(jiffies % cpuctx->jiffies_interval))
  1990. perf_rotate_context(cpuctx);
  1991. }
  1992. }
  1993. static int event_enable_on_exec(struct perf_event *event,
  1994. struct perf_event_context *ctx)
  1995. {
  1996. if (!event->attr.enable_on_exec)
  1997. return 0;
  1998. event->attr.enable_on_exec = 0;
  1999. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  2000. return 0;
  2001. __perf_event_mark_enabled(event, ctx);
  2002. return 1;
  2003. }
  2004. /*
  2005. * Enable all of a task's events that have been marked enable-on-exec.
  2006. * This expects task == current.
  2007. */
  2008. static void perf_event_enable_on_exec(struct perf_event_context *ctx)
  2009. {
  2010. struct perf_event *event;
  2011. unsigned long flags;
  2012. int enabled = 0;
  2013. int ret;
  2014. local_irq_save(flags);
  2015. if (!ctx || !ctx->nr_events)
  2016. goto out;
  2017. /*
2018. * We must context-switch out cgroup events to avoid a conflict
2019. * when invoking perf_task_event_sched_in() later on
2020. * in this function. Otherwise we end up trying to
2021. * context-switch in cgroup events which are already scheduled
2022. * in.
  2023. */
  2024. perf_cgroup_sched_out(current);
  2025. raw_spin_lock(&ctx->lock);
  2026. task_ctx_sched_out(ctx);
  2027. list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  2028. ret = event_enable_on_exec(event, ctx);
  2029. if (ret)
  2030. enabled = 1;
  2031. }
  2032. list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  2033. ret = event_enable_on_exec(event, ctx);
  2034. if (ret)
  2035. enabled = 1;
  2036. }
  2037. /*
  2038. * Unclone this context if we enabled any event.
  2039. */
  2040. if (enabled)
  2041. unclone_ctx(ctx);
  2042. raw_spin_unlock(&ctx->lock);
  2043. /*
2044. * Also schedules cgroup events back in, if any:
  2045. */
  2046. perf_event_context_sched_in(ctx, ctx->task);
  2047. out:
  2048. local_irq_restore(flags);
  2049. }
  2050. /*
  2051. * Cross CPU call to read the hardware event
  2052. */
  2053. static void __perf_event_read(void *info)
  2054. {
  2055. struct perf_event *event = info;
  2056. struct perf_event_context *ctx = event->ctx;
  2057. struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
  2058. /*
  2059. * If this is a task context, we need to check whether it is
  2060. * the current task context of this cpu. If not it has been
  2061. * scheduled out before the smp call arrived. In that case
  2062. * event->count would have been updated to a recent sample
  2063. * when the event was scheduled out.
  2064. */
  2065. if (ctx->task && cpuctx->task_ctx != ctx)
  2066. return;
  2067. raw_spin_lock(&ctx->lock);
  2068. if (ctx->is_active) {
  2069. update_context_time(ctx);
  2070. update_cgrp_time_from_event(event);
  2071. }
  2072. update_event_times(event);
  2073. if (event->state == PERF_EVENT_STATE_ACTIVE)
  2074. event->pmu->read(event);
  2075. raw_spin_unlock(&ctx->lock);
  2076. }
  2077. static inline u64 perf_event_count(struct perf_event *event)
  2078. {
  2079. return local64_read(&event->count) + atomic64_read(&event->child_count);
  2080. }
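/*
 * Return an up-to-date count for the event. If it is currently
 * ACTIVE, IPI the CPU it runs on so pmu->read() refreshes the value
 * first; if it is INACTIVE, only the time fields are updated.
 */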
  2081. static u64 perf_event_read(struct perf_event *event)
  2082. {
  2083. /*
  2084. * If event is enabled and currently active on a CPU, update the
  2085. * value in the event structure:
  2086. */
  2087. if (event->state == PERF_EVENT_STATE_ACTIVE) {
  2088. smp_call_function_single(event->oncpu,
  2089. __perf_event_read, event, 1);
  2090. } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
  2091. struct perf_event_context *ctx = event->ctx;
  2092. unsigned long flags;
  2093. raw_spin_lock_irqsave(&ctx->lock, flags);
  2094. /*
  2095. * may read while context is not active
  2096. * (e.g., thread is blocked), in that case
  2097. * we cannot update context time
  2098. */
  2099. if (ctx->is_active) {
  2100. update_context_time(ctx);
  2101. update_cgrp_time_from_event(event);
  2102. }
  2103. update_event_times(event);
  2104. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  2105. }
  2106. return perf_event_count(event);
  2107. }
  2108. /*
  2109. * Callchain support
  2110. */
  2111. struct callchain_cpus_entries {
  2112. struct rcu_head rcu_head;
  2113. struct perf_callchain_entry *cpu_entries[0];
  2114. };
  2115. static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
  2116. static atomic_t nr_callchain_events;
  2117. static DEFINE_MUTEX(callchain_mutex);
  2118. struct callchain_cpus_entries *callchain_cpus_entries;
  2119. __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
  2120. struct pt_regs *regs)
  2121. {
  2122. }
  2123. __weak void perf_callchain_user(struct perf_callchain_entry *entry,
  2124. struct pt_regs *regs)
  2125. {
  2126. }
  2127. static void release_callchain_buffers_rcu(struct rcu_head *head)
  2128. {
  2129. struct callchain_cpus_entries *entries;
  2130. int cpu;
  2131. entries = container_of(head, struct callchain_cpus_entries, rcu_head);
  2132. for_each_possible_cpu(cpu)
  2133. kfree(entries->cpu_entries[cpu]);
  2134. kfree(entries);
  2135. }
  2136. static void release_callchain_buffers(void)
  2137. {
  2138. struct callchain_cpus_entries *entries;
  2139. entries = callchain_cpus_entries;
  2140. rcu_assign_pointer(callchain_cpus_entries, NULL);
  2141. call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
  2142. }
  2143. static int alloc_callchain_buffers(void)
  2144. {
  2145. int cpu;
  2146. int size;
  2147. struct callchain_cpus_entries *entries;
  2148. /*
  2149. * We can't use the percpu allocation API for data that can be
  2150. * accessed from NMI. Use a temporary manual per cpu allocation
  2151. * until that gets sorted out.
  2152. */
  2153. size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
  2154. entries = kzalloc(size, GFP_KERNEL);
  2155. if (!entries)
  2156. return -ENOMEM;
  2157. size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
  2158. for_each_possible_cpu(cpu) {
  2159. entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
  2160. cpu_to_node(cpu));
  2161. if (!entries->cpu_entries[cpu])
  2162. goto fail;
  2163. }
  2164. rcu_assign_pointer(callchain_cpus_entries, entries);
  2165. return 0;
  2166. fail:
  2167. for_each_possible_cpu(cpu)
  2168. kfree(entries->cpu_entries[cpu]);
  2169. kfree(entries);
  2170. return -ENOMEM;
  2171. }
  2172. static int get_callchain_buffers(void)
  2173. {
  2174. int err = 0;
  2175. int count;
  2176. mutex_lock(&callchain_mutex);
  2177. count = atomic_inc_return(&nr_callchain_events);
  2178. if (WARN_ON_ONCE(count < 1)) {
  2179. err = -EINVAL;
  2180. goto exit;
  2181. }
  2182. if (count > 1) {
  2183. /* If the allocation failed, give up */
  2184. if (!callchain_cpus_entries)
  2185. err = -ENOMEM;
  2186. goto exit;
  2187. }
  2188. err = alloc_callchain_buffers();
  2189. if (err)
  2190. release_callchain_buffers();
  2191. exit:
  2192. mutex_unlock(&callchain_mutex);
  2193. return err;
  2194. }
  2195. static void put_callchain_buffers(void)
  2196. {
  2197. if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
  2198. release_callchain_buffers();
  2199. mutex_unlock(&callchain_mutex);
  2200. }
  2201. }
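/*
 * Map the current execution context (NMI, hardirq, softirq, task)
 * to one of the PERF_NR_CONTEXTS recursion slots. Returns -1 if we
 * are already nested inside this context.
 */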
  2202. static int get_recursion_context(int *recursion)
  2203. {
  2204. int rctx;
  2205. if (in_nmi())
  2206. rctx = 3;
  2207. else if (in_irq())
  2208. rctx = 2;
  2209. else if (in_softirq())
  2210. rctx = 1;
  2211. else
  2212. rctx = 0;
  2213. if (recursion[rctx])
  2214. return -1;
  2215. recursion[rctx]++;
  2216. barrier();
  2217. return rctx;
  2218. }
  2219. static inline void put_recursion_context(int *recursion, int rctx)
  2220. {
  2221. barrier();
  2222. recursion[rctx]--;
  2223. }
  2224. static struct perf_callchain_entry *get_callchain_entry(int *rctx)
  2225. {
  2226. int cpu;
  2227. struct callchain_cpus_entries *entries;
  2228. *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
  2229. if (*rctx == -1)
  2230. return NULL;
  2231. entries = rcu_dereference(callchain_cpus_entries);
  2232. if (!entries)
  2233. return NULL;
  2234. cpu = smp_processor_id();
  2235. return &entries->cpu_entries[cpu][*rctx];
  2236. }
  2237. static void
  2238. put_callchain_entry(int rctx)
  2239. {
  2240. put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
  2241. }
  2242. static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
  2243. {
  2244. int rctx;
  2245. struct perf_callchain_entry *entry;
  2246. entry = get_callchain_entry(&rctx);
  2247. if (rctx == -1)
  2248. return NULL;
  2249. if (!entry)
  2250. goto exit_put;
  2251. entry->nr = 0;
  2252. if (!user_mode(regs)) {
  2253. perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
  2254. perf_callchain_kernel(entry, regs);
  2255. if (current->mm)
  2256. regs = task_pt_regs(current);
  2257. else
  2258. regs = NULL;
  2259. }
  2260. if (regs) {
  2261. perf_callchain_store(entry, PERF_CONTEXT_USER);
  2262. perf_callchain_user(entry, regs);
  2263. }
  2264. exit_put:
  2265. put_callchain_entry(rctx);
  2266. return entry;
  2267. }
  2268. /*
  2269. * Initialize the perf_event context in a task_struct:
  2270. */
  2271. static void __perf_event_init_context(struct perf_event_context *ctx)
  2272. {
  2273. raw_spin_lock_init(&ctx->lock);
  2274. mutex_init(&ctx->mutex);
  2275. INIT_LIST_HEAD(&ctx->pinned_groups);
  2276. INIT_LIST_HEAD(&ctx->flexible_groups);
  2277. INIT_LIST_HEAD(&ctx->event_list);
  2278. atomic_set(&ctx->refcount, 1);
  2279. }
  2280. static struct perf_event_context *
  2281. alloc_perf_context(struct pmu *pmu, struct task_struct *task)
  2282. {
  2283. struct perf_event_context *ctx;
  2284. ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
  2285. if (!ctx)
  2286. return NULL;
  2287. __perf_event_init_context(ctx);
  2288. if (task) {
  2289. ctx->task = task;
  2290. get_task_struct(task);
  2291. }
  2292. ctx->pmu = pmu;
  2293. return ctx;
  2294. }
  2295. static struct task_struct *
  2296. find_lively_task_by_vpid(pid_t vpid)
  2297. {
  2298. struct task_struct *task;
  2299. int err;
  2300. rcu_read_lock();
  2301. if (!vpid)
  2302. task = current;
  2303. else
  2304. task = find_task_by_vpid(vpid);
  2305. if (task)
  2306. get_task_struct(task);
  2307. rcu_read_unlock();
  2308. if (!task)
  2309. return ERR_PTR(-ESRCH);
  2310. /* Reuse ptrace permission checks for now. */
  2311. err = -EACCES;
  2312. if (!ptrace_may_access(task, PTRACE_MODE_READ))
  2313. goto errout;
  2314. return task;
  2315. errout:
  2316. put_task_struct(task);
  2317. return ERR_PTR(err);
  2318. }
  2319. /*
  2320. * Returns a matching context with refcount and pincount.
  2321. */
  2322. static struct perf_event_context *
  2323. find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
  2324. {
  2325. struct perf_event_context *ctx;
  2326. struct perf_cpu_context *cpuctx;
  2327. unsigned long flags;
  2328. int ctxn, err;
  2329. if (!task) {
  2330. /* Must be root to operate on a CPU event: */
  2331. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  2332. return ERR_PTR(-EACCES);
  2333. /*
2334. * We could be clever and allow attaching an event to an
  2335. * offline CPU and activate it when the CPU comes up, but
  2336. * that's for later.
  2337. */
  2338. if (!cpu_online(cpu))
  2339. return ERR_PTR(-ENODEV);
  2340. cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
  2341. ctx = &cpuctx->ctx;
  2342. get_ctx(ctx);
  2343. ++ctx->pin_count;
  2344. return ctx;
  2345. }
  2346. err = -EINVAL;
  2347. ctxn = pmu->task_ctx_nr;
  2348. if (ctxn < 0)
  2349. goto errout;
  2350. retry:
  2351. ctx = perf_lock_task_context(task, ctxn, &flags);
  2352. if (ctx) {
  2353. unclone_ctx(ctx);
  2354. ++ctx->pin_count;
  2355. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  2356. } else {
  2357. ctx = alloc_perf_context(pmu, task);
  2358. err = -ENOMEM;
  2359. if (!ctx)
  2360. goto errout;
  2361. err = 0;
  2362. mutex_lock(&task->perf_event_mutex);
  2363. /*
2364. * If the task has already passed perf_event_exit_task(),
2365. * we must see PF_EXITING; it takes this mutex too.
  2366. */
  2367. if (task->flags & PF_EXITING)
  2368. err = -ESRCH;
  2369. else if (task->perf_event_ctxp[ctxn])
  2370. err = -EAGAIN;
  2371. else {
  2372. get_ctx(ctx);
  2373. ++ctx->pin_count;
  2374. rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
  2375. }
  2376. mutex_unlock(&task->perf_event_mutex);
  2377. if (unlikely(err)) {
  2378. put_ctx(ctx);
  2379. if (err == -EAGAIN)
  2380. goto retry;
  2381. goto errout;
  2382. }
  2383. }
  2384. return ctx;
  2385. errout:
  2386. return ERR_PTR(err);
  2387. }
  2388. static void perf_event_free_filter(struct perf_event *event);
  2389. static void free_event_rcu(struct rcu_head *head)
  2390. {
  2391. struct perf_event *event;
  2392. event = container_of(head, struct perf_event, rcu_head);
  2393. if (event->ns)
  2394. put_pid_ns(event->ns);
  2395. perf_event_free_filter(event);
  2396. kfree(event);
  2397. }
  2398. static void perf_buffer_put(struct perf_buffer *buffer);
  2399. static void free_event(struct perf_event *event)
  2400. {
  2401. irq_work_sync(&event->pending);
  2402. if (!event->parent) {
  2403. if (event->attach_state & PERF_ATTACH_TASK)
  2404. jump_label_dec(&perf_sched_events);
  2405. if (event->attr.mmap || event->attr.mmap_data)
  2406. atomic_dec(&nr_mmap_events);
  2407. if (event->attr.comm)
  2408. atomic_dec(&nr_comm_events);
  2409. if (event->attr.task)
  2410. atomic_dec(&nr_task_events);
  2411. if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
  2412. put_callchain_buffers();
  2413. if (is_cgroup_event(event)) {
  2414. atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
  2415. jump_label_dec(&perf_sched_events);
  2416. }
  2417. }
  2418. if (event->buffer) {
  2419. perf_buffer_put(event->buffer);
  2420. event->buffer = NULL;
  2421. }
  2422. if (is_cgroup_event(event))
  2423. perf_detach_cgroup(event);
  2424. if (event->destroy)
  2425. event->destroy(event);
  2426. if (event->ctx)
  2427. put_ctx(event->ctx);
  2428. call_rcu(&event->rcu_head, free_event_rcu);
  2429. }
  2430. int perf_event_release_kernel(struct perf_event *event)
  2431. {
  2432. struct perf_event_context *ctx = event->ctx;
  2433. /*
  2434. * Remove from the PMU, can't get re-enabled since we got
  2435. * here because the last ref went.
  2436. */
  2437. perf_event_disable(event);
  2438. WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task;
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex; however, this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
  2451. mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
  2452. raw_spin_lock_irq(&ctx->lock);
  2453. perf_group_detach(event);
  2454. list_del_event(event, ctx);
  2455. raw_spin_unlock_irq(&ctx->lock);
  2456. mutex_unlock(&ctx->mutex);
  2457. free_event(event);
  2458. return 0;
  2459. }
  2460. EXPORT_SYMBOL_GPL(perf_event_release_kernel);
  2461. /*
  2462. * Called when the last reference to the file is gone.
  2463. */
  2464. static int perf_release(struct inode *inode, struct file *file)
  2465. {
  2466. struct perf_event *event = file->private_data;
  2467. struct task_struct *owner;
  2468. file->private_data = NULL;
  2469. rcu_read_lock();
  2470. owner = ACCESS_ONCE(event->owner);
  2471. /*
  2472. * Matches the smp_wmb() in perf_event_exit_task(). If we observe
  2473. * !owner it means the list deletion is complete and we can indeed
  2474. * free this event, otherwise we need to serialize on
  2475. * owner->perf_event_mutex.
  2476. */
  2477. smp_read_barrier_depends();
  2478. if (owner) {
  2479. /*
  2480. * Since delayed_put_task_struct() also drops the last
  2481. * task reference we can safely take a new reference
  2482. * while holding the rcu_read_lock().
  2483. */
  2484. get_task_struct(owner);
  2485. }
  2486. rcu_read_unlock();
  2487. if (owner) {
  2488. mutex_lock(&owner->perf_event_mutex);
  2489. /*
  2490. * We have to re-check the event->owner field, if it is cleared
  2491. * we raced with perf_event_exit_task(), acquiring the mutex
  2492. * ensured they're done, and we can proceed with freeing the
  2493. * event.
  2494. */
  2495. if (event->owner)
  2496. list_del_init(&event->owner_entry);
  2497. mutex_unlock(&owner->perf_event_mutex);
  2498. put_task_struct(owner);
  2499. }
  2500. return perf_event_release_kernel(event);
  2501. }
  2502. u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
  2503. {
  2504. struct perf_event *child;
  2505. u64 total = 0;
  2506. *enabled = 0;
  2507. *running = 0;
  2508. mutex_lock(&event->child_mutex);
  2509. total += perf_event_read(event);
  2510. *enabled += event->total_time_enabled +
  2511. atomic64_read(&event->child_total_time_enabled);
  2512. *running += event->total_time_running +
  2513. atomic64_read(&event->child_total_time_running);
  2514. list_for_each_entry(child, &event->child_list, child_list) {
  2515. total += perf_event_read(child);
  2516. *enabled += child->total_time_enabled;
  2517. *running += child->total_time_running;
  2518. }
  2519. mutex_unlock(&event->child_mutex);
  2520. return total;
  2521. }
  2522. EXPORT_SYMBOL_GPL(perf_event_read_value);
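/*
 * Usage sketch (illustrative, not part of this file): an in-kernel user
 * holding an event can snapshot it together with all inherited children
 * in one call:
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 * 'count' sums the parent and its children; 'enabled' and 'running' let
 * the caller scale the count when the event was time-multiplexed.
 */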
  2523. static int perf_event_read_group(struct perf_event *event,
  2524. u64 read_format, char __user *buf)
  2525. {
  2526. struct perf_event *leader = event->group_leader, *sub;
  2527. int n = 0, size = 0, ret = -EFAULT;
  2528. struct perf_event_context *ctx = leader->ctx;
  2529. u64 values[5];
  2530. u64 count, enabled, running;
  2531. mutex_lock(&ctx->mutex);
  2532. count = perf_event_read_value(leader, &enabled, &running);
  2533. values[n++] = 1 + leader->nr_siblings;
  2534. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  2535. values[n++] = enabled;
  2536. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  2537. values[n++] = running;
  2538. values[n++] = count;
  2539. if (read_format & PERF_FORMAT_ID)
  2540. values[n++] = primary_event_id(leader);
  2541. size = n * sizeof(u64);
  2542. if (copy_to_user(buf, values, size))
  2543. goto unlock;
  2544. ret = size;
  2545. list_for_each_entry(sub, &leader->sibling_list, group_entry) {
  2546. n = 0;
  2547. values[n++] = perf_event_read_value(sub, &enabled, &running);
  2548. if (read_format & PERF_FORMAT_ID)
  2549. values[n++] = primary_event_id(sub);
  2550. size = n * sizeof(u64);
  2551. if (copy_to_user(buf + ret, values, size)) {
  2552. ret = -EFAULT;
  2553. goto unlock;
  2554. }
  2555. ret += size;
  2556. }
  2557. unlock:
  2558. mutex_unlock(&ctx->mutex);
  2559. return ret;
  2560. }
  2561. static int perf_event_read_one(struct perf_event *event,
  2562. u64 read_format, char __user *buf)
  2563. {
  2564. u64 enabled, running;
  2565. u64 values[4];
  2566. int n = 0;
  2567. values[n++] = perf_event_read_value(event, &enabled, &running);
  2568. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  2569. values[n++] = enabled;
  2570. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  2571. values[n++] = running;
  2572. if (read_format & PERF_FORMAT_ID)
  2573. values[n++] = primary_event_id(event);
  2574. if (copy_to_user(buf, values, n * sizeof(u64)))
  2575. return -EFAULT;
  2576. return n * sizeof(u64);
  2577. }
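/*
 * For reference, a hedged sketch of the read() layout implied by the two
 * helpers above. Without PERF_FORMAT_GROUP a read returns:
 *
 *	struct {
 *		u64 value;
 *		u64 time_enabled;	-- if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	-- if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		u64 id;			-- if PERF_FORMAT_ID
 *	};
 *
 * With PERF_FORMAT_GROUP the leader emits { nr, time_enabled, time_running }
 * followed by one { value, id } pair per group member.
 */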
/*
 * Read the performance event - simple non-blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled onto the CPU at some point).
	 */
  2591. if (event->state == PERF_EVENT_STATE_ERROR)
  2592. return 0;
  2593. if (count < event->read_size)
  2594. return -ENOSPC;
  2595. WARN_ON_ONCE(event->ctx->parent_ctx);
  2596. if (read_format & PERF_FORMAT_GROUP)
  2597. ret = perf_event_read_group(event, read_format, buf);
  2598. else
  2599. ret = perf_event_read_one(event, read_format, buf);
  2600. return ret;
  2601. }
  2602. static ssize_t
  2603. perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  2604. {
  2605. struct perf_event *event = file->private_data;
  2606. return perf_read_hw(event, buf, count);
  2607. }
  2608. static unsigned int perf_poll(struct file *file, poll_table *wait)
  2609. {
  2610. struct perf_event *event = file->private_data;
  2611. struct perf_buffer *buffer;
  2612. unsigned int events = POLL_HUP;
  2613. rcu_read_lock();
  2614. buffer = rcu_dereference(event->buffer);
  2615. if (buffer)
  2616. events = atomic_xchg(&buffer->poll, 0);
  2617. rcu_read_unlock();
  2618. poll_wait(file, &event->waitq, wait);
  2619. return events;
  2620. }
  2621. static void perf_event_reset(struct perf_event *event)
  2622. {
  2623. (void)perf_event_read(event);
  2624. local64_set(&event->count, 0);
  2625. perf_event_update_userpage(event);
  2626. }
  2627. /*
  2628. * Holding the top-level event's child_mutex means that any
  2629. * descendant process that has inherited this event will block
  2630. * in sync_child_event if it goes to exit, thus satisfying the
  2631. * task existence requirements of perf_event_enable/disable.
  2632. */
  2633. static void perf_event_for_each_child(struct perf_event *event,
  2634. void (*func)(struct perf_event *))
  2635. {
  2636. struct perf_event *child;
  2637. WARN_ON_ONCE(event->ctx->parent_ctx);
  2638. mutex_lock(&event->child_mutex);
  2639. func(event);
  2640. list_for_each_entry(child, &event->child_list, child_list)
  2641. func(child);
  2642. mutex_unlock(&event->child_mutex);
  2643. }
  2644. static void perf_event_for_each(struct perf_event *event,
  2645. void (*func)(struct perf_event *))
  2646. {
  2647. struct perf_event_context *ctx = event->ctx;
  2648. struct perf_event *sibling;
  2649. WARN_ON_ONCE(ctx->parent_ctx);
  2650. mutex_lock(&ctx->mutex);
  2651. event = event->group_leader;
  2652. perf_event_for_each_child(event, func);
  2653. func(event);
  2654. list_for_each_entry(sibling, &event->sibling_list, group_entry)
  2655. perf_event_for_each_child(event, func);
  2656. mutex_unlock(&ctx->mutex);
  2657. }
  2658. static int perf_event_period(struct perf_event *event, u64 __user *arg)
  2659. {
  2660. struct perf_event_context *ctx = event->ctx;
  2661. int ret = 0;
  2662. u64 value;
  2663. if (!is_sampling_event(event))
  2664. return -EINVAL;
  2665. if (copy_from_user(&value, arg, sizeof(value)))
  2666. return -EFAULT;
  2667. if (!value)
  2668. return -EINVAL;
  2669. raw_spin_lock_irq(&ctx->lock);
  2670. if (event->attr.freq) {
  2671. if (value > sysctl_perf_event_sample_rate) {
  2672. ret = -EINVAL;
  2673. goto unlock;
  2674. }
  2675. event->attr.sample_freq = value;
  2676. } else {
  2677. event->attr.sample_period = value;
  2678. event->hw.sample_period = value;
  2679. }
  2680. unlock:
  2681. raw_spin_unlock_irq(&ctx->lock);
  2682. return ret;
  2683. }
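/*
 * Userspace usage sketch (illustrative): the argument to
 * PERF_EVENT_IOC_PERIOD is a pointer to the new u64 period (or frequency,
 * when the event was created with attr.freq set):
 *
 *	u64 new_period = 10000;
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period) < 0)
 *		perror("PERF_EVENT_IOC_PERIOD");
 */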
  2684. static const struct file_operations perf_fops;
  2685. static struct perf_event *perf_fget_light(int fd, int *fput_needed)
  2686. {
  2687. struct file *file;
  2688. file = fget_light(fd, fput_needed);
  2689. if (!file)
  2690. return ERR_PTR(-EBADF);
  2691. if (file->f_op != &perf_fops) {
  2692. fput_light(file, *fput_needed);
  2693. *fput_needed = 0;
  2694. return ERR_PTR(-EBADF);
  2695. }
  2696. return file->private_data;
  2697. }
  2698. static int perf_event_set_output(struct perf_event *event,
  2699. struct perf_event *output_event);
  2700. static int perf_event_set_filter(struct perf_event *event, void __user *arg);
  2701. static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  2702. {
  2703. struct perf_event *event = file->private_data;
  2704. void (*func)(struct perf_event *);
  2705. u32 flags = arg;
  2706. switch (cmd) {
  2707. case PERF_EVENT_IOC_ENABLE:
  2708. func = perf_event_enable;
  2709. break;
  2710. case PERF_EVENT_IOC_DISABLE:
  2711. func = perf_event_disable;
  2712. break;
  2713. case PERF_EVENT_IOC_RESET:
  2714. func = perf_event_reset;
  2715. break;
  2716. case PERF_EVENT_IOC_REFRESH:
  2717. return perf_event_refresh(event, arg);
  2718. case PERF_EVENT_IOC_PERIOD:
  2719. return perf_event_period(event, (u64 __user *)arg);
  2720. case PERF_EVENT_IOC_SET_OUTPUT:
  2721. {
  2722. struct perf_event *output_event = NULL;
  2723. int fput_needed = 0;
  2724. int ret;
  2725. if (arg != -1) {
  2726. output_event = perf_fget_light(arg, &fput_needed);
  2727. if (IS_ERR(output_event))
  2728. return PTR_ERR(output_event);
  2729. }
  2730. ret = perf_event_set_output(event, output_event);
  2731. if (output_event)
  2732. fput_light(output_event->filp, fput_needed);
  2733. return ret;
  2734. }
  2735. case PERF_EVENT_IOC_SET_FILTER:
  2736. return perf_event_set_filter(event, (void __user *)arg);
  2737. default:
  2738. return -ENOTTY;
  2739. }
  2740. if (flags & PERF_IOC_FLAG_GROUP)
  2741. perf_event_for_each(event, func);
  2742. else
  2743. perf_event_for_each_child(event, func);
  2744. return 0;
  2745. }
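/*
 * Userspace usage sketch (illustrative): PERF_IOC_FLAG_GROUP makes the
 * enable/disable/reset commands walk the whole group via
 * perf_event_for_each() above instead of only the addressed event:
 *
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */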
  2746. int perf_event_task_enable(void)
  2747. {
  2748. struct perf_event *event;
  2749. mutex_lock(&current->perf_event_mutex);
  2750. list_for_each_entry(event, &current->perf_event_list, owner_entry)
  2751. perf_event_for_each_child(event, perf_event_enable);
  2752. mutex_unlock(&current->perf_event_mutex);
  2753. return 0;
  2754. }
  2755. int perf_event_task_disable(void)
  2756. {
  2757. struct perf_event *event;
  2758. mutex_lock(&current->perf_event_mutex);
  2759. list_for_each_entry(event, &current->perf_event_list, owner_entry)
  2760. perf_event_for_each_child(event, perf_event_disable);
  2761. mutex_unlock(&current->perf_event_mutex);
  2762. return 0;
  2763. }
  2764. #ifndef PERF_EVENT_INDEX_OFFSET
  2765. # define PERF_EVENT_INDEX_OFFSET 0
  2766. #endif
  2767. static int perf_event_index(struct perf_event *event)
  2768. {
  2769. if (event->hw.state & PERF_HES_STOPPED)
  2770. return 0;
  2771. if (event->state != PERF_EVENT_STATE_ACTIVE)
  2772. return 0;
  2773. return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
  2774. }
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
 * code calls this from NMI context.
 */
  2780. void perf_event_update_userpage(struct perf_event *event)
  2781. {
  2782. struct perf_event_mmap_page *userpg;
  2783. struct perf_buffer *buffer;
  2784. rcu_read_lock();
  2785. buffer = rcu_dereference(event->buffer);
  2786. if (!buffer)
  2787. goto unlock;
  2788. userpg = buffer->user_page;
  2789. /*
  2790. * Disable preemption so as to not let the corresponding user-space
  2791. * spin too long if we get preempted.
  2792. */
  2793. preempt_disable();
  2794. ++userpg->lock;
  2795. barrier();
  2796. userpg->index = perf_event_index(event);
  2797. userpg->offset = perf_event_count(event);
  2798. if (event->state == PERF_EVENT_STATE_ACTIVE)
  2799. userpg->offset -= local64_read(&event->hw.prev_count);
  2800. userpg->time_enabled = event->total_time_enabled +
  2801. atomic64_read(&event->child_total_time_enabled);
  2802. userpg->time_running = event->total_time_running +
  2803. atomic64_read(&event->child_total_time_running);
  2804. barrier();
  2805. ++userpg->lock;
  2806. preempt_enable();
  2807. unlock:
  2808. rcu_read_unlock();
  2809. }
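/*
 * Userspace counterpart (hedged sketch): readers of the mmap()ed
 * perf_event_mmap_page are expected to retry around the 'lock' sequence
 * count that this function bumps on both sides of the update:
 *
 *	struct perf_event_mmap_page *pc = base;	-- the mapped control page
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += read_pmc(idx - 1);	-- arch specific, e.g. RDPMC
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * read_pmc() is a placeholder for whatever user-level counter read the
 * architecture provides; an index of 0 means the event is not currently on
 * a hardware counter and the reader should fall back to read().
 */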
  2810. static unsigned long perf_data_size(struct perf_buffer *buffer);
  2811. static void
  2812. perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
  2813. {
  2814. long max_size = perf_data_size(buffer);
  2815. if (watermark)
  2816. buffer->watermark = min(max_size, watermark);
  2817. if (!buffer->watermark)
  2818. buffer->watermark = max_size / 2;
  2819. if (flags & PERF_BUFFER_WRITABLE)
  2820. buffer->writable = 1;
  2821. atomic_set(&buffer->refcount, 1);
  2822. }
  2823. #ifndef CONFIG_PERF_USE_VMALLOC
  2824. /*
  2825. * Back perf_mmap() with regular GFP_KERNEL-0 pages.
  2826. */
  2827. static struct page *
  2828. perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
  2829. {
  2830. if (pgoff > buffer->nr_pages)
  2831. return NULL;
  2832. if (pgoff == 0)
  2833. return virt_to_page(buffer->user_page);
  2834. return virt_to_page(buffer->data_pages[pgoff - 1]);
  2835. }
  2836. static void *perf_mmap_alloc_page(int cpu)
  2837. {
  2838. struct page *page;
  2839. int node;
  2840. node = (cpu == -1) ? cpu : cpu_to_node(cpu);
  2841. page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
  2842. if (!page)
  2843. return NULL;
  2844. return page_address(page);
  2845. }
  2846. static struct perf_buffer *
  2847. perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
  2848. {
  2849. struct perf_buffer *buffer;
  2850. unsigned long size;
  2851. int i;
  2852. size = sizeof(struct perf_buffer);
  2853. size += nr_pages * sizeof(void *);
  2854. buffer = kzalloc(size, GFP_KERNEL);
  2855. if (!buffer)
  2856. goto fail;
  2857. buffer->user_page = perf_mmap_alloc_page(cpu);
  2858. if (!buffer->user_page)
  2859. goto fail_user_page;
  2860. for (i = 0; i < nr_pages; i++) {
  2861. buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
  2862. if (!buffer->data_pages[i])
  2863. goto fail_data_pages;
  2864. }
  2865. buffer->nr_pages = nr_pages;
  2866. perf_buffer_init(buffer, watermark, flags);
  2867. return buffer;
  2868. fail_data_pages:
  2869. for (i--; i >= 0; i--)
  2870. free_page((unsigned long)buffer->data_pages[i]);
  2871. free_page((unsigned long)buffer->user_page);
  2872. fail_user_page:
  2873. kfree(buffer);
  2874. fail:
  2875. return NULL;
  2876. }
  2877. static void perf_mmap_free_page(unsigned long addr)
  2878. {
  2879. struct page *page = virt_to_page((void *)addr);
  2880. page->mapping = NULL;
  2881. __free_page(page);
  2882. }
  2883. static void perf_buffer_free(struct perf_buffer *buffer)
  2884. {
  2885. int i;
  2886. perf_mmap_free_page((unsigned long)buffer->user_page);
  2887. for (i = 0; i < buffer->nr_pages; i++)
  2888. perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
  2889. kfree(buffer);
  2890. }
  2891. static inline int page_order(struct perf_buffer *buffer)
  2892. {
  2893. return 0;
  2894. }
  2895. #else
  2896. /*
  2897. * Back perf_mmap() with vmalloc memory.
  2898. *
  2899. * Required for architectures that have d-cache aliasing issues.
  2900. */
  2901. static inline int page_order(struct perf_buffer *buffer)
  2902. {
  2903. return buffer->page_order;
  2904. }
  2905. static struct page *
  2906. perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
  2907. {
  2908. if (pgoff > (1UL << page_order(buffer)))
  2909. return NULL;
  2910. return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
  2911. }
  2912. static void perf_mmap_unmark_page(void *addr)
  2913. {
  2914. struct page *page = vmalloc_to_page(addr);
  2915. page->mapping = NULL;
  2916. }
  2917. static void perf_buffer_free_work(struct work_struct *work)
  2918. {
  2919. struct perf_buffer *buffer;
  2920. void *base;
  2921. int i, nr;
  2922. buffer = container_of(work, struct perf_buffer, work);
  2923. nr = 1 << page_order(buffer);
  2924. base = buffer->user_page;
  2925. for (i = 0; i < nr + 1; i++)
  2926. perf_mmap_unmark_page(base + (i * PAGE_SIZE));
  2927. vfree(base);
  2928. kfree(buffer);
  2929. }
  2930. static void perf_buffer_free(struct perf_buffer *buffer)
  2931. {
  2932. schedule_work(&buffer->work);
  2933. }
  2934. static struct perf_buffer *
  2935. perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
  2936. {
  2937. struct perf_buffer *buffer;
  2938. unsigned long size;
  2939. void *all_buf;
  2940. size = sizeof(struct perf_buffer);
  2941. size += sizeof(void *);
  2942. buffer = kzalloc(size, GFP_KERNEL);
  2943. if (!buffer)
  2944. goto fail;
  2945. INIT_WORK(&buffer->work, perf_buffer_free_work);
  2946. all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
  2947. if (!all_buf)
  2948. goto fail_all_buf;
  2949. buffer->user_page = all_buf;
  2950. buffer->data_pages[0] = all_buf + PAGE_SIZE;
  2951. buffer->page_order = ilog2(nr_pages);
  2952. buffer->nr_pages = 1;
  2953. perf_buffer_init(buffer, watermark, flags);
  2954. return buffer;
  2955. fail_all_buf:
  2956. kfree(buffer);
  2957. fail:
  2958. return NULL;
  2959. }
  2960. #endif
  2961. static unsigned long perf_data_size(struct perf_buffer *buffer)
  2962. {
  2963. return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
  2964. }
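/*
 * Size arithmetic, for reference: with the page-by-page allocator above,
 * page_order() is always 0, so the data area is simply nr_pages * PAGE_SIZE.
 * With the vmalloc variant, nr_pages is forced to 1 and page_order encodes
 * the requested size; e.g. a request for 8 data pages with 4KiB pages gives
 *
 *	perf_data_size() = 1 << (12 + ilog2(8)) = 32768 bytes
 *
 * i.e. the same 32KiB either way.
 */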
  2965. static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  2966. {
  2967. struct perf_event *event = vma->vm_file->private_data;
  2968. struct perf_buffer *buffer;
  2969. int ret = VM_FAULT_SIGBUS;
  2970. if (vmf->flags & FAULT_FLAG_MKWRITE) {
  2971. if (vmf->pgoff == 0)
  2972. ret = 0;
  2973. return ret;
  2974. }
  2975. rcu_read_lock();
  2976. buffer = rcu_dereference(event->buffer);
  2977. if (!buffer)
  2978. goto unlock;
  2979. if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
  2980. goto unlock;
  2981. vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
  2982. if (!vmf->page)
  2983. goto unlock;
  2984. get_page(vmf->page);
  2985. vmf->page->mapping = vma->vm_file->f_mapping;
  2986. vmf->page->index = vmf->pgoff;
  2987. ret = 0;
  2988. unlock:
  2989. rcu_read_unlock();
  2990. return ret;
  2991. }
  2992. static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
  2993. {
  2994. struct perf_buffer *buffer;
  2995. buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
  2996. perf_buffer_free(buffer);
  2997. }
  2998. static struct perf_buffer *perf_buffer_get(struct perf_event *event)
  2999. {
  3000. struct perf_buffer *buffer;
  3001. rcu_read_lock();
  3002. buffer = rcu_dereference(event->buffer);
  3003. if (buffer) {
  3004. if (!atomic_inc_not_zero(&buffer->refcount))
  3005. buffer = NULL;
  3006. }
  3007. rcu_read_unlock();
  3008. return buffer;
  3009. }
  3010. static void perf_buffer_put(struct perf_buffer *buffer)
  3011. {
  3012. if (!atomic_dec_and_test(&buffer->refcount))
  3013. return;
  3014. call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
  3015. }
  3016. static void perf_mmap_open(struct vm_area_struct *vma)
  3017. {
  3018. struct perf_event *event = vma->vm_file->private_data;
  3019. atomic_inc(&event->mmap_count);
  3020. }
  3021. static void perf_mmap_close(struct vm_area_struct *vma)
  3022. {
  3023. struct perf_event *event = vma->vm_file->private_data;
  3024. if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
  3025. unsigned long size = perf_data_size(event->buffer);
  3026. struct user_struct *user = event->mmap_user;
  3027. struct perf_buffer *buffer = event->buffer;
  3028. atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
  3029. vma->vm_mm->locked_vm -= event->mmap_locked;
  3030. rcu_assign_pointer(event->buffer, NULL);
  3031. mutex_unlock(&event->mmap_mutex);
  3032. perf_buffer_put(buffer);
  3033. free_uid(user);
  3034. }
  3035. }
  3036. static const struct vm_operations_struct perf_mmap_vmops = {
  3037. .open = perf_mmap_open,
  3038. .close = perf_mmap_close,
  3039. .fault = perf_mmap_fault,
  3040. .page_mkwrite = perf_mmap_fault,
  3041. };
  3042. static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  3043. {
  3044. struct perf_event *event = file->private_data;
  3045. unsigned long user_locked, user_lock_limit;
  3046. struct user_struct *user = current_user();
  3047. unsigned long locked, lock_limit;
  3048. struct perf_buffer *buffer;
  3049. unsigned long vma_size;
  3050. unsigned long nr_pages;
  3051. long user_extra, extra;
  3052. int ret = 0, flags = 0;
  3053. /*
  3054. * Don't allow mmap() of inherited per-task counters. This would
  3055. * create a performance issue due to all children writing to the
  3056. * same buffer.
  3057. */
  3058. if (event->cpu == -1 && event->attr.inherit)
  3059. return -EINVAL;
  3060. if (!(vma->vm_flags & VM_SHARED))
  3061. return -EINVAL;
  3062. vma_size = vma->vm_end - vma->vm_start;
  3063. nr_pages = (vma_size / PAGE_SIZE) - 1;
  3064. /*
  3065. * If we have buffer pages ensure they're a power-of-two number, so we
  3066. * can do bitmasks instead of modulo.
  3067. */
  3068. if (nr_pages != 0 && !is_power_of_2(nr_pages))
  3069. return -EINVAL;
  3070. if (vma_size != PAGE_SIZE * (1 + nr_pages))
  3071. return -EINVAL;
  3072. if (vma->vm_pgoff != 0)
  3073. return -EINVAL;
  3074. WARN_ON_ONCE(event->ctx->parent_ctx);
  3075. mutex_lock(&event->mmap_mutex);
  3076. if (event->buffer) {
  3077. if (event->buffer->nr_pages == nr_pages)
  3078. atomic_inc(&event->buffer->refcount);
  3079. else
  3080. ret = -EINVAL;
  3081. goto unlock;
  3082. }
  3083. user_extra = nr_pages + 1;
  3084. user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
  3085. /*
  3086. * Increase the limit linearly with more CPUs:
  3087. */
  3088. user_lock_limit *= num_online_cpus();
  3089. user_locked = atomic_long_read(&user->locked_vm) + user_extra;
  3090. extra = 0;
  3091. if (user_locked > user_lock_limit)
  3092. extra = user_locked - user_lock_limit;
  3093. lock_limit = rlimit(RLIMIT_MEMLOCK);
  3094. lock_limit >>= PAGE_SHIFT;
  3095. locked = vma->vm_mm->locked_vm + extra;
  3096. if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
  3097. !capable(CAP_IPC_LOCK)) {
  3098. ret = -EPERM;
  3099. goto unlock;
  3100. }
  3101. WARN_ON(event->buffer);
  3102. if (vma->vm_flags & VM_WRITE)
  3103. flags |= PERF_BUFFER_WRITABLE;
  3104. buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
  3105. event->cpu, flags);
  3106. if (!buffer) {
  3107. ret = -ENOMEM;
  3108. goto unlock;
  3109. }
  3110. rcu_assign_pointer(event->buffer, buffer);
  3111. atomic_long_add(user_extra, &user->locked_vm);
  3112. event->mmap_locked = extra;
  3113. event->mmap_user = get_current_user();
  3114. vma->vm_mm->locked_vm += event->mmap_locked;
  3115. unlock:
  3116. if (!ret)
  3117. atomic_inc(&event->mmap_count);
  3118. mutex_unlock(&event->mmap_mutex);
  3119. vma->vm_flags |= VM_RESERVED;
  3120. vma->vm_ops = &perf_mmap_vmops;
  3121. return ret;
  3122. }
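/*
 * Userspace usage sketch (illustrative): the mapping must be shared, start
 * at offset 0, and cover one control page plus a power-of-two number of
 * data pages, matching the checks above:
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	void *base  = mmap(NULL, (1 + 8) * page, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);
 *
 * The first page is the perf_event_mmap_page control page; the remaining
 * eight pages form the ring buffer.
 */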
  3123. static int perf_fasync(int fd, struct file *filp, int on)
  3124. {
  3125. struct inode *inode = filp->f_path.dentry->d_inode;
  3126. struct perf_event *event = filp->private_data;
  3127. int retval;
  3128. mutex_lock(&inode->i_mutex);
  3129. retval = fasync_helper(fd, filp, on, &event->fasync);
  3130. mutex_unlock(&inode->i_mutex);
  3131. if (retval < 0)
  3132. return retval;
  3133. return 0;
  3134. }
  3135. static const struct file_operations perf_fops = {
  3136. .llseek = no_llseek,
  3137. .release = perf_release,
  3138. .read = perf_read,
  3139. .poll = perf_poll,
  3140. .unlocked_ioctl = perf_ioctl,
  3141. .compat_ioctl = perf_ioctl,
  3142. .mmap = perf_mmap,
  3143. .fasync = perf_fasync,
  3144. };
  3145. /*
  3146. * Perf event wakeup
  3147. *
  3148. * If there's data, ensure we set the poll() state and publish everything
  3149. * to user-space before waking everybody up.
  3150. */
  3151. void perf_event_wakeup(struct perf_event *event)
  3152. {
  3153. wake_up_all(&event->waitq);
  3154. if (event->pending_kill) {
  3155. kill_fasync(&event->fasync, SIGIO, event->pending_kill);
  3156. event->pending_kill = 0;
  3157. }
  3158. }
  3159. static void perf_pending_event(struct irq_work *entry)
  3160. {
  3161. struct perf_event *event = container_of(entry,
  3162. struct perf_event, pending);
  3163. if (event->pending_disable) {
  3164. event->pending_disable = 0;
  3165. __perf_event_disable(event);
  3166. }
  3167. if (event->pending_wakeup) {
  3168. event->pending_wakeup = 0;
  3169. perf_event_wakeup(event);
  3170. }
  3171. }
  3172. /*
  3173. * We assume there is only KVM supporting the callbacks.
  3174. * Later on, we might change it to a list if there is
  3175. * another virtualization implementation supporting the callbacks.
  3176. */
  3177. struct perf_guest_info_callbacks *perf_guest_cbs;
  3178. int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  3179. {
  3180. perf_guest_cbs = cbs;
  3181. return 0;
  3182. }
  3183. EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
  3184. int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
  3185. {
  3186. perf_guest_cbs = NULL;
  3187. return 0;
  3188. }
  3189. EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
  3190. /*
  3191. * Output
  3192. */
  3193. static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
  3194. unsigned long offset, unsigned long head)
  3195. {
  3196. unsigned long mask;
  3197. if (!buffer->writable)
  3198. return true;
  3199. mask = perf_data_size(buffer) - 1;
  3200. offset = (offset - tail) & mask;
  3201. head = (head - tail) & mask;
  3202. if ((int)(head - offset) < 0)
  3203. return false;
  3204. return true;
  3205. }
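/*
 * Worked example of the check above (numbers illustrative): with a 64KiB
 * writable buffer the mask is 0xffff. If the consumer's tail is at 0x14000
 * and the producer's head (offset) is at 0x23f00, 0xff00 bytes are still
 * unread. A 0x200 byte record would advance head to 0x24100, and
 * (head - tail) & mask == 0x100 is then smaller than (offset - tail) & mask,
 * so the signed difference goes negative and the caller drops the record
 * (bumping buffer->lost) instead of overwriting unread data.
 */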
  3206. static void perf_output_wakeup(struct perf_output_handle *handle)
  3207. {
  3208. atomic_set(&handle->buffer->poll, POLL_IN);
  3209. if (handle->nmi) {
  3210. handle->event->pending_wakeup = 1;
  3211. irq_work_queue(&handle->event->pending);
  3212. } else
  3213. perf_event_wakeup(handle->event);
  3214. }
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs, we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
  3223. static void perf_output_get_handle(struct perf_output_handle *handle)
  3224. {
  3225. struct perf_buffer *buffer = handle->buffer;
  3226. preempt_disable();
  3227. local_inc(&buffer->nest);
  3228. handle->wakeup = local_read(&buffer->wakeup);
  3229. }
  3230. static void perf_output_put_handle(struct perf_output_handle *handle)
  3231. {
  3232. struct perf_buffer *buffer = handle->buffer;
  3233. unsigned long head;
  3234. again:
  3235. head = local_read(&buffer->head);
  3236. /*
  3237. * IRQ/NMI can happen here, which means we can miss a head update.
  3238. */
  3239. if (!local_dec_and_test(&buffer->nest))
  3240. goto out;
	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by local_dec_and_test() to order the buffer->head read and this
	 * write.
	 */
	buffer->user_page->data_head = head;

	/*
	 * Now check if we missed an update; rely on the (compiler)
	 * barrier in local_dec_and_test() to re-read buffer->head.
	 */
  3251. if (unlikely(head != local_read(&buffer->head))) {
  3252. local_inc(&buffer->nest);
  3253. goto again;
  3254. }
  3255. if (handle->wakeup != local_read(&buffer->wakeup))
  3256. perf_output_wakeup(handle);
  3257. out:
  3258. preempt_enable();
  3259. }
  3260. __always_inline void perf_output_copy(struct perf_output_handle *handle,
  3261. const void *buf, unsigned int len)
  3262. {
  3263. do {
  3264. unsigned long size = min_t(unsigned long, handle->size, len);
  3265. memcpy(handle->addr, buf, size);
  3266. len -= size;
  3267. handle->addr += size;
  3268. buf += size;
  3269. handle->size -= size;
  3270. if (!handle->size) {
  3271. struct perf_buffer *buffer = handle->buffer;
  3272. handle->page++;
  3273. handle->page &= buffer->nr_pages - 1;
  3274. handle->addr = buffer->data_pages[handle->page];
  3275. handle->size = PAGE_SIZE << page_order(buffer);
  3276. }
  3277. } while (len);
  3278. }
  3279. static void __perf_event_header__init_id(struct perf_event_header *header,
  3280. struct perf_sample_data *data,
  3281. struct perf_event *event)
  3282. {
  3283. u64 sample_type = event->attr.sample_type;
  3284. data->type = sample_type;
  3285. header->size += event->id_header_size;
  3286. if (sample_type & PERF_SAMPLE_TID) {
  3287. /* namespace issues */
  3288. data->tid_entry.pid = perf_event_pid(event, current);
  3289. data->tid_entry.tid = perf_event_tid(event, current);
  3290. }
  3291. if (sample_type & PERF_SAMPLE_TIME)
  3292. data->time = perf_clock();
  3293. if (sample_type & PERF_SAMPLE_ID)
  3294. data->id = primary_event_id(event);
  3295. if (sample_type & PERF_SAMPLE_STREAM_ID)
  3296. data->stream_id = event->id;
  3297. if (sample_type & PERF_SAMPLE_CPU) {
  3298. data->cpu_entry.cpu = raw_smp_processor_id();
  3299. data->cpu_entry.reserved = 0;
  3300. }
  3301. }
  3302. static void perf_event_header__init_id(struct perf_event_header *header,
  3303. struct perf_sample_data *data,
  3304. struct perf_event *event)
  3305. {
  3306. if (event->attr.sample_id_all)
  3307. __perf_event_header__init_id(header, data, event);
  3308. }
  3309. static void __perf_event__output_id_sample(struct perf_output_handle *handle,
  3310. struct perf_sample_data *data)
  3311. {
  3312. u64 sample_type = data->type;
  3313. if (sample_type & PERF_SAMPLE_TID)
  3314. perf_output_put(handle, data->tid_entry);
  3315. if (sample_type & PERF_SAMPLE_TIME)
  3316. perf_output_put(handle, data->time);
  3317. if (sample_type & PERF_SAMPLE_ID)
  3318. perf_output_put(handle, data->id);
  3319. if (sample_type & PERF_SAMPLE_STREAM_ID)
  3320. perf_output_put(handle, data->stream_id);
  3321. if (sample_type & PERF_SAMPLE_CPU)
  3322. perf_output_put(handle, data->cpu_entry);
  3323. }
  3324. static void perf_event__output_id_sample(struct perf_event *event,
  3325. struct perf_output_handle *handle,
  3326. struct perf_sample_data *sample)
  3327. {
  3328. if (event->attr.sample_id_all)
  3329. __perf_event__output_id_sample(handle, sample);
  3330. }
  3331. int perf_output_begin(struct perf_output_handle *handle,
  3332. struct perf_event *event, unsigned int size,
  3333. int nmi, int sample)
  3334. {
  3335. struct perf_buffer *buffer;
  3336. unsigned long tail, offset, head;
  3337. int have_lost;
  3338. struct perf_sample_data sample_data;
  3339. struct {
  3340. struct perf_event_header header;
  3341. u64 id;
  3342. u64 lost;
  3343. } lost_event;
  3344. rcu_read_lock();
  3345. /*
  3346. * For inherited events we send all the output towards the parent.
  3347. */
  3348. if (event->parent)
  3349. event = event->parent;
  3350. buffer = rcu_dereference(event->buffer);
  3351. if (!buffer)
  3352. goto out;
  3353. handle->buffer = buffer;
  3354. handle->event = event;
  3355. handle->nmi = nmi;
  3356. handle->sample = sample;
  3357. if (!buffer->nr_pages)
  3358. goto out;
  3359. have_lost = local_read(&buffer->lost);
  3360. if (have_lost) {
  3361. lost_event.header.size = sizeof(lost_event);
  3362. perf_event_header__init_id(&lost_event.header, &sample_data,
  3363. event);
  3364. size += lost_event.header.size;
  3365. }
  3366. perf_output_get_handle(handle);
  3367. do {
		/*
		 * Userspace could choose to issue an mb() before updating the
		 * tail pointer, so that all reads complete before the
		 * write is issued.
		 */
  3373. tail = ACCESS_ONCE(buffer->user_page->data_tail);
  3374. smp_rmb();
  3375. offset = head = local_read(&buffer->head);
  3376. head += size;
  3377. if (unlikely(!perf_output_space(buffer, tail, offset, head)))
  3378. goto fail;
  3379. } while (local_cmpxchg(&buffer->head, offset, head) != offset);
  3380. if (head - local_read(&buffer->wakeup) > buffer->watermark)
  3381. local_add(buffer->watermark, &buffer->wakeup);
  3382. handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
  3383. handle->page &= buffer->nr_pages - 1;
  3384. handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
  3385. handle->addr = buffer->data_pages[handle->page];
  3386. handle->addr += handle->size;
  3387. handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
  3388. if (have_lost) {
  3389. lost_event.header.type = PERF_RECORD_LOST;
  3390. lost_event.header.misc = 0;
  3391. lost_event.id = event->id;
  3392. lost_event.lost = local_xchg(&buffer->lost, 0);
  3393. perf_output_put(handle, lost_event);
  3394. perf_event__output_id_sample(event, handle, &sample_data);
  3395. }
  3396. return 0;
  3397. fail:
  3398. local_inc(&buffer->lost);
  3399. perf_output_put_handle(handle);
  3400. out:
  3401. rcu_read_unlock();
  3402. return -ENOSPC;
  3403. }
  3404. void perf_output_end(struct perf_output_handle *handle)
  3405. {
  3406. struct perf_event *event = handle->event;
  3407. struct perf_buffer *buffer = handle->buffer;
  3408. int wakeup_events = event->attr.wakeup_events;
  3409. if (handle->sample && wakeup_events) {
  3410. int events = local_inc_return(&buffer->events);
  3411. if (events >= wakeup_events) {
  3412. local_sub(wakeup_events, &buffer->events);
  3413. local_inc(&buffer->wakeup);
  3414. }
  3415. }
  3416. perf_output_put_handle(handle);
  3417. rcu_read_unlock();
  3418. }
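/*
 * Typical producer sequence (sketch; this is the pattern the record
 * emitters below follow):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, sizeof(rec), nmi, 0))
 *		return;			-- no buffer or no space
 *	perf_output_put(&handle, rec);
 *	perf_output_end(&handle);
 */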
  3419. static void perf_output_read_one(struct perf_output_handle *handle,
  3420. struct perf_event *event,
  3421. u64 enabled, u64 running)
  3422. {
  3423. u64 read_format = event->attr.read_format;
  3424. u64 values[4];
  3425. int n = 0;
  3426. values[n++] = perf_event_count(event);
  3427. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
  3428. values[n++] = enabled +
  3429. atomic64_read(&event->child_total_time_enabled);
  3430. }
  3431. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
  3432. values[n++] = running +
  3433. atomic64_read(&event->child_total_time_running);
  3434. }
  3435. if (read_format & PERF_FORMAT_ID)
  3436. values[n++] = primary_event_id(event);
  3437. perf_output_copy(handle, values, n * sizeof(u64));
  3438. }
  3439. /*
  3440. * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  3441. */
  3442. static void perf_output_read_group(struct perf_output_handle *handle,
  3443. struct perf_event *event,
  3444. u64 enabled, u64 running)
  3445. {
  3446. struct perf_event *leader = event->group_leader, *sub;
  3447. u64 read_format = event->attr.read_format;
  3448. u64 values[5];
  3449. int n = 0;
  3450. values[n++] = 1 + leader->nr_siblings;
  3451. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  3452. values[n++] = enabled;
  3453. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  3454. values[n++] = running;
  3455. if (leader != event)
  3456. leader->pmu->read(leader);
  3457. values[n++] = perf_event_count(leader);
  3458. if (read_format & PERF_FORMAT_ID)
  3459. values[n++] = primary_event_id(leader);
  3460. perf_output_copy(handle, values, n * sizeof(u64));
  3461. list_for_each_entry(sub, &leader->sibling_list, group_entry) {
  3462. n = 0;
  3463. if (sub != event)
  3464. sub->pmu->read(sub);
  3465. values[n++] = perf_event_count(sub);
  3466. if (read_format & PERF_FORMAT_ID)
  3467. values[n++] = primary_event_id(sub);
  3468. perf_output_copy(handle, values, n * sizeof(u64));
  3469. }
  3470. }
  3471. #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
  3472. PERF_FORMAT_TOTAL_TIME_RUNNING)
  3473. static void perf_output_read(struct perf_output_handle *handle,
  3474. struct perf_event *event)
  3475. {
  3476. u64 enabled = 0, running = 0, now, ctx_time;
  3477. u64 read_format = event->attr.read_format;
	/*
	 * Compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * We cannot simply call update_context_time()
	 * because of locking issues, as we may be called in
	 * NMI context.
	 */
  3487. if (read_format & PERF_FORMAT_TOTAL_TIMES) {
  3488. now = perf_clock();
  3489. ctx_time = event->shadow_ctx_time + now;
  3490. enabled = ctx_time - event->tstamp_enabled;
  3491. running = ctx_time - event->tstamp_running;
  3492. }
  3493. if (event->attr.read_format & PERF_FORMAT_GROUP)
  3494. perf_output_read_group(handle, event, enabled, running);
  3495. else
  3496. perf_output_read_one(handle, event, enabled, running);
  3497. }
  3498. void perf_output_sample(struct perf_output_handle *handle,
  3499. struct perf_event_header *header,
  3500. struct perf_sample_data *data,
  3501. struct perf_event *event)
  3502. {
  3503. u64 sample_type = data->type;
  3504. perf_output_put(handle, *header);
  3505. if (sample_type & PERF_SAMPLE_IP)
  3506. perf_output_put(handle, data->ip);
  3507. if (sample_type & PERF_SAMPLE_TID)
  3508. perf_output_put(handle, data->tid_entry);
  3509. if (sample_type & PERF_SAMPLE_TIME)
  3510. perf_output_put(handle, data->time);
  3511. if (sample_type & PERF_SAMPLE_ADDR)
  3512. perf_output_put(handle, data->addr);
  3513. if (sample_type & PERF_SAMPLE_ID)
  3514. perf_output_put(handle, data->id);
  3515. if (sample_type & PERF_SAMPLE_STREAM_ID)
  3516. perf_output_put(handle, data->stream_id);
  3517. if (sample_type & PERF_SAMPLE_CPU)
  3518. perf_output_put(handle, data->cpu_entry);
  3519. if (sample_type & PERF_SAMPLE_PERIOD)
  3520. perf_output_put(handle, data->period);
  3521. if (sample_type & PERF_SAMPLE_READ)
  3522. perf_output_read(handle, event);
  3523. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  3524. if (data->callchain) {
  3525. int size = 1;
  3526. if (data->callchain)
  3527. size += data->callchain->nr;
  3528. size *= sizeof(u64);
  3529. perf_output_copy(handle, data->callchain, size);
  3530. } else {
  3531. u64 nr = 0;
  3532. perf_output_put(handle, nr);
  3533. }
  3534. }
  3535. if (sample_type & PERF_SAMPLE_RAW) {
  3536. if (data->raw) {
  3537. perf_output_put(handle, data->raw->size);
  3538. perf_output_copy(handle, data->raw->data,
  3539. data->raw->size);
  3540. } else {
  3541. struct {
  3542. u32 size;
  3543. u32 data;
  3544. } raw = {
  3545. .size = sizeof(u32),
  3546. .data = 0,
  3547. };
  3548. perf_output_put(handle, raw);
  3549. }
  3550. }
  3551. }
  3552. void perf_prepare_sample(struct perf_event_header *header,
  3553. struct perf_sample_data *data,
  3554. struct perf_event *event,
  3555. struct pt_regs *regs)
  3556. {
  3557. u64 sample_type = event->attr.sample_type;
  3558. header->type = PERF_RECORD_SAMPLE;
  3559. header->size = sizeof(*header) + event->header_size;
  3560. header->misc = 0;
  3561. header->misc |= perf_misc_flags(regs);
  3562. __perf_event_header__init_id(header, data, event);
  3563. if (sample_type & PERF_SAMPLE_IP)
  3564. data->ip = perf_instruction_pointer(regs);
  3565. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  3566. int size = 1;
  3567. data->callchain = perf_callchain(regs);
  3568. if (data->callchain)
  3569. size += data->callchain->nr;
  3570. header->size += size * sizeof(u64);
  3571. }
  3572. if (sample_type & PERF_SAMPLE_RAW) {
  3573. int size = sizeof(u32);
  3574. if (data->raw)
  3575. size += data->raw->size;
  3576. else
  3577. size += sizeof(u32);
  3578. WARN_ON_ONCE(size & (sizeof(u64)-1));
  3579. header->size += size;
  3580. }
  3581. }
  3582. static void perf_event_output(struct perf_event *event, int nmi,
  3583. struct perf_sample_data *data,
  3584. struct pt_regs *regs)
  3585. {
  3586. struct perf_output_handle handle;
  3587. struct perf_event_header header;
  3588. /* protect the callchain buffers */
  3589. rcu_read_lock();
  3590. perf_prepare_sample(&header, data, event, regs);
  3591. if (perf_output_begin(&handle, event, header.size, nmi, 1))
  3592. goto exit;
  3593. perf_output_sample(&handle, &header, data, event);
  3594. perf_output_end(&handle);
  3595. exit:
  3596. rcu_read_unlock();
  3597. }
  3598. /*
  3599. * read event_id
  3600. */
  3601. struct perf_read_event {
  3602. struct perf_event_header header;
  3603. u32 pid;
  3604. u32 tid;
  3605. };
  3606. static void
  3607. perf_event_read_event(struct perf_event *event,
  3608. struct task_struct *task)
  3609. {
  3610. struct perf_output_handle handle;
  3611. struct perf_sample_data sample;
  3612. struct perf_read_event read_event = {
  3613. .header = {
  3614. .type = PERF_RECORD_READ,
  3615. .misc = 0,
  3616. .size = sizeof(read_event) + event->read_size,
  3617. },
  3618. .pid = perf_event_pid(event, task),
  3619. .tid = perf_event_tid(event, task),
  3620. };
  3621. int ret;
  3622. perf_event_header__init_id(&read_event.header, &sample, event);
  3623. ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
  3624. if (ret)
  3625. return;
  3626. perf_output_put(&handle, read_event);
  3627. perf_output_read(&handle, event);
  3628. perf_event__output_id_sample(event, &handle, &sample);
  3629. perf_output_end(&handle);
  3630. }
  3631. /*
  3632. * task tracking -- fork/exit
  3633. *
  3634. * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
  3635. */
  3636. struct perf_task_event {
  3637. struct task_struct *task;
  3638. struct perf_event_context *task_ctx;
  3639. struct {
  3640. struct perf_event_header header;
  3641. u32 pid;
  3642. u32 ppid;
  3643. u32 tid;
  3644. u32 ptid;
  3645. u64 time;
  3646. } event_id;
  3647. };
  3648. static void perf_event_task_output(struct perf_event *event,
  3649. struct perf_task_event *task_event)
  3650. {
  3651. struct perf_output_handle handle;
  3652. struct perf_sample_data sample;
  3653. struct task_struct *task = task_event->task;
  3654. int ret, size = task_event->event_id.header.size;
  3655. perf_event_header__init_id(&task_event->event_id.header, &sample, event);
  3656. ret = perf_output_begin(&handle, event,
  3657. task_event->event_id.header.size, 0, 0);
  3658. if (ret)
  3659. goto out;
  3660. task_event->event_id.pid = perf_event_pid(event, task);
  3661. task_event->event_id.ppid = perf_event_pid(event, current);
  3662. task_event->event_id.tid = perf_event_tid(event, task);
  3663. task_event->event_id.ptid = perf_event_tid(event, current);
  3664. perf_output_put(&handle, task_event->event_id);
  3665. perf_event__output_id_sample(event, &handle, &sample);
  3666. perf_output_end(&handle);
  3667. out:
  3668. task_event->event_id.header.size = size;
  3669. }
  3670. static int perf_event_task_match(struct perf_event *event)
  3671. {
  3672. if (event->state < PERF_EVENT_STATE_INACTIVE)
  3673. return 0;
  3674. if (!event_filter_match(event))
  3675. return 0;
  3676. if (event->attr.comm || event->attr.mmap ||
  3677. event->attr.mmap_data || event->attr.task)
  3678. return 1;
  3679. return 0;
  3680. }
  3681. static void perf_event_task_ctx(struct perf_event_context *ctx,
  3682. struct perf_task_event *task_event)
  3683. {
  3684. struct perf_event *event;
  3685. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3686. if (perf_event_task_match(event))
  3687. perf_event_task_output(event, task_event);
  3688. }
  3689. }
  3690. static void perf_event_task_event(struct perf_task_event *task_event)
  3691. {
  3692. struct perf_cpu_context *cpuctx;
  3693. struct perf_event_context *ctx;
  3694. struct pmu *pmu;
  3695. int ctxn;
  3696. rcu_read_lock();
  3697. list_for_each_entry_rcu(pmu, &pmus, entry) {
  3698. cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
  3699. if (cpuctx->active_pmu != pmu)
  3700. goto next;
  3701. perf_event_task_ctx(&cpuctx->ctx, task_event);
  3702. ctx = task_event->task_ctx;
  3703. if (!ctx) {
  3704. ctxn = pmu->task_ctx_nr;
  3705. if (ctxn < 0)
  3706. goto next;
  3707. ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  3708. }
  3709. if (ctx)
  3710. perf_event_task_ctx(ctx, task_event);
  3711. next:
  3712. put_cpu_ptr(pmu->pmu_cpu_context);
  3713. }
  3714. rcu_read_unlock();
  3715. }
  3716. static void perf_event_task(struct task_struct *task,
  3717. struct perf_event_context *task_ctx,
  3718. int new)
  3719. {
  3720. struct perf_task_event task_event;
  3721. if (!atomic_read(&nr_comm_events) &&
  3722. !atomic_read(&nr_mmap_events) &&
  3723. !atomic_read(&nr_task_events))
  3724. return;
  3725. task_event = (struct perf_task_event){
  3726. .task = task,
  3727. .task_ctx = task_ctx,
  3728. .event_id = {
  3729. .header = {
  3730. .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
  3731. .misc = 0,
  3732. .size = sizeof(task_event.event_id),
  3733. },
  3734. /* .pid */
  3735. /* .ppid */
  3736. /* .tid */
  3737. /* .ptid */
  3738. .time = perf_clock(),
  3739. },
  3740. };
  3741. perf_event_task_event(&task_event);
  3742. }
  3743. void perf_event_fork(struct task_struct *task)
  3744. {
  3745. perf_event_task(task, NULL, 1);
  3746. }
  3747. /*
  3748. * comm tracking
  3749. */
  3750. struct perf_comm_event {
  3751. struct task_struct *task;
  3752. char *comm;
  3753. int comm_size;
  3754. struct {
  3755. struct perf_event_header header;
  3756. u32 pid;
  3757. u32 tid;
  3758. } event_id;
  3759. };
  3760. static void perf_event_comm_output(struct perf_event *event,
  3761. struct perf_comm_event *comm_event)
  3762. {
  3763. struct perf_output_handle handle;
  3764. struct perf_sample_data sample;
  3765. int size = comm_event->event_id.header.size;
  3766. int ret;
  3767. perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
  3768. ret = perf_output_begin(&handle, event,
  3769. comm_event->event_id.header.size, 0, 0);
  3770. if (ret)
  3771. goto out;
  3772. comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
  3773. comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
  3774. perf_output_put(&handle, comm_event->event_id);
  3775. perf_output_copy(&handle, comm_event->comm,
  3776. comm_event->comm_size);
  3777. perf_event__output_id_sample(event, &handle, &sample);
  3778. perf_output_end(&handle);
  3779. out:
  3780. comm_event->event_id.header.size = size;
  3781. }
  3782. static int perf_event_comm_match(struct perf_event *event)
  3783. {
  3784. if (event->state < PERF_EVENT_STATE_INACTIVE)
  3785. return 0;
  3786. if (!event_filter_match(event))
  3787. return 0;
  3788. if (event->attr.comm)
  3789. return 1;
  3790. return 0;
  3791. }
  3792. static void perf_event_comm_ctx(struct perf_event_context *ctx,
  3793. struct perf_comm_event *comm_event)
  3794. {
  3795. struct perf_event *event;
  3796. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3797. if (perf_event_comm_match(event))
  3798. perf_event_comm_output(event, comm_event);
  3799. }
  3800. }
  3801. static void perf_event_comm_event(struct perf_comm_event *comm_event)
  3802. {
  3803. struct perf_cpu_context *cpuctx;
  3804. struct perf_event_context *ctx;
  3805. char comm[TASK_COMM_LEN];
  3806. unsigned int size;
  3807. struct pmu *pmu;
  3808. int ctxn;
  3809. memset(comm, 0, sizeof(comm));
  3810. strlcpy(comm, comm_event->task->comm, sizeof(comm));
  3811. size = ALIGN(strlen(comm)+1, sizeof(u64));
  3812. comm_event->comm = comm;
  3813. comm_event->comm_size = size;
  3814. comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
  3815. rcu_read_lock();
  3816. list_for_each_entry_rcu(pmu, &pmus, entry) {
  3817. cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
  3818. if (cpuctx->active_pmu != pmu)
  3819. goto next;
  3820. perf_event_comm_ctx(&cpuctx->ctx, comm_event);
  3821. ctxn = pmu->task_ctx_nr;
  3822. if (ctxn < 0)
  3823. goto next;
  3824. ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  3825. if (ctx)
  3826. perf_event_comm_ctx(ctx, comm_event);
  3827. next:
  3828. put_cpu_ptr(pmu->pmu_cpu_context);
  3829. }
  3830. rcu_read_unlock();
  3831. }
  3832. void perf_event_comm(struct task_struct *task)
  3833. {
  3834. struct perf_comm_event comm_event;
  3835. struct perf_event_context *ctx;
  3836. int ctxn;
  3837. for_each_task_context_nr(ctxn) {
  3838. ctx = task->perf_event_ctxp[ctxn];
  3839. if (!ctx)
  3840. continue;
  3841. perf_event_enable_on_exec(ctx);
  3842. }
  3843. if (!atomic_read(&nr_comm_events))
  3844. return;
  3845. comm_event = (struct perf_comm_event){
  3846. .task = task,
  3847. /* .comm */
  3848. /* .comm_size */
  3849. .event_id = {
  3850. .header = {
  3851. .type = PERF_RECORD_COMM,
  3852. .misc = 0,
  3853. /* .size */
  3854. },
  3855. /* .pid */
  3856. /* .tid */
  3857. },
  3858. };
  3859. perf_event_comm_event(&comm_event);
  3860. }
  3861. /*
  3862. * mmap tracking
  3863. */
  3864. struct perf_mmap_event {
  3865. struct vm_area_struct *vma;
  3866. const char *file_name;
  3867. int file_size;
  3868. struct {
  3869. struct perf_event_header header;
  3870. u32 pid;
  3871. u32 tid;
  3872. u64 start;
  3873. u64 len;
  3874. u64 pgoff;
  3875. } event_id;
  3876. };
  3877. static void perf_event_mmap_output(struct perf_event *event,
  3878. struct perf_mmap_event *mmap_event)
  3879. {
  3880. struct perf_output_handle handle;
  3881. struct perf_sample_data sample;
  3882. int size = mmap_event->event_id.header.size;
  3883. int ret;
  3884. perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
  3885. ret = perf_output_begin(&handle, event,
  3886. mmap_event->event_id.header.size, 0, 0);
  3887. if (ret)
  3888. goto out;
  3889. mmap_event->event_id.pid = perf_event_pid(event, current);
  3890. mmap_event->event_id.tid = perf_event_tid(event, current);
  3891. perf_output_put(&handle, mmap_event->event_id);
  3892. perf_output_copy(&handle, mmap_event->file_name,
  3893. mmap_event->file_size);
  3894. perf_event__output_id_sample(event, &handle, &sample);
  3895. perf_output_end(&handle);
  3896. out:
  3897. mmap_event->event_id.header.size = size;
  3898. }
  3899. static int perf_event_mmap_match(struct perf_event *event,
  3900. struct perf_mmap_event *mmap_event,
  3901. int executable)
  3902. {
  3903. if (event->state < PERF_EVENT_STATE_INACTIVE)
  3904. return 0;
  3905. if (!event_filter_match(event))
  3906. return 0;
  3907. if ((!executable && event->attr.mmap_data) ||
  3908. (executable && event->attr.mmap))
  3909. return 1;
  3910. return 0;
  3911. }
  3912. static void perf_event_mmap_ctx(struct perf_event_context *ctx,
  3913. struct perf_mmap_event *mmap_event,
  3914. int executable)
  3915. {
  3916. struct perf_event *event;
  3917. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3918. if (perf_event_mmap_match(event, mmap_event, executable))
  3919. perf_event_mmap_output(event, mmap_event);
  3920. }
  3921. }
  3922. static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
  3923. {
  3924. struct perf_cpu_context *cpuctx;
  3925. struct perf_event_context *ctx;
  3926. struct vm_area_struct *vma = mmap_event->vma;
  3927. struct file *file = vma->vm_file;
  3928. unsigned int size;
  3929. char tmp[16];
  3930. char *buf = NULL;
  3931. const char *name;
  3932. struct pmu *pmu;
  3933. int ctxn;
  3934. memset(tmp, 0, sizeof(tmp));
  3935. if (file) {
		/*
		 * d_path() works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64-bit alignment we do later.
		 */
  3941. buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
  3942. if (!buf) {
  3943. name = strncpy(tmp, "//enomem", sizeof(tmp));
  3944. goto got_name;
  3945. }
  3946. name = d_path(&file->f_path, buf, PATH_MAX);
  3947. if (IS_ERR(name)) {
  3948. name = strncpy(tmp, "//toolong", sizeof(tmp));
  3949. goto got_name;
  3950. }
  3951. } else {
  3952. if (arch_vma_name(mmap_event->vma)) {
  3953. name = strncpy(tmp, arch_vma_name(mmap_event->vma),
  3954. sizeof(tmp));
  3955. goto got_name;
  3956. }
  3957. if (!vma->vm_mm) {
  3958. name = strncpy(tmp, "[vdso]", sizeof(tmp));
  3959. goto got_name;
  3960. } else if (vma->vm_start <= vma->vm_mm->start_brk &&
  3961. vma->vm_end >= vma->vm_mm->brk) {
  3962. name = strncpy(tmp, "[heap]", sizeof(tmp));
  3963. goto got_name;
  3964. } else if (vma->vm_start <= vma->vm_mm->start_stack &&
  3965. vma->vm_end >= vma->vm_mm->start_stack) {
  3966. name = strncpy(tmp, "[stack]", sizeof(tmp));
  3967. goto got_name;
  3968. }
  3969. name = strncpy(tmp, "//anon", sizeof(tmp));
  3970. goto got_name;
  3971. }
  3972. got_name:
  3973. size = ALIGN(strlen(name)+1, sizeof(u64));
  3974. mmap_event->file_name = name;
  3975. mmap_event->file_size = size;
  3976. mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  3977. rcu_read_lock();
  3978. list_for_each_entry_rcu(pmu, &pmus, entry) {
  3979. cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
  3980. if (cpuctx->active_pmu != pmu)
  3981. goto next;
  3982. perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
  3983. vma->vm_flags & VM_EXEC);
  3984. ctxn = pmu->task_ctx_nr;
  3985. if (ctxn < 0)
  3986. goto next;
  3987. ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
  3988. if (ctx) {
  3989. perf_event_mmap_ctx(ctx, mmap_event,
  3990. vma->vm_flags & VM_EXEC);
  3991. }
  3992. next:
  3993. put_cpu_ptr(pmu->pmu_cpu_context);
  3994. }
  3995. rcu_read_unlock();
  3996. kfree(buf);
  3997. }
  3998. void perf_event_mmap(struct vm_area_struct *vma)
  3999. {
  4000. struct perf_mmap_event mmap_event;
  4001. if (!atomic_read(&nr_mmap_events))
  4002. return;
  4003. mmap_event = (struct perf_mmap_event){
  4004. .vma = vma,
  4005. /* .file_name */
  4006. /* .file_size */
  4007. .event_id = {
  4008. .header = {
  4009. .type = PERF_RECORD_MMAP,
  4010. .misc = PERF_RECORD_MISC_USER,
  4011. /* .size */
  4012. },
  4013. /* .pid */
  4014. /* .tid */
  4015. .start = vma->vm_start,
  4016. .len = vma->vm_end - vma->vm_start,
  4017. .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
  4018. },
  4019. };
  4020. perf_event_mmap_event(&mmap_event);
  4021. }
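/*
 * Record layout, for reference (hedged sketch): consumers see each mmap
 * record as
 *
 *	struct {
 *		struct perf_event_header header;	-- PERF_RECORD_MMAP
 *		u32 pid, tid;
 *		u64 addr, len, pgoff;
 *		char filename[];			-- NUL terminated, padded to u64
 *	};
 *
 * optionally followed by the sample_id fields when attr.sample_id_all is set.
 */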
  4022. /*
  4023. * IRQ throttle logging
  4024. */
  4025. static void perf_log_throttle(struct perf_event *event, int enable)
  4026. {
  4027. struct perf_output_handle handle;
  4028. struct perf_sample_data sample;
  4029. int ret;
  4030. struct {
  4031. struct perf_event_header header;
  4032. u64 time;
  4033. u64 id;
  4034. u64 stream_id;
  4035. } throttle_event = {
  4036. .header = {
  4037. .type = PERF_RECORD_THROTTLE,
  4038. .misc = 0,
  4039. .size = sizeof(throttle_event),
  4040. },
  4041. .time = perf_clock(),
  4042. .id = primary_event_id(event),
  4043. .stream_id = event->id,
  4044. };
  4045. if (enable)
  4046. throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
  4047. perf_event_header__init_id(&throttle_event.header, &sample, event);
  4048. ret = perf_output_begin(&handle, event,
  4049. throttle_event.header.size, 1, 0);
  4050. if (ret)
  4051. return;
  4052. perf_output_put(&handle, throttle_event);
  4053. perf_event__output_id_sample(event, &handle, &sample);
  4054. perf_output_end(&handle);
  4055. }
  4056. /*
  4057. * Generic event overflow handling, sampling.
  4058. */
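/*
 * Returns non-zero when the caller should stop the event: either the
 * interrupt rate exceeded max_samples_per_tick and the event got
 * throttled, or the user-set event_limit dropped to zero.
 */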
  4059. static int __perf_event_overflow(struct perf_event *event, int nmi,
  4060. int throttle, struct perf_sample_data *data,
  4061. struct pt_regs *regs)
  4062. {
  4063. int events = atomic_read(&event->event_limit);
  4064. struct hw_perf_event *hwc = &event->hw;
  4065. int ret = 0;
  4066. /*
  4067. * Non-sampling counters might still use the PMI to fold short
  4068. * hardware counters, ignore those.
  4069. */
  4070. if (unlikely(!is_sampling_event(event)))
  4071. return 0;
  4072. if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
  4073. if (throttle) {
  4074. hwc->interrupts = MAX_INTERRUPTS;
  4075. perf_log_throttle(event, 0);
  4076. ret = 1;
  4077. }
  4078. } else
  4079. hwc->interrupts++;
  4080. if (event->attr.freq) {
  4081. u64 now = perf_clock();
  4082. s64 delta = now - hwc->freq_time_stamp;
  4083. hwc->freq_time_stamp = now;
  4084. if (delta > 0 && delta < 2*TICK_NSEC)
  4085. perf_adjust_period(event, delta, hwc->last_period);
  4086. }
  4087. /*
  4088. * XXX event_limit might not quite work as expected on inherited
  4089. * events
  4090. */
  4091. event->pending_kill = POLL_IN;
  4092. if (events && atomic_dec_and_test(&event->event_limit)) {
  4093. ret = 1;
  4094. event->pending_kill = POLL_HUP;
  4095. if (nmi) {
  4096. event->pending_disable = 1;
  4097. irq_work_queue(&event->pending);
  4098. } else
  4099. perf_event_disable(event);
  4100. }
  4101. if (event->overflow_handler)
  4102. event->overflow_handler(event, nmi, data, regs);
  4103. else
  4104. perf_event_output(event, nmi, data, regs);
  4105. if (event->fasync && event->pending_kill) {
  4106. if (nmi) {
  4107. event->pending_wakeup = 1;
  4108. irq_work_queue(&event->pending);
  4109. } else
  4110. perf_event_wakeup(event);
  4111. }
  4112. return ret;
  4113. }
  4114. int perf_event_overflow(struct perf_event *event, int nmi,
  4115. struct perf_sample_data *data,
  4116. struct pt_regs *regs)
  4117. {
  4118. return __perf_event_overflow(event, nmi, 1, data, regs);
  4119. }
  4120. /*
  4121. * Generic software event infrastructure
  4122. */
  4123. struct swevent_htable {
  4124. struct swevent_hlist *swevent_hlist;
  4125. struct mutex hlist_mutex;
  4126. int hlist_refcount;
4127. /* Recursion avoidance in each context */
  4128. int recursion[PERF_NR_CONTEXTS];
  4129. };
  4130. static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
  4131. /*
  4132. * We directly increment event->count and keep a second value in
  4133. * event->hw.period_left to count intervals. This period event
  4134. * is kept in the range [-sample_period, 0] so that we can use the
  4135. * sign as trigger.
  4136. */
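/*
 * Illustrative numbers: with a period of 100 and period_left having
 * drifted up to +250, we account nr = (100 + 250) / 100 = 3 overflows
 * and rewind period_left to 250 - 3*100 = -50, i.e. back into the
 * [-sample_period, 0] window described above.
 */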
  4137. static u64 perf_swevent_set_period(struct perf_event *event)
  4138. {
  4139. struct hw_perf_event *hwc = &event->hw;
  4140. u64 period = hwc->last_period;
  4141. u64 nr, offset;
  4142. s64 old, val;
  4143. hwc->last_period = hwc->sample_period;
  4144. again:
  4145. old = val = local64_read(&hwc->period_left);
  4146. if (val < 0)
  4147. return 0;
  4148. nr = div64_u64(period + val, period);
  4149. offset = nr * period;
  4150. val -= offset;
  4151. if (local64_cmpxchg(&hwc->period_left, old, val) != old)
  4152. goto again;
  4153. return nr;
  4154. }
  4155. static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
  4156. int nmi, struct perf_sample_data *data,
  4157. struct pt_regs *regs)
  4158. {
  4159. struct hw_perf_event *hwc = &event->hw;
  4160. int throttle = 0;
  4161. data->period = event->hw.last_period;
  4162. if (!overflow)
  4163. overflow = perf_swevent_set_period(event);
  4164. if (hwc->interrupts == MAX_INTERRUPTS)
  4165. return;
  4166. for (; overflow; overflow--) {
  4167. if (__perf_event_overflow(event, nmi, throttle,
  4168. data, regs)) {
  4169. /*
  4170. * We inhibit the overflow from happening when
  4171. * hwc->interrupts == MAX_INTERRUPTS.
  4172. */
  4173. break;
  4174. }
  4175. throttle = 1;
  4176. }
  4177. }
  4178. static void perf_swevent_event(struct perf_event *event, u64 nr,
  4179. int nmi, struct perf_sample_data *data,
  4180. struct pt_regs *regs)
  4181. {
  4182. struct hw_perf_event *hwc = &event->hw;
  4183. local64_add(nr, &event->count);
  4184. if (!regs)
  4185. return;
  4186. if (!is_sampling_event(event))
  4187. return;
  4188. if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
  4189. return perf_swevent_overflow(event, 1, nmi, data, regs);
  4190. if (local64_add_negative(nr, &hwc->period_left))
  4191. return;
  4192. perf_swevent_overflow(event, 0, nmi, data, regs);
  4193. }
  4194. static int perf_exclude_event(struct perf_event *event,
  4195. struct pt_regs *regs)
  4196. {
  4197. if (event->hw.state & PERF_HES_STOPPED)
  4198. return 1;
  4199. if (regs) {
  4200. if (event->attr.exclude_user && user_mode(regs))
  4201. return 1;
  4202. if (event->attr.exclude_kernel && !user_mode(regs))
  4203. return 1;
  4204. }
  4205. return 0;
  4206. }
  4207. static int perf_swevent_match(struct perf_event *event,
  4208. enum perf_type_id type,
  4209. u32 event_id,
  4210. struct perf_sample_data *data,
  4211. struct pt_regs *regs)
  4212. {
  4213. if (event->attr.type != type)
  4214. return 0;
  4215. if (event->attr.config != event_id)
  4216. return 0;
  4217. if (perf_exclude_event(event, regs))
  4218. return 0;
  4219. return 1;
  4220. }
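/*
 * Software events are kept in a per-CPU hash table keyed on
 * (attr.type, attr.config), so a firing software event only has to
 * walk the bucket of potentially matching events rather than every
 * event on the CPU.
 */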
  4221. static inline u64 swevent_hash(u64 type, u32 event_id)
  4222. {
  4223. u64 val = event_id | (type << 32);
  4224. return hash_64(val, SWEVENT_HLIST_BITS);
  4225. }
  4226. static inline struct hlist_head *
  4227. __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
  4228. {
  4229. u64 hash = swevent_hash(type, event_id);
  4230. return &hlist->heads[hash];
  4231. }
  4232. /* For the read side: events when they trigger */
  4233. static inline struct hlist_head *
  4234. find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
  4235. {
  4236. struct swevent_hlist *hlist;
  4237. hlist = rcu_dereference(swhash->swevent_hlist);
  4238. if (!hlist)
  4239. return NULL;
  4240. return __find_swevent_head(hlist, type, event_id);
  4241. }
  4242. /* For the event head insertion and removal in the hlist */
  4243. static inline struct hlist_head *
  4244. find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
  4245. {
  4246. struct swevent_hlist *hlist;
  4247. u32 event_id = event->attr.config;
  4248. u64 type = event->attr.type;
  4249. /*
  4250. * Event scheduling is always serialized against hlist allocation
  4251. * and release. Which makes the protected version suitable here.
  4252. * The context lock guarantees that.
  4253. */
  4254. hlist = rcu_dereference_protected(swhash->swevent_hlist,
  4255. lockdep_is_held(&event->ctx->lock));
  4256. if (!hlist)
  4257. return NULL;
  4258. return __find_swevent_head(hlist, type, event_id);
  4259. }
  4260. static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
  4261. u64 nr, int nmi,
  4262. struct perf_sample_data *data,
  4263. struct pt_regs *regs)
  4264. {
  4265. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  4266. struct perf_event *event;
  4267. struct hlist_node *node;
  4268. struct hlist_head *head;
  4269. rcu_read_lock();
  4270. head = find_swevent_head_rcu(swhash, type, event_id);
  4271. if (!head)
  4272. goto end;
  4273. hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
  4274. if (perf_swevent_match(event, type, event_id, data, regs))
  4275. perf_swevent_event(event, nr, nmi, data, regs);
  4276. }
  4277. end:
  4278. rcu_read_unlock();
  4279. }
  4280. int perf_swevent_get_recursion_context(void)
  4281. {
  4282. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  4283. return get_recursion_context(swhash->recursion);
  4284. }
  4285. EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  4286. inline void perf_swevent_put_recursion_context(int rctx)
  4287. {
  4288. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  4289. put_recursion_context(swhash->recursion, rctx);
  4290. }
  4291. void __perf_sw_event(u32 event_id, u64 nr, int nmi,
  4292. struct pt_regs *regs, u64 addr)
  4293. {
  4294. struct perf_sample_data data;
  4295. int rctx;
  4296. preempt_disable_notrace();
  4297. rctx = perf_swevent_get_recursion_context();
  4298. if (rctx < 0)
  4299. return;
  4300. perf_sample_data_init(&data, addr);
  4301. do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
  4302. perf_swevent_put_recursion_context(rctx);
  4303. preempt_enable_notrace();
  4304. }
  4305. static void perf_swevent_read(struct perf_event *event)
  4306. {
  4307. }
  4308. static int perf_swevent_add(struct perf_event *event, int flags)
  4309. {
  4310. struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  4311. struct hw_perf_event *hwc = &event->hw;
  4312. struct hlist_head *head;
  4313. if (is_sampling_event(event)) {
  4314. hwc->last_period = hwc->sample_period;
  4315. perf_swevent_set_period(event);
  4316. }
  4317. hwc->state = !(flags & PERF_EF_START);
  4318. head = find_swevent_head(swhash, event);
  4319. if (WARN_ON_ONCE(!head))
  4320. return -EINVAL;
  4321. hlist_add_head_rcu(&event->hlist_entry, head);
  4322. return 0;
  4323. }
  4324. static void perf_swevent_del(struct perf_event *event, int flags)
  4325. {
  4326. hlist_del_rcu(&event->hlist_entry);
  4327. }
  4328. static void perf_swevent_start(struct perf_event *event, int flags)
  4329. {
  4330. event->hw.state = 0;
  4331. }
  4332. static void perf_swevent_stop(struct perf_event *event, int flags)
  4333. {
  4334. event->hw.state = PERF_HES_STOPPED;
  4335. }
  4336. /* Deref the hlist from the update side */
  4337. static inline struct swevent_hlist *
  4338. swevent_hlist_deref(struct swevent_htable *swhash)
  4339. {
  4340. return rcu_dereference_protected(swhash->swevent_hlist,
  4341. lockdep_is_held(&swhash->hlist_mutex));
  4342. }
  4343. static void swevent_hlist_release(struct swevent_htable *swhash)
  4344. {
  4345. struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
  4346. if (!hlist)
  4347. return;
  4348. rcu_assign_pointer(swhash->swevent_hlist, NULL);
  4349. kfree_rcu(hlist, rcu_head);
  4350. }
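/*
 * The hash table itself is allocated lazily and refcounted per CPU:
 * each software event takes a reference on the CPUs it can run on
 * (one CPU for cpu-bound events, all possible CPUs otherwise) and the
 * table is freed, RCU-delayed, once the last reference goes away.
 */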
  4351. static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
  4352. {
  4353. struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  4354. mutex_lock(&swhash->hlist_mutex);
  4355. if (!--swhash->hlist_refcount)
  4356. swevent_hlist_release(swhash);
  4357. mutex_unlock(&swhash->hlist_mutex);
  4358. }
  4359. static void swevent_hlist_put(struct perf_event *event)
  4360. {
  4361. int cpu;
  4362. if (event->cpu != -1) {
  4363. swevent_hlist_put_cpu(event, event->cpu);
  4364. return;
  4365. }
  4366. for_each_possible_cpu(cpu)
  4367. swevent_hlist_put_cpu(event, cpu);
  4368. }
  4369. static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
  4370. {
  4371. struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  4372. int err = 0;
  4373. mutex_lock(&swhash->hlist_mutex);
  4374. if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
  4375. struct swevent_hlist *hlist;
  4376. hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
  4377. if (!hlist) {
  4378. err = -ENOMEM;
  4379. goto exit;
  4380. }
  4381. rcu_assign_pointer(swhash->swevent_hlist, hlist);
  4382. }
  4383. swhash->hlist_refcount++;
  4384. exit:
  4385. mutex_unlock(&swhash->hlist_mutex);
  4386. return err;
  4387. }
  4388. static int swevent_hlist_get(struct perf_event *event)
  4389. {
  4390. int err;
  4391. int cpu, failed_cpu;
  4392. if (event->cpu != -1)
  4393. return swevent_hlist_get_cpu(event, event->cpu);
  4394. get_online_cpus();
  4395. for_each_possible_cpu(cpu) {
  4396. err = swevent_hlist_get_cpu(event, cpu);
  4397. if (err) {
  4398. failed_cpu = cpu;
  4399. goto fail;
  4400. }
  4401. }
  4402. put_online_cpus();
  4403. return 0;
  4404. fail:
  4405. for_each_possible_cpu(cpu) {
  4406. if (cpu == failed_cpu)
  4407. break;
  4408. swevent_hlist_put_cpu(event, cpu);
  4409. }
  4410. put_online_cpus();
  4411. return err;
  4412. }
  4413. struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
  4414. static void sw_perf_event_destroy(struct perf_event *event)
  4415. {
  4416. u64 event_id = event->attr.config;
  4417. WARN_ON(event->parent);
  4418. jump_label_dec(&perf_swevent_enabled[event_id]);
  4419. swevent_hlist_put(event);
  4420. }
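/*
 * event_init for the software pmu.  CPU clock and task clock are
 * rejected here because they are handled by the dedicated hrtimer
 * based pmus further down; for everything else, non-inherited events
 * take a hash-list reference and bump the perf_swevent_enabled jump
 * label that gates the perf_sw_event() callers.
 */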
  4421. static int perf_swevent_init(struct perf_event *event)
  4422. {
  4423. int event_id = event->attr.config;
  4424. if (event->attr.type != PERF_TYPE_SOFTWARE)
  4425. return -ENOENT;
  4426. switch (event_id) {
  4427. case PERF_COUNT_SW_CPU_CLOCK:
  4428. case PERF_COUNT_SW_TASK_CLOCK:
  4429. return -ENOENT;
  4430. default:
  4431. break;
  4432. }
  4433. if (event_id >= PERF_COUNT_SW_MAX)
  4434. return -ENOENT;
  4435. if (!event->parent) {
  4436. int err;
  4437. err = swevent_hlist_get(event);
  4438. if (err)
  4439. return err;
  4440. jump_label_inc(&perf_swevent_enabled[event_id]);
  4441. event->destroy = sw_perf_event_destroy;
  4442. }
  4443. return 0;
  4444. }
  4445. static struct pmu perf_swevent = {
  4446. .task_ctx_nr = perf_sw_context,
  4447. .event_init = perf_swevent_init,
  4448. .add = perf_swevent_add,
  4449. .del = perf_swevent_del,
  4450. .start = perf_swevent_start,
  4451. .stop = perf_swevent_stop,
  4452. .read = perf_swevent_read,
  4453. };
  4454. #ifdef CONFIG_EVENT_TRACING
  4455. static int perf_tp_filter_match(struct perf_event *event,
  4456. struct perf_sample_data *data)
  4457. {
  4458. void *record = data->raw->data;
  4459. if (likely(!event->filter) || filter_match_preds(event->filter, record))
  4460. return 1;
  4461. return 0;
  4462. }
  4463. static int perf_tp_event_match(struct perf_event *event,
  4464. struct perf_sample_data *data,
  4465. struct pt_regs *regs)
  4466. {
  4467. if (event->hw.state & PERF_HES_STOPPED)
  4468. return 0;
  4469. /*
  4470. * All tracepoints are from kernel-space.
  4471. */
  4472. if (event->attr.exclude_kernel)
  4473. return 0;
  4474. if (!perf_tp_filter_match(event, data))
  4475. return 0;
  4476. return 1;
  4477. }
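/*
 * Entry point used by the tracing side once a tracepoint has fired
 * and its raw record has been assembled; every perf event hanging off
 * the tracepoint's hlist gets the record after filter matching.
 */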
  4478. void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
  4479. struct pt_regs *regs, struct hlist_head *head, int rctx)
  4480. {
  4481. struct perf_sample_data data;
  4482. struct perf_event *event;
  4483. struct hlist_node *node;
  4484. struct perf_raw_record raw = {
  4485. .size = entry_size,
  4486. .data = record,
  4487. };
  4488. perf_sample_data_init(&data, addr);
  4489. data.raw = &raw;
  4490. hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
  4491. if (perf_tp_event_match(event, &data, regs))
  4492. perf_swevent_event(event, count, 1, &data, regs);
  4493. }
  4494. perf_swevent_put_recursion_context(rctx);
  4495. }
  4496. EXPORT_SYMBOL_GPL(perf_tp_event);
  4497. static void tp_perf_event_destroy(struct perf_event *event)
  4498. {
  4499. perf_trace_destroy(event);
  4500. }
  4501. static int perf_tp_event_init(struct perf_event *event)
  4502. {
  4503. int err;
  4504. if (event->attr.type != PERF_TYPE_TRACEPOINT)
  4505. return -ENOENT;
  4506. err = perf_trace_init(event);
  4507. if (err)
  4508. return err;
  4509. event->destroy = tp_perf_event_destroy;
  4510. return 0;
  4511. }
  4512. static struct pmu perf_tracepoint = {
  4513. .task_ctx_nr = perf_sw_context,
  4514. .event_init = perf_tp_event_init,
  4515. .add = perf_trace_add,
  4516. .del = perf_trace_del,
  4517. .start = perf_swevent_start,
  4518. .stop = perf_swevent_stop,
  4519. .read = perf_swevent_read,
  4520. };
  4521. static inline void perf_tp_register(void)
  4522. {
  4523. perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
  4524. }
  4525. static int perf_event_set_filter(struct perf_event *event, void __user *arg)
  4526. {
  4527. char *filter_str;
  4528. int ret;
  4529. if (event->attr.type != PERF_TYPE_TRACEPOINT)
  4530. return -EINVAL;
  4531. filter_str = strndup_user(arg, PAGE_SIZE);
  4532. if (IS_ERR(filter_str))
  4533. return PTR_ERR(filter_str);
  4534. ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
  4535. kfree(filter_str);
  4536. return ret;
  4537. }
  4538. static void perf_event_free_filter(struct perf_event *event)
  4539. {
  4540. ftrace_profile_free_filter(event);
  4541. }
  4542. #else
  4543. static inline void perf_tp_register(void)
  4544. {
  4545. }
  4546. static int perf_event_set_filter(struct perf_event *event, void __user *arg)
  4547. {
  4548. return -ENOENT;
  4549. }
  4550. static void perf_event_free_filter(struct perf_event *event)
  4551. {
  4552. }
  4553. #endif /* CONFIG_EVENT_TRACING */
  4554. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  4555. void perf_bp_event(struct perf_event *bp, void *data)
  4556. {
  4557. struct perf_sample_data sample;
  4558. struct pt_regs *regs = data;
  4559. perf_sample_data_init(&sample, bp->attr.bp_addr);
  4560. if (!bp->hw.state && !perf_exclude_event(bp, regs))
  4561. perf_swevent_event(bp, 1, 1, &sample, regs);
  4562. }
  4563. #endif
  4564. /*
  4565. * hrtimer based swevent callback
  4566. */
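/*
 * Pure software clocks have no hardware to interrupt us, so sampling
 * is emulated with an hrtimer that re-arms itself every
 * max(10000ns, sample_period) and pushes a sample through
 * perf_event_overflow() from the timer callback.
 */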
  4567. static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
  4568. {
  4569. enum hrtimer_restart ret = HRTIMER_RESTART;
  4570. struct perf_sample_data data;
  4571. struct pt_regs *regs;
  4572. struct perf_event *event;
  4573. u64 period;
  4574. event = container_of(hrtimer, struct perf_event, hw.hrtimer);
  4575. if (event->state != PERF_EVENT_STATE_ACTIVE)
  4576. return HRTIMER_NORESTART;
  4577. event->pmu->read(event);
  4578. perf_sample_data_init(&data, 0);
  4579. data.period = event->hw.last_period;
  4580. regs = get_irq_regs();
  4581. if (regs && !perf_exclude_event(event, regs)) {
  4582. if (!(event->attr.exclude_idle && current->pid == 0))
  4583. if (perf_event_overflow(event, 0, &data, regs))
  4584. ret = HRTIMER_NORESTART;
  4585. }
  4586. period = max_t(u64, 10000, event->hw.sample_period);
  4587. hrtimer_forward_now(hrtimer, ns_to_ktime(period));
  4588. return ret;
  4589. }
  4590. static void perf_swevent_start_hrtimer(struct perf_event *event)
  4591. {
  4592. struct hw_perf_event *hwc = &event->hw;
  4593. s64 period;
  4594. if (!is_sampling_event(event))
  4595. return;
  4596. period = local64_read(&hwc->period_left);
  4597. if (period) {
  4598. if (period < 0)
  4599. period = 10000;
  4600. local64_set(&hwc->period_left, 0);
  4601. } else {
  4602. period = max_t(u64, 10000, hwc->sample_period);
  4603. }
  4604. __hrtimer_start_range_ns(&hwc->hrtimer,
  4605. ns_to_ktime(period), 0,
  4606. HRTIMER_MODE_REL_PINNED, 0);
  4607. }
  4608. static void perf_swevent_cancel_hrtimer(struct perf_event *event)
  4609. {
  4610. struct hw_perf_event *hwc = &event->hw;
  4611. if (is_sampling_event(event)) {
  4612. ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
  4613. local64_set(&hwc->period_left, ktime_to_ns(remaining));
  4614. hrtimer_cancel(&hwc->hrtimer);
  4615. }
  4616. }
  4617. static void perf_swevent_init_hrtimer(struct perf_event *event)
  4618. {
  4619. struct hw_perf_event *hwc = &event->hw;
  4620. if (!is_sampling_event(event))
  4621. return;
  4622. hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  4623. hwc->hrtimer.function = perf_swevent_hrtimer;
  4624. /*
  4625. * Since hrtimers have a fixed rate, we can do a static freq->period
  4626. * mapping and avoid the whole period adjust feedback stuff.
  4627. */
  4628. if (event->attr.freq) {
  4629. long freq = event->attr.sample_freq;
  4630. event->attr.sample_period = NSEC_PER_SEC / freq;
  4631. hwc->sample_period = event->attr.sample_period;
  4632. local64_set(&hwc->period_left, hwc->sample_period);
  4633. event->attr.freq = 0;
  4634. }
  4635. }
  4636. /*
  4637. * Software event: cpu wall time clock
  4638. */
  4639. static void cpu_clock_event_update(struct perf_event *event)
  4640. {
  4641. s64 prev;
  4642. u64 now;
  4643. now = local_clock();
  4644. prev = local64_xchg(&event->hw.prev_count, now);
  4645. local64_add(now - prev, &event->count);
  4646. }
  4647. static void cpu_clock_event_start(struct perf_event *event, int flags)
  4648. {
  4649. local64_set(&event->hw.prev_count, local_clock());
  4650. perf_swevent_start_hrtimer(event);
  4651. }
  4652. static void cpu_clock_event_stop(struct perf_event *event, int flags)
  4653. {
  4654. perf_swevent_cancel_hrtimer(event);
  4655. cpu_clock_event_update(event);
  4656. }
  4657. static int cpu_clock_event_add(struct perf_event *event, int flags)
  4658. {
  4659. if (flags & PERF_EF_START)
  4660. cpu_clock_event_start(event, flags);
  4661. return 0;
  4662. }
  4663. static void cpu_clock_event_del(struct perf_event *event, int flags)
  4664. {
  4665. cpu_clock_event_stop(event, flags);
  4666. }
  4667. static void cpu_clock_event_read(struct perf_event *event)
  4668. {
  4669. cpu_clock_event_update(event);
  4670. }
  4671. static int cpu_clock_event_init(struct perf_event *event)
  4672. {
  4673. if (event->attr.type != PERF_TYPE_SOFTWARE)
  4674. return -ENOENT;
  4675. if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
  4676. return -ENOENT;
  4677. perf_swevent_init_hrtimer(event);
  4678. return 0;
  4679. }
  4680. static struct pmu perf_cpu_clock = {
  4681. .task_ctx_nr = perf_sw_context,
  4682. .event_init = cpu_clock_event_init,
  4683. .add = cpu_clock_event_add,
  4684. .del = cpu_clock_event_del,
  4685. .start = cpu_clock_event_start,
  4686. .stop = cpu_clock_event_stop,
  4687. .read = cpu_clock_event_read,
  4688. };
  4689. /*
  4690. * Software event: task time clock
  4691. */
  4692. static void task_clock_event_update(struct perf_event *event, u64 now)
  4693. {
  4694. u64 prev;
  4695. s64 delta;
  4696. prev = local64_xchg(&event->hw.prev_count, now);
  4697. delta = now - prev;
  4698. local64_add(delta, &event->count);
  4699. }
  4700. static void task_clock_event_start(struct perf_event *event, int flags)
  4701. {
  4702. local64_set(&event->hw.prev_count, event->ctx->time);
  4703. perf_swevent_start_hrtimer(event);
  4704. }
  4705. static void task_clock_event_stop(struct perf_event *event, int flags)
  4706. {
  4707. perf_swevent_cancel_hrtimer(event);
  4708. task_clock_event_update(event, event->ctx->time);
  4709. }
  4710. static int task_clock_event_add(struct perf_event *event, int flags)
  4711. {
  4712. if (flags & PERF_EF_START)
  4713. task_clock_event_start(event, flags);
  4714. return 0;
  4715. }
  4716. static void task_clock_event_del(struct perf_event *event, int flags)
  4717. {
  4718. task_clock_event_stop(event, PERF_EF_UPDATE);
  4719. }
  4720. static void task_clock_event_read(struct perf_event *event)
  4721. {
  4722. u64 now = perf_clock();
  4723. u64 delta = now - event->ctx->timestamp;
  4724. u64 time = event->ctx->time + delta;
  4725. task_clock_event_update(event, time);
  4726. }
  4727. static int task_clock_event_init(struct perf_event *event)
  4728. {
  4729. if (event->attr.type != PERF_TYPE_SOFTWARE)
  4730. return -ENOENT;
  4731. if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
  4732. return -ENOENT;
  4733. perf_swevent_init_hrtimer(event);
  4734. return 0;
  4735. }
  4736. static struct pmu perf_task_clock = {
  4737. .task_ctx_nr = perf_sw_context,
  4738. .event_init = task_clock_event_init,
  4739. .add = task_clock_event_add,
  4740. .del = task_clock_event_del,
  4741. .start = task_clock_event_start,
  4742. .stop = task_clock_event_stop,
  4743. .read = task_clock_event_read,
  4744. };
  4745. static void perf_pmu_nop_void(struct pmu *pmu)
  4746. {
  4747. }
  4748. static int perf_pmu_nop_int(struct pmu *pmu)
  4749. {
  4750. return 0;
  4751. }
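/*
 * Default transaction callbacks for pmus that provide pmu_enable /
 * pmu_disable but no real transaction support: bracketing group
 * scheduling in a disable/enable pair at least batches the hardware
 * writes (see the wiring in perf_pmu_register() below).
 */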
  4752. static void perf_pmu_start_txn(struct pmu *pmu)
  4753. {
  4754. perf_pmu_disable(pmu);
  4755. }
  4756. static int perf_pmu_commit_txn(struct pmu *pmu)
  4757. {
  4758. perf_pmu_enable(pmu);
  4759. return 0;
  4760. }
  4761. static void perf_pmu_cancel_txn(struct pmu *pmu)
  4762. {
  4763. perf_pmu_enable(pmu);
  4764. }
  4765. /*
  4766. * Ensures all contexts with the same task_ctx_nr have the same
  4767. * pmu_cpu_context too.
  4768. */
  4769. static void *find_pmu_context(int ctxn)
  4770. {
  4771. struct pmu *pmu;
  4772. if (ctxn < 0)
  4773. return NULL;
  4774. list_for_each_entry(pmu, &pmus, entry) {
  4775. if (pmu->task_ctx_nr == ctxn)
  4776. return pmu->pmu_cpu_context;
  4777. }
  4778. return NULL;
  4779. }
  4780. static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
  4781. {
  4782. int cpu;
  4783. for_each_possible_cpu(cpu) {
  4784. struct perf_cpu_context *cpuctx;
  4785. cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
  4786. if (cpuctx->active_pmu == old_pmu)
  4787. cpuctx->active_pmu = pmu;
  4788. }
  4789. }
  4790. static void free_pmu_context(struct pmu *pmu)
  4791. {
  4792. struct pmu *i;
  4793. mutex_lock(&pmus_lock);
4794. /*
4795. * A poor man's refcount: only free the percpu context once no other pmu shares it.
4796. */
  4797. list_for_each_entry(i, &pmus, entry) {
  4798. if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
  4799. update_pmu_context(i, pmu);
  4800. goto out;
  4801. }
  4802. }
  4803. free_percpu(pmu->pmu_cpu_context);
  4804. out:
  4805. mutex_unlock(&pmus_lock);
  4806. }
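/*
 * pmu_idr maps a dynamically assigned attr.type back to its pmu, and
 * every named pmu also gets a device on the "event_source" bus so
 * userspace can discover the type number via sysfs (the read-only
 * "type" attribute below).
 */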
  4807. static struct idr pmu_idr;
  4808. static ssize_t
  4809. type_show(struct device *dev, struct device_attribute *attr, char *page)
  4810. {
  4811. struct pmu *pmu = dev_get_drvdata(dev);
  4812. return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
  4813. }
  4814. static struct device_attribute pmu_dev_attrs[] = {
  4815. __ATTR_RO(type),
  4816. __ATTR_NULL,
  4817. };
  4818. static int pmu_bus_running;
  4819. static struct bus_type pmu_bus = {
  4820. .name = "event_source",
  4821. .dev_attrs = pmu_dev_attrs,
  4822. };
  4823. static void pmu_dev_release(struct device *dev)
  4824. {
  4825. kfree(dev);
  4826. }
  4827. static int pmu_dev_alloc(struct pmu *pmu)
  4828. {
  4829. int ret = -ENOMEM;
  4830. pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  4831. if (!pmu->dev)
  4832. goto out;
  4833. device_initialize(pmu->dev);
  4834. ret = dev_set_name(pmu->dev, "%s", pmu->name);
  4835. if (ret)
  4836. goto free_dev;
  4837. dev_set_drvdata(pmu->dev, pmu);
  4838. pmu->dev->bus = &pmu_bus;
  4839. pmu->dev->release = pmu_dev_release;
  4840. ret = device_add(pmu->dev);
  4841. if (ret)
  4842. goto free_dev;
  4843. out:
  4844. return ret;
  4845. free_dev:
  4846. put_device(pmu->dev);
  4847. goto out;
  4848. }
  4849. static struct lock_class_key cpuctx_mutex;
  4850. static struct lock_class_key cpuctx_lock;
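/*
 * Register a pmu: allocate the percpu disable count, hand out a type
 * id (fixed or freshly allocated from pmu_idr), create the sysfs
 * device if the bus is up, and either share an existing per-CPU
 * context (same task_ctx_nr) or allocate and initialize a new one.
 * Missing transaction / enable callbacks are filled with the stubs
 * above.
 */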
  4851. int perf_pmu_register(struct pmu *pmu, char *name, int type)
  4852. {
  4853. int cpu, ret;
  4854. mutex_lock(&pmus_lock);
  4855. ret = -ENOMEM;
  4856. pmu->pmu_disable_count = alloc_percpu(int);
  4857. if (!pmu->pmu_disable_count)
  4858. goto unlock;
  4859. pmu->type = -1;
  4860. if (!name)
  4861. goto skip_type;
  4862. pmu->name = name;
  4863. if (type < 0) {
  4864. int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
  4865. if (!err)
  4866. goto free_pdc;
  4867. err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
  4868. if (err) {
  4869. ret = err;
  4870. goto free_pdc;
  4871. }
  4872. }
  4873. pmu->type = type;
  4874. if (pmu_bus_running) {
  4875. ret = pmu_dev_alloc(pmu);
  4876. if (ret)
  4877. goto free_idr;
  4878. }
  4879. skip_type:
  4880. pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
  4881. if (pmu->pmu_cpu_context)
  4882. goto got_cpu_context;
  4883. pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
  4884. if (!pmu->pmu_cpu_context)
  4885. goto free_dev;
  4886. for_each_possible_cpu(cpu) {
  4887. struct perf_cpu_context *cpuctx;
  4888. cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
  4889. __perf_event_init_context(&cpuctx->ctx);
  4890. lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
  4891. lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
  4892. cpuctx->ctx.type = cpu_context;
  4893. cpuctx->ctx.pmu = pmu;
  4894. cpuctx->jiffies_interval = 1;
  4895. INIT_LIST_HEAD(&cpuctx->rotation_list);
  4896. cpuctx->active_pmu = pmu;
  4897. }
  4898. got_cpu_context:
  4899. if (!pmu->start_txn) {
  4900. if (pmu->pmu_enable) {
  4901. /*
  4902. * If we have pmu_enable/pmu_disable calls, install
  4903. * transaction stubs that use that to try and batch
  4904. * hardware accesses.
  4905. */
  4906. pmu->start_txn = perf_pmu_start_txn;
  4907. pmu->commit_txn = perf_pmu_commit_txn;
  4908. pmu->cancel_txn = perf_pmu_cancel_txn;
  4909. } else {
  4910. pmu->start_txn = perf_pmu_nop_void;
  4911. pmu->commit_txn = perf_pmu_nop_int;
  4912. pmu->cancel_txn = perf_pmu_nop_void;
  4913. }
  4914. }
  4915. if (!pmu->pmu_enable) {
  4916. pmu->pmu_enable = perf_pmu_nop_void;
  4917. pmu->pmu_disable = perf_pmu_nop_void;
  4918. }
  4919. list_add_rcu(&pmu->entry, &pmus);
  4920. ret = 0;
  4921. unlock:
  4922. mutex_unlock(&pmus_lock);
  4923. return ret;
  4924. free_dev:
  4925. device_del(pmu->dev);
  4926. put_device(pmu->dev);
  4927. free_idr:
  4928. if (pmu->type >= PERF_TYPE_MAX)
  4929. idr_remove(&pmu_idr, pmu->type);
  4930. free_pdc:
  4931. free_percpu(pmu->pmu_disable_count);
  4932. goto unlock;
  4933. }
  4934. void perf_pmu_unregister(struct pmu *pmu)
  4935. {
  4936. mutex_lock(&pmus_lock);
  4937. list_del_rcu(&pmu->entry);
  4938. mutex_unlock(&pmus_lock);
  4939. /*
  4940. * We dereference the pmu list under both SRCU and regular RCU, so
  4941. * synchronize against both of those.
  4942. */
  4943. synchronize_srcu(&pmus_srcu);
  4944. synchronize_rcu();
  4945. free_percpu(pmu->pmu_disable_count);
  4946. if (pmu->type >= PERF_TYPE_MAX)
  4947. idr_remove(&pmu_idr, pmu->type);
  4948. device_del(pmu->dev);
  4949. put_device(pmu->dev);
  4950. free_pmu_context(pmu);
  4951. }
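/*
 * Resolve the pmu for a new event: first try a direct idr lookup on
 * attr.type, then fall back to offering the event to every registered
 * pmu until one of them accepts it (anything other than -ENOENT
 * terminates the search).
 */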
  4952. struct pmu *perf_init_event(struct perf_event *event)
  4953. {
  4954. struct pmu *pmu = NULL;
  4955. int idx;
  4956. int ret;
  4957. idx = srcu_read_lock(&pmus_srcu);
  4958. rcu_read_lock();
  4959. pmu = idr_find(&pmu_idr, event->attr.type);
  4960. rcu_read_unlock();
  4961. if (pmu) {
  4962. ret = pmu->event_init(event);
  4963. if (ret)
  4964. pmu = ERR_PTR(ret);
  4965. goto unlock;
  4966. }
  4967. list_for_each_entry_rcu(pmu, &pmus, entry) {
  4968. ret = pmu->event_init(event);
  4969. if (!ret)
  4970. goto unlock;
  4971. if (ret != -ENOENT) {
  4972. pmu = ERR_PTR(ret);
  4973. goto unlock;
  4974. }
  4975. }
  4976. pmu = ERR_PTR(-ENOENT);
  4977. unlock:
  4978. srcu_read_unlock(&pmus_srcu, idx);
  4979. return pmu;
  4980. }
  4981. /*
4982. * Allocate and initialize an event structure
  4983. */
  4984. static struct perf_event *
  4985. perf_event_alloc(struct perf_event_attr *attr, int cpu,
  4986. struct task_struct *task,
  4987. struct perf_event *group_leader,
  4988. struct perf_event *parent_event,
  4989. perf_overflow_handler_t overflow_handler)
  4990. {
  4991. struct pmu *pmu;
  4992. struct perf_event *event;
  4993. struct hw_perf_event *hwc;
  4994. long err;
  4995. if ((unsigned)cpu >= nr_cpu_ids) {
  4996. if (!task || cpu != -1)
  4997. return ERR_PTR(-EINVAL);
  4998. }
  4999. event = kzalloc(sizeof(*event), GFP_KERNEL);
  5000. if (!event)
  5001. return ERR_PTR(-ENOMEM);
  5002. /*
  5003. * Single events are their own group leaders, with an
  5004. * empty sibling list:
  5005. */
  5006. if (!group_leader)
  5007. group_leader = event;
  5008. mutex_init(&event->child_mutex);
  5009. INIT_LIST_HEAD(&event->child_list);
  5010. INIT_LIST_HEAD(&event->group_entry);
  5011. INIT_LIST_HEAD(&event->event_entry);
  5012. INIT_LIST_HEAD(&event->sibling_list);
  5013. init_waitqueue_head(&event->waitq);
  5014. init_irq_work(&event->pending, perf_pending_event);
  5015. mutex_init(&event->mmap_mutex);
  5016. event->cpu = cpu;
  5017. event->attr = *attr;
  5018. event->group_leader = group_leader;
  5019. event->pmu = NULL;
  5020. event->oncpu = -1;
  5021. event->parent = parent_event;
  5022. event->ns = get_pid_ns(current->nsproxy->pid_ns);
  5023. event->id = atomic64_inc_return(&perf_event_id);
  5024. event->state = PERF_EVENT_STATE_INACTIVE;
  5025. if (task) {
  5026. event->attach_state = PERF_ATTACH_TASK;
  5027. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  5028. /*
  5029. * hw_breakpoint is a bit difficult here..
  5030. */
  5031. if (attr->type == PERF_TYPE_BREAKPOINT)
  5032. event->hw.bp_target = task;
  5033. #endif
  5034. }
  5035. if (!overflow_handler && parent_event)
  5036. overflow_handler = parent_event->overflow_handler;
  5037. event->overflow_handler = overflow_handler;
  5038. if (attr->disabled)
  5039. event->state = PERF_EVENT_STATE_OFF;
  5040. pmu = NULL;
  5041. hwc = &event->hw;
  5042. hwc->sample_period = attr->sample_period;
  5043. if (attr->freq && attr->sample_freq)
  5044. hwc->sample_period = 1;
  5045. hwc->last_period = hwc->sample_period;
  5046. local64_set(&hwc->period_left, hwc->sample_period);
  5047. /*
  5048. * we currently do not support PERF_FORMAT_GROUP on inherited events
  5049. */
  5050. if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
  5051. goto done;
  5052. pmu = perf_init_event(event);
  5053. done:
  5054. err = 0;
  5055. if (!pmu)
  5056. err = -EINVAL;
  5057. else if (IS_ERR(pmu))
  5058. err = PTR_ERR(pmu);
  5059. if (err) {
  5060. if (event->ns)
  5061. put_pid_ns(event->ns);
  5062. kfree(event);
  5063. return ERR_PTR(err);
  5064. }
  5065. event->pmu = pmu;
  5066. if (!event->parent) {
  5067. if (event->attach_state & PERF_ATTACH_TASK)
  5068. jump_label_inc(&perf_sched_events);
  5069. if (event->attr.mmap || event->attr.mmap_data)
  5070. atomic_inc(&nr_mmap_events);
  5071. if (event->attr.comm)
  5072. atomic_inc(&nr_comm_events);
  5073. if (event->attr.task)
  5074. atomic_inc(&nr_task_events);
  5075. if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
  5076. err = get_callchain_buffers();
  5077. if (err) {
  5078. free_event(event);
  5079. return ERR_PTR(err);
  5080. }
  5081. }
  5082. }
  5083. return event;
  5084. }
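/*
 * Copy the attribute structure from user space in an ABI-compatible
 * way: an older, shorter struct is zero-extended, while a newer,
 * larger one is only accepted if all the bytes we do not understand
 * are zero.  On size problems the kernel's own sizeof(*attr) is
 * written back and -E2BIG returned so userspace can retry.
 */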
  5085. static int perf_copy_attr(struct perf_event_attr __user *uattr,
  5086. struct perf_event_attr *attr)
  5087. {
  5088. u32 size;
  5089. int ret;
  5090. if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
  5091. return -EFAULT;
  5092. /*
5093. * zero the full structure, so that a short copy from user space leaves the remaining fields zeroed.
  5094. */
  5095. memset(attr, 0, sizeof(*attr));
  5096. ret = get_user(size, &uattr->size);
  5097. if (ret)
  5098. return ret;
  5099. if (size > PAGE_SIZE) /* silly large */
  5100. goto err_size;
  5101. if (!size) /* abi compat */
  5102. size = PERF_ATTR_SIZE_VER0;
  5103. if (size < PERF_ATTR_SIZE_VER0)
  5104. goto err_size;
  5105. /*
  5106. * If we're handed a bigger struct than we know of,
  5107. * ensure all the unknown bits are 0 - i.e. new
  5108. * user-space does not rely on any kernel feature
5109. * extensions we don't know about yet.
  5110. */
  5111. if (size > sizeof(*attr)) {
  5112. unsigned char __user *addr;
  5113. unsigned char __user *end;
  5114. unsigned char val;
  5115. addr = (void __user *)uattr + sizeof(*attr);
  5116. end = (void __user *)uattr + size;
  5117. for (; addr < end; addr++) {
  5118. ret = get_user(val, addr);
  5119. if (ret)
  5120. return ret;
  5121. if (val)
  5122. goto err_size;
  5123. }
  5124. size = sizeof(*attr);
  5125. }
  5126. ret = copy_from_user(attr, uattr, size);
  5127. if (ret)
  5128. return -EFAULT;
  5129. /*
  5130. * If the type exists, the corresponding creation will verify
  5131. * the attr->config.
  5132. */
  5133. if (attr->type >= PERF_TYPE_MAX)
  5134. return -EINVAL;
  5135. if (attr->__reserved_1)
  5136. return -EINVAL;
  5137. if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
  5138. return -EINVAL;
  5139. if (attr->read_format & ~(PERF_FORMAT_MAX-1))
  5140. return -EINVAL;
  5141. out:
  5142. return ret;
  5143. err_size:
  5144. put_user(sizeof(*attr), &uattr->size);
  5145. ret = -E2BIG;
  5146. goto out;
  5147. }
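/*
 * Redirect this event's output into another event's buffer (the
 * PERF_FLAG_FD_OUTPUT path below uses this at open time).  Only
 * allowed between events on the same CPU, or within the same task
 * context for per-task buffers, and never while the event is already
 * mmap()ed.
 */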
  5148. static int
  5149. perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
  5150. {
  5151. struct perf_buffer *buffer = NULL, *old_buffer = NULL;
  5152. int ret = -EINVAL;
  5153. if (!output_event)
  5154. goto set;
  5155. /* don't allow circular references */
  5156. if (event == output_event)
  5157. goto out;
  5158. /*
  5159. * Don't allow cross-cpu buffers
  5160. */
  5161. if (output_event->cpu != event->cpu)
  5162. goto out;
  5163. /*
5164. * If it's not a per-cpu buffer, it must be the same task.
  5165. */
  5166. if (output_event->cpu == -1 && output_event->ctx != event->ctx)
  5167. goto out;
  5168. set:
  5169. mutex_lock(&event->mmap_mutex);
  5170. /* Can't redirect output if we've got an active mmap() */
  5171. if (atomic_read(&event->mmap_count))
  5172. goto unlock;
  5173. if (output_event) {
  5174. /* get the buffer we want to redirect to */
  5175. buffer = perf_buffer_get(output_event);
  5176. if (!buffer)
  5177. goto unlock;
  5178. }
  5179. old_buffer = event->buffer;
  5180. rcu_assign_pointer(event->buffer, buffer);
  5181. ret = 0;
  5182. unlock:
  5183. mutex_unlock(&event->mmap_mutex);
  5184. if (old_buffer)
  5185. perf_buffer_put(old_buffer);
  5186. out:
  5187. return ret;
  5188. }
  5189. /**
  5190. * sys_perf_event_open - open a performance event, associate it to a task/cpu
  5191. *
  5192. * @attr_uptr: event_id type attributes for monitoring/sampling
  5193. * @pid: target pid
  5194. * @cpu: target cpu
  5195. * @group_fd: group leader event fd
  5196. */
  5197. SYSCALL_DEFINE5(perf_event_open,
  5198. struct perf_event_attr __user *, attr_uptr,
  5199. pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
  5200. {
  5201. struct perf_event *group_leader = NULL, *output_event = NULL;
  5202. struct perf_event *event, *sibling;
  5203. struct perf_event_attr attr;
  5204. struct perf_event_context *ctx;
  5205. struct file *event_file = NULL;
  5206. struct file *group_file = NULL;
  5207. struct task_struct *task = NULL;
  5208. struct pmu *pmu;
  5209. int event_fd;
  5210. int move_group = 0;
  5211. int fput_needed = 0;
  5212. int err;
  5213. /* for future expandability... */
  5214. if (flags & ~PERF_FLAG_ALL)
  5215. return -EINVAL;
  5216. err = perf_copy_attr(attr_uptr, &attr);
  5217. if (err)
  5218. return err;
  5219. if (!attr.exclude_kernel) {
  5220. if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
  5221. return -EACCES;
  5222. }
  5223. if (attr.freq) {
  5224. if (attr.sample_freq > sysctl_perf_event_sample_rate)
  5225. return -EINVAL;
  5226. }
  5227. /*
  5228. * In cgroup mode, the pid argument is used to pass the fd
  5229. * opened to the cgroup directory in cgroupfs. The cpu argument
  5230. * designates the cpu on which to monitor threads from that
  5231. * cgroup.
  5232. */
  5233. if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
  5234. return -EINVAL;
  5235. event_fd = get_unused_fd_flags(O_RDWR);
  5236. if (event_fd < 0)
  5237. return event_fd;
  5238. if (group_fd != -1) {
  5239. group_leader = perf_fget_light(group_fd, &fput_needed);
  5240. if (IS_ERR(group_leader)) {
  5241. err = PTR_ERR(group_leader);
  5242. goto err_fd;
  5243. }
  5244. group_file = group_leader->filp;
  5245. if (flags & PERF_FLAG_FD_OUTPUT)
  5246. output_event = group_leader;
  5247. if (flags & PERF_FLAG_FD_NO_GROUP)
  5248. group_leader = NULL;
  5249. }
  5250. if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
  5251. task = find_lively_task_by_vpid(pid);
  5252. if (IS_ERR(task)) {
  5253. err = PTR_ERR(task);
  5254. goto err_group_fd;
  5255. }
  5256. }
  5257. event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
  5258. if (IS_ERR(event)) {
  5259. err = PTR_ERR(event);
  5260. goto err_task;
  5261. }
  5262. if (flags & PERF_FLAG_PID_CGROUP) {
  5263. err = perf_cgroup_connect(pid, event, &attr, group_leader);
  5264. if (err)
  5265. goto err_alloc;
  5266. /*
  5267. * one more event:
  5268. * - that has cgroup constraint on event->cpu
  5269. * - that may need work on context switch
  5270. */
  5271. atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
  5272. jump_label_inc(&perf_sched_events);
  5273. }
  5274. /*
  5275. * Special case software events and allow them to be part of
  5276. * any hardware group.
  5277. */
  5278. pmu = event->pmu;
  5279. if (group_leader &&
  5280. (is_software_event(event) != is_software_event(group_leader))) {
  5281. if (is_software_event(event)) {
  5282. /*
  5283. * If event and group_leader are not both a software
  5284. * event, and event is, then group leader is not.
  5285. *
  5286. * Allow the addition of software events to !software
  5287. * groups, this is safe because software events never
  5288. * fail to schedule.
  5289. */
  5290. pmu = group_leader->pmu;
  5291. } else if (is_software_event(group_leader) &&
  5292. (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
  5293. /*
  5294. * In case the group is a pure software group, and we
  5295. * try to add a hardware event, move the whole group to
  5296. * the hardware context.
  5297. */
  5298. move_group = 1;
  5299. }
  5300. }
  5301. /*
  5302. * Get the target context (task or percpu):
  5303. */
  5304. ctx = find_get_context(pmu, task, cpu);
  5305. if (IS_ERR(ctx)) {
  5306. err = PTR_ERR(ctx);
  5307. goto err_alloc;
  5308. }
  5309. if (task) {
  5310. put_task_struct(task);
  5311. task = NULL;
  5312. }
  5313. /*
  5314. * Look up the group leader (we will attach this event to it):
  5315. */
  5316. if (group_leader) {
  5317. err = -EINVAL;
  5318. /*
  5319. * Do not allow a recursive hierarchy (this new sibling
  5320. * becoming part of another group-sibling):
  5321. */
  5322. if (group_leader->group_leader != group_leader)
  5323. goto err_context;
  5324. /*
  5325. * Do not allow to attach to a group in a different
  5326. * task or CPU context:
  5327. */
  5328. if (move_group) {
  5329. if (group_leader->ctx->type != ctx->type)
  5330. goto err_context;
  5331. } else {
  5332. if (group_leader->ctx != ctx)
  5333. goto err_context;
  5334. }
  5335. /*
  5336. * Only a group leader can be exclusive or pinned
  5337. */
  5338. if (attr.exclusive || attr.pinned)
  5339. goto err_context;
  5340. }
  5341. if (output_event) {
  5342. err = perf_event_set_output(event, output_event);
  5343. if (err)
  5344. goto err_context;
  5345. }
  5346. event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
  5347. if (IS_ERR(event_file)) {
  5348. err = PTR_ERR(event_file);
  5349. goto err_context;
  5350. }
  5351. if (move_group) {
  5352. struct perf_event_context *gctx = group_leader->ctx;
  5353. mutex_lock(&gctx->mutex);
  5354. perf_remove_from_context(group_leader);
  5355. list_for_each_entry(sibling, &group_leader->sibling_list,
  5356. group_entry) {
  5357. perf_remove_from_context(sibling);
  5358. put_ctx(gctx);
  5359. }
  5360. mutex_unlock(&gctx->mutex);
  5361. put_ctx(gctx);
  5362. }
  5363. event->filp = event_file;
  5364. WARN_ON_ONCE(ctx->parent_ctx);
  5365. mutex_lock(&ctx->mutex);
  5366. if (move_group) {
  5367. perf_install_in_context(ctx, group_leader, cpu);
  5368. get_ctx(ctx);
  5369. list_for_each_entry(sibling, &group_leader->sibling_list,
  5370. group_entry) {
  5371. perf_install_in_context(ctx, sibling, cpu);
  5372. get_ctx(ctx);
  5373. }
  5374. }
  5375. perf_install_in_context(ctx, event, cpu);
  5376. ++ctx->generation;
  5377. perf_unpin_context(ctx);
  5378. mutex_unlock(&ctx->mutex);
  5379. event->owner = current;
  5380. mutex_lock(&current->perf_event_mutex);
  5381. list_add_tail(&event->owner_entry, &current->perf_event_list);
  5382. mutex_unlock(&current->perf_event_mutex);
  5383. /*
  5384. * Precalculate sample_data sizes
  5385. */
  5386. perf_event__header_size(event);
  5387. perf_event__id_header_size(event);
  5388. /*
  5389. * Drop the reference on the group_event after placing the
  5390. * new event on the sibling_list. This ensures destruction
  5391. * of the group leader will find the pointer to itself in
  5392. * perf_group_detach().
  5393. */
  5394. fput_light(group_file, fput_needed);
  5395. fd_install(event_fd, event_file);
  5396. return event_fd;
  5397. err_context:
  5398. perf_unpin_context(ctx);
  5399. put_ctx(ctx);
  5400. err_alloc:
  5401. free_event(event);
  5402. err_task:
  5403. if (task)
  5404. put_task_struct(task);
  5405. err_group_fd:
  5406. fput_light(group_file, fput_needed);
  5407. err_fd:
  5408. put_unused_fd(event_fd);
  5409. return err;
  5410. }
  5411. /**
  5412. * perf_event_create_kernel_counter
  5413. *
  5414. * @attr: attributes of the counter to create
  5415. * @cpu: cpu in which the counter is bound
  5416. * @task: task to profile (NULL for percpu)
  5417. */
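/*
 * Illustrative (hypothetical) in-kernel usage, counting cycles on
 * CPU 0 without an overflow handler:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, 0, NULL, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 */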
  5418. struct perf_event *
  5419. perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
  5420. struct task_struct *task,
  5421. perf_overflow_handler_t overflow_handler)
  5422. {
  5423. struct perf_event_context *ctx;
  5424. struct perf_event *event;
  5425. int err;
  5426. /*
  5427. * Get the target context (task or percpu):
  5428. */
  5429. event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
  5430. if (IS_ERR(event)) {
  5431. err = PTR_ERR(event);
  5432. goto err;
  5433. }
  5434. ctx = find_get_context(event->pmu, task, cpu);
  5435. if (IS_ERR(ctx)) {
  5436. err = PTR_ERR(ctx);
  5437. goto err_free;
  5438. }
  5439. event->filp = NULL;
  5440. WARN_ON_ONCE(ctx->parent_ctx);
  5441. mutex_lock(&ctx->mutex);
  5442. perf_install_in_context(ctx, event, cpu);
  5443. ++ctx->generation;
  5444. perf_unpin_context(ctx);
  5445. mutex_unlock(&ctx->mutex);
  5446. return event;
  5447. err_free:
  5448. free_event(event);
  5449. err:
  5450. return ERR_PTR(err);
  5451. }
  5452. EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
  5453. static void sync_child_event(struct perf_event *child_event,
  5454. struct task_struct *child)
  5455. {
  5456. struct perf_event *parent_event = child_event->parent;
  5457. u64 child_val;
  5458. if (child_event->attr.inherit_stat)
  5459. perf_event_read_event(child_event, child);
  5460. child_val = perf_event_count(child_event);
  5461. /*
  5462. * Add back the child's count to the parent's count:
  5463. */
  5464. atomic64_add(child_val, &parent_event->child_count);
  5465. atomic64_add(child_event->total_time_enabled,
  5466. &parent_event->child_total_time_enabled);
  5467. atomic64_add(child_event->total_time_running,
  5468. &parent_event->child_total_time_running);
  5469. /*
  5470. * Remove this event from the parent's list
  5471. */
  5472. WARN_ON_ONCE(parent_event->ctx->parent_ctx);
  5473. mutex_lock(&parent_event->child_mutex);
  5474. list_del_init(&child_event->child_list);
  5475. mutex_unlock(&parent_event->child_mutex);
  5476. /*
  5477. * Release the parent event, if this was the last
  5478. * reference to it.
  5479. */
  5480. fput(parent_event->filp);
  5481. }
  5482. static void
  5483. __perf_event_exit_task(struct perf_event *child_event,
  5484. struct perf_event_context *child_ctx,
  5485. struct task_struct *child)
  5486. {
  5487. if (child_event->parent) {
  5488. raw_spin_lock_irq(&child_ctx->lock);
  5489. perf_group_detach(child_event);
  5490. raw_spin_unlock_irq(&child_ctx->lock);
  5491. }
  5492. perf_remove_from_context(child_event);
  5493. /*
  5494. * It can happen that the parent exits first, and has events
  5495. * that are still around due to the child reference. These
  5496. * events need to be zapped.
  5497. */
  5498. if (child_event->parent) {
  5499. sync_child_event(child_event, child);
  5500. free_event(child_event);
  5501. }
  5502. }
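/*
 * Tear down one task context on exit: unschedule everything, unclone
 * the context so it can no longer be swapped with another task, emit
 * PERF_RECORD_EXIT, and then remove every remaining event from the
 * context; inherited events are synced back to their parent and freed.
 */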
  5503. static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
  5504. {
  5505. struct perf_event *child_event, *tmp;
  5506. struct perf_event_context *child_ctx;
  5507. unsigned long flags;
  5508. if (likely(!child->perf_event_ctxp[ctxn])) {
  5509. perf_event_task(child, NULL, 0);
  5510. return;
  5511. }
  5512. local_irq_save(flags);
  5513. /*
  5514. * We can't reschedule here because interrupts are disabled,
  5515. * and either child is current or it is a task that can't be
  5516. * scheduled, so we are now safe from rescheduling changing
  5517. * our context.
  5518. */
  5519. child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
  5520. /*
  5521. * Take the context lock here so that if find_get_context is
  5522. * reading child->perf_event_ctxp, we wait until it has
  5523. * incremented the context's refcount before we do put_ctx below.
  5524. */
  5525. raw_spin_lock(&child_ctx->lock);
  5526. task_ctx_sched_out(child_ctx);
  5527. child->perf_event_ctxp[ctxn] = NULL;
  5528. /*
  5529. * If this context is a clone; unclone it so it can't get
  5530. * swapped to another process while we're removing all
  5531. * the events from it.
  5532. */
  5533. unclone_ctx(child_ctx);
  5534. update_context_time(child_ctx);
  5535. raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
  5536. /*
  5537. * Report the task dead after unscheduling the events so that we
  5538. * won't get any samples after PERF_RECORD_EXIT. We can however still
  5539. * get a few PERF_RECORD_READ events.
  5540. */
  5541. perf_event_task(child, child_ctx, 0);
  5542. /*
  5543. * We can recurse on the same lock type through:
  5544. *
  5545. * __perf_event_exit_task()
  5546. * sync_child_event()
  5547. * fput(parent_event->filp)
  5548. * perf_release()
  5549. * mutex_lock(&ctx->mutex)
  5550. *
5551. * But since it's the parent context it won't be the same instance.
  5552. */
  5553. mutex_lock(&child_ctx->mutex);
  5554. again:
  5555. list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
  5556. group_entry)
  5557. __perf_event_exit_task(child_event, child_ctx, child);
  5558. list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
  5559. group_entry)
  5560. __perf_event_exit_task(child_event, child_ctx, child);
  5561. /*
  5562. * If the last event was a group event, it will have appended all
  5563. * its siblings to the list, but we obtained 'tmp' before that which
  5564. * will still point to the list head terminating the iteration.
  5565. */
  5566. if (!list_empty(&child_ctx->pinned_groups) ||
  5567. !list_empty(&child_ctx->flexible_groups))
  5568. goto again;
  5569. mutex_unlock(&child_ctx->mutex);
  5570. put_ctx(child_ctx);
  5571. }
  5572. /*
  5573. * When a child task exits, feed back event values to parent events.
  5574. */
  5575. void perf_event_exit_task(struct task_struct *child)
  5576. {
  5577. struct perf_event *event, *tmp;
  5578. int ctxn;
  5579. mutex_lock(&child->perf_event_mutex);
  5580. list_for_each_entry_safe(event, tmp, &child->perf_event_list,
  5581. owner_entry) {
  5582. list_del_init(&event->owner_entry);
  5583. /*
  5584. * Ensure the list deletion is visible before we clear
  5585. * the owner, closes a race against perf_release() where
  5586. * we need to serialize on the owner->perf_event_mutex.
  5587. */
  5588. smp_wmb();
  5589. event->owner = NULL;
  5590. }
  5591. mutex_unlock(&child->perf_event_mutex);
  5592. for_each_task_context_nr(ctxn)
  5593. perf_event_exit_task_context(child, ctxn);
  5594. }
  5595. static void perf_free_event(struct perf_event *event,
  5596. struct perf_event_context *ctx)
  5597. {
  5598. struct perf_event *parent = event->parent;
  5599. if (WARN_ON_ONCE(!parent))
  5600. return;
  5601. mutex_lock(&parent->child_mutex);
  5602. list_del_init(&event->child_list);
  5603. mutex_unlock(&parent->child_mutex);
  5604. fput(parent->filp);
  5605. perf_group_detach(event);
  5606. list_del_event(event, ctx);
  5607. free_event(event);
  5608. }
  5609. /*
  5610. * free an unexposed, unused context as created by inheritance by
  5611. * perf_event_init_task below, used by fork() in case of fail.
  5612. */
  5613. void perf_event_free_task(struct task_struct *task)
  5614. {
  5615. struct perf_event_context *ctx;
  5616. struct perf_event *event, *tmp;
  5617. int ctxn;
  5618. for_each_task_context_nr(ctxn) {
  5619. ctx = task->perf_event_ctxp[ctxn];
  5620. if (!ctx)
  5621. continue;
  5622. mutex_lock(&ctx->mutex);
  5623. again:
  5624. list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
  5625. group_entry)
  5626. perf_free_event(event, ctx);
  5627. list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
  5628. group_entry)
  5629. perf_free_event(event, ctx);
  5630. if (!list_empty(&ctx->pinned_groups) ||
  5631. !list_empty(&ctx->flexible_groups))
  5632. goto again;
  5633. mutex_unlock(&ctx->mutex);
  5634. put_ctx(ctx);
  5635. }
  5636. }
  5637. void perf_event_delayed_put(struct task_struct *task)
  5638. {
  5639. int ctxn;
  5640. for_each_task_context_nr(ctxn)
  5641. WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
  5642. }
  5643. /*
5644. * inherit an event from parent task to child task:
  5645. */
  5646. static struct perf_event *
  5647. inherit_event(struct perf_event *parent_event,
  5648. struct task_struct *parent,
  5649. struct perf_event_context *parent_ctx,
  5650. struct task_struct *child,
  5651. struct perf_event *group_leader,
  5652. struct perf_event_context *child_ctx)
  5653. {
  5654. struct perf_event *child_event;
  5655. unsigned long flags;
  5656. /*
  5657. * Instead of creating recursive hierarchies of events,
  5658. * we link inherited events back to the original parent,
5659. * which is guaranteed to still have a filp, and whose file reference
5660. * count we use:
  5661. */
  5662. if (parent_event->parent)
  5663. parent_event = parent_event->parent;
  5664. child_event = perf_event_alloc(&parent_event->attr,
  5665. parent_event->cpu,
  5666. child,
  5667. group_leader, parent_event,
  5668. NULL);
  5669. if (IS_ERR(child_event))
  5670. return child_event;
  5671. get_ctx(child_ctx);
  5672. /*
  5673. * Make the child state follow the state of the parent event,
  5674. * not its attr.disabled bit. We hold the parent's mutex,
  5675. * so we won't race with perf_event_{en, dis}able_family.
  5676. */
  5677. if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
  5678. child_event->state = PERF_EVENT_STATE_INACTIVE;
  5679. else
  5680. child_event->state = PERF_EVENT_STATE_OFF;
  5681. if (parent_event->attr.freq) {
  5682. u64 sample_period = parent_event->hw.sample_period;
  5683. struct hw_perf_event *hwc = &child_event->hw;
  5684. hwc->sample_period = sample_period;
  5685. hwc->last_period = sample_period;
  5686. local64_set(&hwc->period_left, sample_period);
  5687. }
  5688. child_event->ctx = child_ctx;
  5689. child_event->overflow_handler = parent_event->overflow_handler;
  5690. /*
  5691. * Precalculate sample_data sizes
  5692. */
  5693. perf_event__header_size(child_event);
  5694. perf_event__id_header_size(child_event);
  5695. /*
  5696. * Link it up in the child's context:
  5697. */
  5698. raw_spin_lock_irqsave(&child_ctx->lock, flags);
  5699. add_event_to_ctx(child_event, child_ctx);
  5700. raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
  5701. /*
  5702. * Get a reference to the parent filp - we will fput it
  5703. * when the child event exits. This is safe to do because
  5704. * we are in the parent and we know that the filp still
  5705. * exists and has a nonzero count:
  5706. */
  5707. atomic_long_inc(&parent_event->filp->f_count);
  5708. /*
  5709. * Link this into the parent event's child list
  5710. */
  5711. WARN_ON_ONCE(parent_event->ctx->parent_ctx);
  5712. mutex_lock(&parent_event->child_mutex);
  5713. list_add_tail(&child_event->child_list, &parent_event->child_list);
  5714. mutex_unlock(&parent_event->child_mutex);
  5715. return child_event;
  5716. }
static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
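
/*
 * Inherit one group for one context type: events without attr.inherit
 * are skipped (and clear *inherited_all), and the child's context for
 * this type is allocated lazily on first use.
 */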
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);
	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize one perf_event context (of type ctxn) in the child
 * task_struct by inheriting inheritable events from the parent.
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event contexts in task_struct: set up the
 * per-task bookkeeping and inherit each context type from the parent.
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}
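
/*
 * Boot-time setup of per-cpu state: the software-event hash table
 * mutex and the per-cpu rotation list.
 */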
static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}
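
/*
 * CPU-online (hotplug) setup: if software events already have users,
 * pre-allocate this CPU's swevent hash list.
 */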
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
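
/*
 * Take this CPU's context off the per-cpu rotation list so the
 * rotation machinery no longer touches it; must run with IRQs
 * disabled on the CPU being torn down.
 */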
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}
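
/*
 * Runs on the CPU that is going away (via smp_call_function_single):
 * stop rotation and remove every pinned and flexible event from the
 * CPU context.
 */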
static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}
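
/*
 * For every registered PMU (walked under SRCU protection), send an
 * IPI to the dying CPU to run __perf_event_exit_context() on that
 * CPU's context.
 */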
static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
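
/*
 * CPU-offline path: release this CPU's software-event hash list and
 * then tear down its per-PMU contexts.
 */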
static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
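
/*
 * Reboot notifier: tear down perf on every online CPU so counters are
 * quiesced before the system reboots (or a kexec kernel takes over).
 */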
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};
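
/*
 * CPU hotplug notifier: set perf state up when a CPU comes online (or
 * a failed offline is rolled back) and tear it down when a CPU goes
 * away (or a failed online is rolled back).
 */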
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
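
/*
 * Boot-time initialization: per-cpu state, the core software and
 * clock PMUs, tracepoint support, the hotplug and reboot notifiers,
 * and hardware breakpoints.
 */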
void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
}
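
/*
 * Late (device_initcall) sysfs setup: register the pmu bus and create
 * a device for every PMU that registered before the bus existed.
 */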
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
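
/*
 * Allocate a new perf cgroup: the css plus the per-cpu
 * time-accounting info.
 */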
static struct cgroup_subsys_state *perf_cgroup_create(
	struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}
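
/*
 * Free the per-cpu info and the perf cgroup itself.
 */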
static void perf_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}
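
/*
 * Make @task re-evaluate its cgroup events: run perf_cgroup_switch()
 * on the task's CPU so events are scheduled out of the old cgroup and
 * back in under the new one.
 */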
static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_move(struct task_struct *task)
{
	task_function_call(task, __perf_cgroup_move, task);
}
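
/*
 * A task (and optionally its whole thread group) is being attached to
 * a new cgroup: switch each task's cgroup events over.
 */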
static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task,
		bool threadgroup)
{
	perf_cgroup_move(task);
	if (threadgroup) {
		struct task_struct *c;
		rcu_read_lock();
		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
			perf_cgroup_move(c);
		}
		rcu_read_unlock();
	}
}

static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	perf_cgroup_move(task);
}

struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.create		= perf_cgroup_create,
	.destroy	= perf_cgroup_destroy,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */