perf_event.c

  1. /*
  2. * Performance events core code:
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7. * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8. *
  9. * For licensing details see kernel-base/COPYING
  10. */
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/cpu.h>
  14. #include <linux/smp.h>
  15. #include <linux/file.h>
  16. #include <linux/poll.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/dcache.h>
  19. #include <linux/percpu.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/vmalloc.h>
  23. #include <linux/hardirq.h>
  24. #include <linux/rculist.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/syscalls.h>
  27. #include <linux/anon_inodes.h>
  28. #include <linux/kernel_stat.h>
  29. #include <linux/perf_event.h>
  30. #include <linux/ftrace_event.h>
  31. #include <linux/hw_breakpoint.h>
  32. #include <asm/irq_regs.h>
  33. /*
  34. * Each CPU has a list of per CPU events:
  35. */
  36. static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
  37. int perf_max_events __read_mostly = 1;
  38. static int perf_reserved_percpu __read_mostly;
  39. static int perf_overcommit __read_mostly = 1;
  40. static atomic_t nr_events __read_mostly;
  41. static atomic_t nr_mmap_events __read_mostly;
  42. static atomic_t nr_comm_events __read_mostly;
  43. static atomic_t nr_task_events __read_mostly;
  44. /*
  45. * perf event paranoia level:
  46. * -1 - not paranoid at all
  47. * 0 - disallow raw tracepoint access for unpriv
  48. * 1 - disallow cpu events for unpriv
  49. * 2 - disallow kernel profiling for unpriv
  50. */
  51. int sysctl_perf_event_paranoid __read_mostly = 1;
  52. int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
  53. /*
  54. * max perf event sample rate
  55. */
  56. int sysctl_perf_event_sample_rate __read_mostly = 100000;
  57. static atomic64_t perf_event_id;
  58. /*
  59. * Lock for (sysadmin-configurable) event reservations:
  60. */
  61. static DEFINE_SPINLOCK(perf_resource_lock);
  62. /*
  63. * Architecture provided APIs - weak aliases:
  64. */
  65. extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
  66. {
  67. return NULL;
  68. }
  69. void __weak hw_perf_disable(void) { barrier(); }
  70. void __weak hw_perf_enable(void) { barrier(); }
  71. int __weak
  72. hw_perf_group_sched_in(struct perf_event *group_leader,
  73. struct perf_cpu_context *cpuctx,
  74. struct perf_event_context *ctx)
  75. {
  76. return 0;
  77. }
  78. void __weak perf_event_print_debug(void) { }
  79. static DEFINE_PER_CPU(int, perf_disable_count);
  80. void perf_disable(void)
  81. {
  82. if (!__get_cpu_var(perf_disable_count)++)
  83. hw_perf_disable();
  84. }
  85. void perf_enable(void)
  86. {
  87. if (!--__get_cpu_var(perf_disable_count))
  88. hw_perf_enable();
  89. }
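/*
 * Usage sketch (illustrative, not part of the original file): callers
 * bracket list and hardware updates so a PMU interrupt/NMI cannot see a
 * half-updated state; nesting is handled by the per-CPU
 * perf_disable_count:
 *
 *	perf_disable();
 *	... update ctx->event_list / reprogram counters ...
 *	perf_enable();
 */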
  90. static void get_ctx(struct perf_event_context *ctx)
  91. {
  92. WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
  93. }
  94. static void free_ctx(struct rcu_head *head)
  95. {
  96. struct perf_event_context *ctx;
  97. ctx = container_of(head, struct perf_event_context, rcu_head);
  98. kfree(ctx);
  99. }
  100. static void put_ctx(struct perf_event_context *ctx)
  101. {
  102. if (atomic_dec_and_test(&ctx->refcount)) {
  103. if (ctx->parent_ctx)
  104. put_ctx(ctx->parent_ctx);
  105. if (ctx->task)
  106. put_task_struct(ctx->task);
  107. call_rcu(&ctx->rcu_head, free_ctx);
  108. }
  109. }
  110. static void unclone_ctx(struct perf_event_context *ctx)
  111. {
  112. if (ctx->parent_ctx) {
  113. put_ctx(ctx->parent_ctx);
  114. ctx->parent_ctx = NULL;
  115. }
  116. }
  117. /*
  118. * If we inherit events we want to return the parent event id
  119. * to userspace.
  120. */
  121. static u64 primary_event_id(struct perf_event *event)
  122. {
  123. u64 id = event->id;
  124. if (event->parent)
  125. id = event->parent->id;
  126. return id;
  127. }
  128. /*
  129. * Get the perf_event_context for a task and lock it.
130. * This has to cope with the fact that until it is locked,
  131. * the context could get moved to another task.
  132. */
  133. static struct perf_event_context *
  134. perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  135. {
  136. struct perf_event_context *ctx;
  137. rcu_read_lock();
  138. retry:
  139. ctx = rcu_dereference(task->perf_event_ctxp);
  140. if (ctx) {
  141. /*
  142. * If this context is a clone of another, it might
  143. * get swapped for another underneath us by
  144. * perf_event_task_sched_out, though the
  145. * rcu_read_lock() protects us from any context
  146. * getting freed. Lock the context and check if it
  147. * got swapped before we could get the lock, and retry
  148. * if so. If we locked the right context, then it
  149. * can't get swapped on us any more.
  150. */
  151. raw_spin_lock_irqsave(&ctx->lock, *flags);
  152. if (ctx != rcu_dereference(task->perf_event_ctxp)) {
  153. raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  154. goto retry;
  155. }
  156. if (!atomic_inc_not_zero(&ctx->refcount)) {
  157. raw_spin_unlock_irqrestore(&ctx->lock, *flags);
  158. ctx = NULL;
  159. }
  160. }
  161. rcu_read_unlock();
  162. return ctx;
  163. }
  164. /*
  165. * Get the context for a task and increment its pin_count so it
  166. * can't get swapped to another task. This also increments its
  167. * reference count so that the context can't get freed.
  168. */
  169. static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
  170. {
  171. struct perf_event_context *ctx;
  172. unsigned long flags;
  173. ctx = perf_lock_task_context(task, &flags);
  174. if (ctx) {
  175. ++ctx->pin_count;
  176. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  177. }
  178. return ctx;
  179. }
  180. static void perf_unpin_context(struct perf_event_context *ctx)
  181. {
  182. unsigned long flags;
  183. raw_spin_lock_irqsave(&ctx->lock, flags);
  184. --ctx->pin_count;
  185. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  186. put_ctx(ctx);
  187. }
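/*
 * Pairing sketch (illustrative only): a pinned context cannot be swapped
 * to another task or freed until perf_unpin_context() drops both the pin
 * and the reference taken via perf_lock_task_context():
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		... inspect or clone the context ...
 *		perf_unpin_context(ctx);
 *	}
 */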
  188. static inline u64 perf_clock(void)
  189. {
  190. return cpu_clock(raw_smp_processor_id());
  191. }
  192. /*
  193. * Update the record of the current time in a context.
  194. */
  195. static void update_context_time(struct perf_event_context *ctx)
  196. {
  197. u64 now = perf_clock();
  198. ctx->time += now - ctx->timestamp;
  199. ctx->timestamp = now;
  200. }
  201. /*
202. * Update the total_time_enabled and total_time_running fields for an event.
  203. */
  204. static void update_event_times(struct perf_event *event)
  205. {
  206. struct perf_event_context *ctx = event->ctx;
  207. u64 run_end;
  208. if (event->state < PERF_EVENT_STATE_INACTIVE ||
  209. event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
  210. return;
  211. if (ctx->is_active)
  212. run_end = ctx->time;
  213. else
  214. run_end = event->tstamp_stopped;
  215. event->total_time_enabled = run_end - event->tstamp_enabled;
  216. if (event->state == PERF_EVENT_STATE_INACTIVE)
  217. run_end = event->tstamp_stopped;
  218. else
  219. run_end = ctx->time;
  220. event->total_time_running = run_end - event->tstamp_running;
  221. }
  222. static struct list_head *
  223. ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
  224. {
  225. if (event->attr.pinned)
  226. return &ctx->pinned_groups;
  227. else
  228. return &ctx->flexible_groups;
  229. }
  230. /*
231. * Add an event to the lists for its context.
  232. * Must be called with ctx->mutex and ctx->lock held.
  233. */
  234. static void
  235. list_add_event(struct perf_event *event, struct perf_event_context *ctx)
  236. {
  237. struct perf_event *group_leader = event->group_leader;
  238. /*
  239. * Depending on whether it is a standalone or sibling event,
  240. * add it straight to the context's event list, or to the group
  241. * leader's sibling list:
  242. */
  243. if (group_leader == event) {
  244. struct list_head *list;
  245. if (is_software_event(event))
  246. event->group_flags |= PERF_GROUP_SOFTWARE;
  247. list = ctx_group_list(event, ctx);
  248. list_add_tail(&event->group_entry, list);
  249. } else {
  250. if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
  251. !is_software_event(event))
  252. group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
  253. list_add_tail(&event->group_entry, &group_leader->sibling_list);
  254. group_leader->nr_siblings++;
  255. }
  256. list_add_rcu(&event->event_entry, &ctx->event_list);
  257. ctx->nr_events++;
  258. if (event->attr.inherit_stat)
  259. ctx->nr_stat++;
  260. }
  261. /*
262. * Remove an event from the lists for its context.
  263. * Must be called with ctx->mutex and ctx->lock held.
  264. */
  265. static void
  266. list_del_event(struct perf_event *event, struct perf_event_context *ctx)
  267. {
  268. struct perf_event *sibling, *tmp;
  269. if (list_empty(&event->group_entry))
  270. return;
  271. ctx->nr_events--;
  272. if (event->attr.inherit_stat)
  273. ctx->nr_stat--;
  274. list_del_init(&event->group_entry);
  275. list_del_rcu(&event->event_entry);
  276. if (event->group_leader != event)
  277. event->group_leader->nr_siblings--;
  278. update_event_times(event);
  279. /*
  280. * If event was in error state, then keep it
  281. * that way, otherwise bogus counts will be
  282. * returned on read(). The only way to get out
  283. * of error state is by explicit re-enabling
  284. * of the event
  285. */
  286. if (event->state > PERF_EVENT_STATE_OFF)
  287. event->state = PERF_EVENT_STATE_OFF;
  288. /*
  289. * If this was a group event with sibling events then
  290. * upgrade the siblings to singleton events by adding them
  291. * to the context list directly:
  292. */
  293. list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
  294. struct list_head *list;
  295. list = ctx_group_list(event, ctx);
  296. list_move_tail(&sibling->group_entry, list);
  297. sibling->group_leader = sibling;
  298. /* Inherit group flags from the previous leader */
  299. sibling->group_flags = event->group_flags;
  300. }
  301. }
  302. static void
  303. event_sched_out(struct perf_event *event,
  304. struct perf_cpu_context *cpuctx,
  305. struct perf_event_context *ctx)
  306. {
  307. if (event->state != PERF_EVENT_STATE_ACTIVE)
  308. return;
  309. event->state = PERF_EVENT_STATE_INACTIVE;
  310. if (event->pending_disable) {
  311. event->pending_disable = 0;
  312. event->state = PERF_EVENT_STATE_OFF;
  313. }
  314. event->tstamp_stopped = ctx->time;
  315. event->pmu->disable(event);
  316. event->oncpu = -1;
  317. if (!is_software_event(event))
  318. cpuctx->active_oncpu--;
  319. ctx->nr_active--;
  320. if (event->attr.exclusive || !cpuctx->active_oncpu)
  321. cpuctx->exclusive = 0;
  322. }
  323. static void
  324. group_sched_out(struct perf_event *group_event,
  325. struct perf_cpu_context *cpuctx,
  326. struct perf_event_context *ctx)
  327. {
  328. struct perf_event *event;
  329. if (group_event->state != PERF_EVENT_STATE_ACTIVE)
  330. return;
  331. event_sched_out(group_event, cpuctx, ctx);
  332. /*
  333. * Schedule out siblings (if any):
  334. */
  335. list_for_each_entry(event, &group_event->sibling_list, group_entry)
  336. event_sched_out(event, cpuctx, ctx);
  337. if (group_event->attr.exclusive)
  338. cpuctx->exclusive = 0;
  339. }
  340. /*
  341. * Cross CPU call to remove a performance event
  342. *
  343. * We disable the event on the hardware level first. After that we
  344. * remove it from the context list.
  345. */
  346. static void __perf_event_remove_from_context(void *info)
  347. {
  348. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  349. struct perf_event *event = info;
  350. struct perf_event_context *ctx = event->ctx;
  351. /*
  352. * If this is a task context, we need to check whether it is
  353. * the current task context of this cpu. If not it has been
  354. * scheduled out before the smp call arrived.
  355. */
  356. if (ctx->task && cpuctx->task_ctx != ctx)
  357. return;
  358. raw_spin_lock(&ctx->lock);
  359. /*
  360. * Protect the list operation against NMI by disabling the
  361. * events on a global level.
  362. */
  363. perf_disable();
  364. event_sched_out(event, cpuctx, ctx);
  365. list_del_event(event, ctx);
  366. if (!ctx->task) {
  367. /*
  368. * Allow more per task events with respect to the
  369. * reservation:
  370. */
  371. cpuctx->max_pertask =
  372. min(perf_max_events - ctx->nr_events,
  373. perf_max_events - perf_reserved_percpu);
  374. }
  375. perf_enable();
  376. raw_spin_unlock(&ctx->lock);
  377. }
  378. /*
  379. * Remove the event from a task's (or a CPU's) list of events.
  380. *
  381. * Must be called with ctx->mutex held.
  382. *
383. * CPU events are removed with an smp call. For task events we only
  384. * call when the task is on a CPU.
  385. *
  386. * If event->ctx is a cloned context, callers must make sure that
  387. * every task struct that event->ctx->task could possibly point to
  388. * remains valid. This is OK when called from perf_release since
  389. * that only calls us on the top-level context, which can't be a clone.
  390. * When called from perf_event_exit_task, it's OK because the
  391. * context has been detached from its task.
  392. */
  393. static void perf_event_remove_from_context(struct perf_event *event)
  394. {
  395. struct perf_event_context *ctx = event->ctx;
  396. struct task_struct *task = ctx->task;
  397. if (!task) {
  398. /*
  399. * Per cpu events are removed via an smp call and
  400. * the removal is always successful.
  401. */
  402. smp_call_function_single(event->cpu,
  403. __perf_event_remove_from_context,
  404. event, 1);
  405. return;
  406. }
  407. retry:
  408. task_oncpu_function_call(task, __perf_event_remove_from_context,
  409. event);
  410. raw_spin_lock_irq(&ctx->lock);
  411. /*
  412. * If the context is active we need to retry the smp call.
  413. */
  414. if (ctx->nr_active && !list_empty(&event->group_entry)) {
  415. raw_spin_unlock_irq(&ctx->lock);
  416. goto retry;
  417. }
  418. /*
419. * The lock prevents this context from being scheduled in, so we
420. * can remove the event safely if the call above did not
421. * succeed.
  422. */
  423. if (!list_empty(&event->group_entry))
  424. list_del_event(event, ctx);
  425. raw_spin_unlock_irq(&ctx->lock);
  426. }
  427. /*
  428. * Update total_time_enabled and total_time_running for all events in a group.
  429. */
  430. static void update_group_times(struct perf_event *leader)
  431. {
  432. struct perf_event *event;
  433. update_event_times(leader);
  434. list_for_each_entry(event, &leader->sibling_list, group_entry)
  435. update_event_times(event);
  436. }
  437. /*
  438. * Cross CPU call to disable a performance event
  439. */
  440. static void __perf_event_disable(void *info)
  441. {
  442. struct perf_event *event = info;
  443. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  444. struct perf_event_context *ctx = event->ctx;
  445. /*
446. * If this is a per-task event, we need to check whether this
  447. * event's task is the current task on this cpu.
  448. */
  449. if (ctx->task && cpuctx->task_ctx != ctx)
  450. return;
  451. raw_spin_lock(&ctx->lock);
  452. /*
  453. * If the event is on, turn it off.
  454. * If it is in error state, leave it in error state.
  455. */
  456. if (event->state >= PERF_EVENT_STATE_INACTIVE) {
  457. update_context_time(ctx);
  458. update_group_times(event);
  459. if (event == event->group_leader)
  460. group_sched_out(event, cpuctx, ctx);
  461. else
  462. event_sched_out(event, cpuctx, ctx);
  463. event->state = PERF_EVENT_STATE_OFF;
  464. }
  465. raw_spin_unlock(&ctx->lock);
  466. }
  467. /*
468. * Disable an event.
  469. *
  470. * If event->ctx is a cloned context, callers must make sure that
  471. * every task struct that event->ctx->task could possibly point to
472. * remains valid. This condition is satisfied when called through
  473. * perf_event_for_each_child or perf_event_for_each because they
  474. * hold the top-level event's child_mutex, so any descendant that
  475. * goes to exit will block in sync_child_event.
  476. * When called from perf_pending_event it's OK because event->ctx
  477. * is the current context on this CPU and preemption is disabled,
  478. * hence we can't get into perf_event_task_sched_out for this context.
  479. */
  480. void perf_event_disable(struct perf_event *event)
  481. {
  482. struct perf_event_context *ctx = event->ctx;
  483. struct task_struct *task = ctx->task;
  484. if (!task) {
  485. /*
  486. * Disable the event on the cpu that it's on
  487. */
  488. smp_call_function_single(event->cpu, __perf_event_disable,
  489. event, 1);
  490. return;
  491. }
  492. retry:
  493. task_oncpu_function_call(task, __perf_event_disable, event);
  494. raw_spin_lock_irq(&ctx->lock);
  495. /*
  496. * If the event is still active, we need to retry the cross-call.
  497. */
  498. if (event->state == PERF_EVENT_STATE_ACTIVE) {
  499. raw_spin_unlock_irq(&ctx->lock);
  500. goto retry;
  501. }
  502. /*
  503. * Since we have the lock this context can't be scheduled
  504. * in, so we can change the state safely.
  505. */
  506. if (event->state == PERF_EVENT_STATE_INACTIVE) {
  507. update_group_times(event);
  508. event->state = PERF_EVENT_STATE_OFF;
  509. }
  510. raw_spin_unlock_irq(&ctx->lock);
  511. }
  512. static int
  513. event_sched_in(struct perf_event *event,
  514. struct perf_cpu_context *cpuctx,
  515. struct perf_event_context *ctx)
  516. {
  517. if (event->state <= PERF_EVENT_STATE_OFF)
  518. return 0;
  519. event->state = PERF_EVENT_STATE_ACTIVE;
  520. event->oncpu = smp_processor_id();
  521. /*
  522. * The new state must be visible before we turn it on in the hardware:
  523. */
  524. smp_wmb();
  525. if (event->pmu->enable(event)) {
  526. event->state = PERF_EVENT_STATE_INACTIVE;
  527. event->oncpu = -1;
  528. return -EAGAIN;
  529. }
  530. event->tstamp_running += ctx->time - event->tstamp_stopped;
  531. if (!is_software_event(event))
  532. cpuctx->active_oncpu++;
  533. ctx->nr_active++;
  534. if (event->attr.exclusive)
  535. cpuctx->exclusive = 1;
  536. return 0;
  537. }
  538. static int
  539. group_sched_in(struct perf_event *group_event,
  540. struct perf_cpu_context *cpuctx,
  541. struct perf_event_context *ctx)
  542. {
  543. struct perf_event *event, *partial_group;
  544. int ret;
  545. if (group_event->state == PERF_EVENT_STATE_OFF)
  546. return 0;
  547. ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
  548. if (ret)
  549. return ret < 0 ? ret : 0;
  550. if (event_sched_in(group_event, cpuctx, ctx))
  551. return -EAGAIN;
  552. /*
  553. * Schedule in siblings as one group (if any):
  554. */
  555. list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  556. if (event_sched_in(event, cpuctx, ctx)) {
  557. partial_group = event;
  558. goto group_error;
  559. }
  560. }
  561. return 0;
  562. group_error:
  563. /*
  564. * Groups can be scheduled in as one unit only, so undo any
  565. * partial group before returning:
  566. */
  567. list_for_each_entry(event, &group_event->sibling_list, group_entry) {
  568. if (event == partial_group)
  569. break;
  570. event_sched_out(event, cpuctx, ctx);
  571. }
  572. event_sched_out(group_event, cpuctx, ctx);
  573. return -EAGAIN;
  574. }
  575. /*
  576. * Work out whether we can put this event group on the CPU now.
  577. */
  578. static int group_can_go_on(struct perf_event *event,
  579. struct perf_cpu_context *cpuctx,
  580. int can_add_hw)
  581. {
  582. /*
  583. * Groups consisting entirely of software events can always go on.
  584. */
  585. if (event->group_flags & PERF_GROUP_SOFTWARE)
  586. return 1;
  587. /*
  588. * If an exclusive group is already on, no other hardware
  589. * events can go on.
  590. */
  591. if (cpuctx->exclusive)
  592. return 0;
  593. /*
  594. * If this group is exclusive and there are already
  595. * events on the CPU, it can't go on.
  596. */
  597. if (event->attr.exclusive && cpuctx->active_oncpu)
  598. return 0;
  599. /*
  600. * Otherwise, try to add it if all previous groups were able
  601. * to go on.
  602. */
  603. return can_add_hw;
  604. }
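/*
 * Informal summary of the checks above (added for clarity): software-only
 * groups always go on; nothing goes on while an exclusive group owns the
 * PMU; an exclusive group itself needs an otherwise idle PMU; everything
 * else depends on whether the earlier groups fit (can_add_hw).
 */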
  605. static void add_event_to_ctx(struct perf_event *event,
  606. struct perf_event_context *ctx)
  607. {
  608. list_add_event(event, ctx);
  609. event->tstamp_enabled = ctx->time;
  610. event->tstamp_running = ctx->time;
  611. event->tstamp_stopped = ctx->time;
  612. }
  613. /*
  614. * Cross CPU call to install and enable a performance event
  615. *
  616. * Must be called with ctx->mutex held
  617. */
  618. static void __perf_install_in_context(void *info)
  619. {
  620. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  621. struct perf_event *event = info;
  622. struct perf_event_context *ctx = event->ctx;
  623. struct perf_event *leader = event->group_leader;
  624. int err;
  625. /*
  626. * If this is a task context, we need to check whether it is
  627. * the current task context of this cpu. If not it has been
  628. * scheduled out before the smp call arrived.
  629. * Or possibly this is the right context but it isn't
  630. * on this cpu because it had no events.
  631. */
  632. if (ctx->task && cpuctx->task_ctx != ctx) {
  633. if (cpuctx->task_ctx || ctx->task != current)
  634. return;
  635. cpuctx->task_ctx = ctx;
  636. }
  637. raw_spin_lock(&ctx->lock);
  638. ctx->is_active = 1;
  639. update_context_time(ctx);
  640. /*
  641. * Protect the list operation against NMI by disabling the
  642. * events on a global level. NOP for non NMI based events.
  643. */
  644. perf_disable();
  645. add_event_to_ctx(event, ctx);
  646. if (event->cpu != -1 && event->cpu != smp_processor_id())
  647. goto unlock;
  648. /*
  649. * Don't put the event on if it is disabled or if
  650. * it is in a group and the group isn't on.
  651. */
  652. if (event->state != PERF_EVENT_STATE_INACTIVE ||
  653. (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
  654. goto unlock;
  655. /*
  656. * An exclusive event can't go on if there are already active
  657. * hardware events, and no hardware event can go on if there
  658. * is already an exclusive event on.
  659. */
  660. if (!group_can_go_on(event, cpuctx, 1))
  661. err = -EEXIST;
  662. else
  663. err = event_sched_in(event, cpuctx, ctx);
  664. if (err) {
  665. /*
  666. * This event couldn't go on. If it is in a group
  667. * then we have to pull the whole group off.
  668. * If the event group is pinned then put it in error state.
  669. */
  670. if (leader != event)
  671. group_sched_out(leader, cpuctx, ctx);
  672. if (leader->attr.pinned) {
  673. update_group_times(leader);
  674. leader->state = PERF_EVENT_STATE_ERROR;
  675. }
  676. }
  677. if (!err && !ctx->task && cpuctx->max_pertask)
  678. cpuctx->max_pertask--;
  679. unlock:
  680. perf_enable();
  681. raw_spin_unlock(&ctx->lock);
  682. }
  683. /*
  684. * Attach a performance event to a context
  685. *
  686. * First we add the event to the list with the hardware enable bit
  687. * in event->hw_config cleared.
  688. *
689. * If the event is attached to a task which is on a CPU we use an smp
  690. * call to enable it in the task context. The task might have been
  691. * scheduled away, but we check this in the smp call again.
  692. *
  693. * Must be called with ctx->mutex held.
  694. */
  695. static void
  696. perf_install_in_context(struct perf_event_context *ctx,
  697. struct perf_event *event,
  698. int cpu)
  699. {
  700. struct task_struct *task = ctx->task;
  701. if (!task) {
  702. /*
  703. * Per cpu events are installed via an smp call and
  704. * the install is always successful.
  705. */
  706. smp_call_function_single(cpu, __perf_install_in_context,
  707. event, 1);
  708. return;
  709. }
  710. retry:
  711. task_oncpu_function_call(task, __perf_install_in_context,
  712. event);
  713. raw_spin_lock_irq(&ctx->lock);
  714. /*
716. * If the context is active we need to retry the smp call.
  716. */
  717. if (ctx->is_active && list_empty(&event->group_entry)) {
  718. raw_spin_unlock_irq(&ctx->lock);
  719. goto retry;
  720. }
  721. /*
722. * The lock prevents this context from being scheduled in, so we
723. * can add the event safely if the call above did not
724. * succeed.
  725. */
  726. if (list_empty(&event->group_entry))
  727. add_event_to_ctx(event, ctx);
  728. raw_spin_unlock_irq(&ctx->lock);
  729. }
  730. /*
731. * Put an event into inactive state and update time fields.
  732. * Enabling the leader of a group effectively enables all
  733. * the group members that aren't explicitly disabled, so we
  734. * have to update their ->tstamp_enabled also.
  735. * Note: this works for group members as well as group leaders
  736. * since the non-leader members' sibling_lists will be empty.
  737. */
  738. static void __perf_event_mark_enabled(struct perf_event *event,
  739. struct perf_event_context *ctx)
  740. {
  741. struct perf_event *sub;
  742. event->state = PERF_EVENT_STATE_INACTIVE;
  743. event->tstamp_enabled = ctx->time - event->total_time_enabled;
  744. list_for_each_entry(sub, &event->sibling_list, group_entry)
  745. if (sub->state >= PERF_EVENT_STATE_INACTIVE)
  746. sub->tstamp_enabled =
  747. ctx->time - sub->total_time_enabled;
  748. }
  749. /*
  750. * Cross CPU call to enable a performance event
  751. */
  752. static void __perf_event_enable(void *info)
  753. {
  754. struct perf_event *event = info;
  755. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  756. struct perf_event_context *ctx = event->ctx;
  757. struct perf_event *leader = event->group_leader;
  758. int err;
  759. /*
760. * If this is a per-task event, we need to check whether this
  761. * event's task is the current task on this cpu.
  762. */
  763. if (ctx->task && cpuctx->task_ctx != ctx) {
  764. if (cpuctx->task_ctx || ctx->task != current)
  765. return;
  766. cpuctx->task_ctx = ctx;
  767. }
  768. raw_spin_lock(&ctx->lock);
  769. ctx->is_active = 1;
  770. update_context_time(ctx);
  771. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  772. goto unlock;
  773. __perf_event_mark_enabled(event, ctx);
  774. if (event->cpu != -1 && event->cpu != smp_processor_id())
  775. goto unlock;
  776. /*
  777. * If the event is in a group and isn't the group leader,
  778. * then don't put it on unless the group is on.
  779. */
  780. if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
  781. goto unlock;
  782. if (!group_can_go_on(event, cpuctx, 1)) {
  783. err = -EEXIST;
  784. } else {
  785. perf_disable();
  786. if (event == leader)
  787. err = group_sched_in(event, cpuctx, ctx);
  788. else
  789. err = event_sched_in(event, cpuctx, ctx);
  790. perf_enable();
  791. }
  792. if (err) {
  793. /*
  794. * If this event can't go on and it's part of a
  795. * group, then the whole group has to come off.
  796. */
  797. if (leader != event)
  798. group_sched_out(leader, cpuctx, ctx);
  799. if (leader->attr.pinned) {
  800. update_group_times(leader);
  801. leader->state = PERF_EVENT_STATE_ERROR;
  802. }
  803. }
  804. unlock:
  805. raw_spin_unlock(&ctx->lock);
  806. }
  807. /*
808. * Enable an event.
  809. *
  810. * If event->ctx is a cloned context, callers must make sure that
  811. * every task struct that event->ctx->task could possibly point to
  812. * remains valid. This condition is satisfied when called through
  813. * perf_event_for_each_child or perf_event_for_each as described
  814. * for perf_event_disable.
  815. */
  816. void perf_event_enable(struct perf_event *event)
  817. {
  818. struct perf_event_context *ctx = event->ctx;
  819. struct task_struct *task = ctx->task;
  820. if (!task) {
  821. /*
  822. * Enable the event on the cpu that it's on
  823. */
  824. smp_call_function_single(event->cpu, __perf_event_enable,
  825. event, 1);
  826. return;
  827. }
  828. raw_spin_lock_irq(&ctx->lock);
  829. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  830. goto out;
  831. /*
  832. * If the event is in error state, clear that first.
  833. * That way, if we see the event in error state below, we
  834. * know that it has gone back into error state, as distinct
  835. * from the task having been scheduled away before the
  836. * cross-call arrived.
  837. */
  838. if (event->state == PERF_EVENT_STATE_ERROR)
  839. event->state = PERF_EVENT_STATE_OFF;
  840. retry:
  841. raw_spin_unlock_irq(&ctx->lock);
  842. task_oncpu_function_call(task, __perf_event_enable, event);
  843. raw_spin_lock_irq(&ctx->lock);
  844. /*
  845. * If the context is active and the event is still off,
  846. * we need to retry the cross-call.
  847. */
  848. if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
  849. goto retry;
  850. /*
  851. * Since we have the lock this context can't be scheduled
  852. * in, so we can change the state safely.
  853. */
  854. if (event->state == PERF_EVENT_STATE_OFF)
  855. __perf_event_mark_enabled(event, ctx);
  856. out:
  857. raw_spin_unlock_irq(&ctx->lock);
  858. }
  859. static int perf_event_refresh(struct perf_event *event, int refresh)
  860. {
  861. /*
  862. * not supported on inherited events
  863. */
  864. if (event->attr.inherit)
  865. return -EINVAL;
  866. atomic_add(refresh, &event->event_limit);
  867. perf_event_enable(event);
  868. return 0;
  869. }
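/*
 * Illustrative note (assumption: the usual caller is the
 * PERF_EVENT_IOC_REFRESH ioctl path): each refresh adds to event_limit
 * and re-enables the event, so the event can disable itself again after
 * roughly 'refresh' further overflow interrupts.
 */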
  870. enum event_type_t {
  871. EVENT_FLEXIBLE = 0x1,
  872. EVENT_PINNED = 0x2,
  873. EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
  874. };
  875. static void ctx_sched_out(struct perf_event_context *ctx,
  876. struct perf_cpu_context *cpuctx,
  877. enum event_type_t event_type)
  878. {
  879. struct perf_event *event;
  880. raw_spin_lock(&ctx->lock);
  881. ctx->is_active = 0;
  882. if (likely(!ctx->nr_events))
  883. goto out;
  884. update_context_time(ctx);
  885. perf_disable();
  886. if (!ctx->nr_active)
  887. goto out_enable;
  888. if (event_type & EVENT_PINNED)
  889. list_for_each_entry(event, &ctx->pinned_groups, group_entry)
  890. group_sched_out(event, cpuctx, ctx);
  891. if (event_type & EVENT_FLEXIBLE)
  892. list_for_each_entry(event, &ctx->flexible_groups, group_entry)
  893. group_sched_out(event, cpuctx, ctx);
  894. out_enable:
  895. perf_enable();
  896. out:
  897. raw_spin_unlock(&ctx->lock);
  898. }
  899. /*
  900. * Test whether two contexts are equivalent, i.e. whether they
  901. * have both been cloned from the same version of the same context
  902. * and they both have the same number of enabled events.
  903. * If the number of enabled events is the same, then the set
  904. * of enabled events should be the same, because these are both
  905. * inherited contexts, therefore we can't access individual events
  906. * in them directly with an fd; we can only enable/disable all
  907. * events via prctl, or enable/disable all events in a family
  908. * via ioctl, which will have the same effect on both contexts.
  909. */
  910. static int context_equiv(struct perf_event_context *ctx1,
  911. struct perf_event_context *ctx2)
  912. {
  913. return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
  914. && ctx1->parent_gen == ctx2->parent_gen
  915. && !ctx1->pin_count && !ctx2->pin_count;
  916. }
  917. static void __perf_event_sync_stat(struct perf_event *event,
  918. struct perf_event *next_event)
  919. {
  920. u64 value;
  921. if (!event->attr.inherit_stat)
  922. return;
  923. /*
  924. * Update the event value, we cannot use perf_event_read()
  925. * because we're in the middle of a context switch and have IRQs
  926. * disabled, which upsets smp_call_function_single(), however
  927. * we know the event must be on the current CPU, therefore we
  928. * don't need to use it.
  929. */
  930. switch (event->state) {
  931. case PERF_EVENT_STATE_ACTIVE:
  932. event->pmu->read(event);
  933. /* fall-through */
  934. case PERF_EVENT_STATE_INACTIVE:
  935. update_event_times(event);
  936. break;
  937. default:
  938. break;
  939. }
  940. /*
  941. * In order to keep per-task stats reliable we need to flip the event
  942. * values when we flip the contexts.
  943. */
  944. value = atomic64_read(&next_event->count);
  945. value = atomic64_xchg(&event->count, value);
  946. atomic64_set(&next_event->count, value);
  947. swap(event->total_time_enabled, next_event->total_time_enabled);
  948. swap(event->total_time_running, next_event->total_time_running);
  949. /*
  950. * Since we swizzled the values, update the user visible data too.
  951. */
  952. perf_event_update_userpage(event);
  953. perf_event_update_userpage(next_event);
  954. }
  955. #define list_next_entry(pos, member) \
  956. list_entry(pos->member.next, typeof(*pos), member)
  957. static void perf_event_sync_stat(struct perf_event_context *ctx,
  958. struct perf_event_context *next_ctx)
  959. {
  960. struct perf_event *event, *next_event;
  961. if (!ctx->nr_stat)
  962. return;
  963. update_context_time(ctx);
  964. event = list_first_entry(&ctx->event_list,
  965. struct perf_event, event_entry);
  966. next_event = list_first_entry(&next_ctx->event_list,
  967. struct perf_event, event_entry);
  968. while (&event->event_entry != &ctx->event_list &&
  969. &next_event->event_entry != &next_ctx->event_list) {
  970. __perf_event_sync_stat(event, next_event);
  971. event = list_next_entry(event, event_entry);
  972. next_event = list_next_entry(next_event, event_entry);
  973. }
  974. }
  975. /*
  976. * Called from scheduler to remove the events of the current task,
  977. * with interrupts disabled.
  978. *
  979. * We stop each event and update the event value in event->count.
  980. *
  981. * This does not protect us against NMI, but disable()
  982. * sets the disabled bit in the control field of event _before_
983. * accessing the event control register. If an NMI hits, then it will
  984. * not restart the event.
  985. */
  986. void perf_event_task_sched_out(struct task_struct *task,
  987. struct task_struct *next)
  988. {
  989. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  990. struct perf_event_context *ctx = task->perf_event_ctxp;
  991. struct perf_event_context *next_ctx;
  992. struct perf_event_context *parent;
  993. struct pt_regs *regs;
  994. int do_switch = 1;
  995. regs = task_pt_regs(task);
  996. perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
  997. if (likely(!ctx || !cpuctx->task_ctx))
  998. return;
  999. rcu_read_lock();
  1000. parent = rcu_dereference(ctx->parent_ctx);
  1001. next_ctx = next->perf_event_ctxp;
  1002. if (parent && next_ctx &&
  1003. rcu_dereference(next_ctx->parent_ctx) == parent) {
  1004. /*
  1005. * Looks like the two contexts are clones, so we might be
  1006. * able to optimize the context switch. We lock both
  1007. * contexts and check that they are clones under the
  1008. * lock (including re-checking that neither has been
  1009. * uncloned in the meantime). It doesn't matter which
  1010. * order we take the locks because no other cpu could
  1011. * be trying to lock both of these tasks.
  1012. */
  1013. raw_spin_lock(&ctx->lock);
  1014. raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
  1015. if (context_equiv(ctx, next_ctx)) {
  1016. /*
  1017. * XXX do we need a memory barrier of sorts
  1018. * wrt to rcu_dereference() of perf_event_ctxp
  1019. */
  1020. task->perf_event_ctxp = next_ctx;
  1021. next->perf_event_ctxp = ctx;
  1022. ctx->task = next;
  1023. next_ctx->task = task;
  1024. do_switch = 0;
  1025. perf_event_sync_stat(ctx, next_ctx);
  1026. }
  1027. raw_spin_unlock(&next_ctx->lock);
  1028. raw_spin_unlock(&ctx->lock);
  1029. }
  1030. rcu_read_unlock();
  1031. if (do_switch) {
  1032. ctx_sched_out(ctx, cpuctx, EVENT_ALL);
  1033. cpuctx->task_ctx = NULL;
  1034. }
  1035. }
  1036. static void task_ctx_sched_out(struct perf_event_context *ctx,
  1037. enum event_type_t event_type)
  1038. {
  1039. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  1040. if (!cpuctx->task_ctx)
  1041. return;
  1042. if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  1043. return;
  1044. ctx_sched_out(ctx, cpuctx, event_type);
  1045. cpuctx->task_ctx = NULL;
  1046. }
  1047. /*
  1048. * Called with IRQs disabled
  1049. */
  1050. static void __perf_event_task_sched_out(struct perf_event_context *ctx)
  1051. {
  1052. task_ctx_sched_out(ctx, EVENT_ALL);
  1053. }
  1054. /*
  1055. * Called with IRQs disabled
  1056. */
  1057. static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
  1058. enum event_type_t event_type)
  1059. {
  1060. ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
  1061. }
  1062. static void
  1063. ctx_pinned_sched_in(struct perf_event_context *ctx,
  1064. struct perf_cpu_context *cpuctx)
  1065. {
  1066. struct perf_event *event;
  1067. list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  1068. if (event->state <= PERF_EVENT_STATE_OFF)
  1069. continue;
  1070. if (event->cpu != -1 && event->cpu != smp_processor_id())
  1071. continue;
  1072. if (group_can_go_on(event, cpuctx, 1))
  1073. group_sched_in(event, cpuctx, ctx);
  1074. /*
  1075. * If this pinned group hasn't been scheduled,
  1076. * put it in error state.
  1077. */
  1078. if (event->state == PERF_EVENT_STATE_INACTIVE) {
  1079. update_group_times(event);
  1080. event->state = PERF_EVENT_STATE_ERROR;
  1081. }
  1082. }
  1083. }
  1084. static void
  1085. ctx_flexible_sched_in(struct perf_event_context *ctx,
  1086. struct perf_cpu_context *cpuctx)
  1087. {
  1088. struct perf_event *event;
  1089. int can_add_hw = 1;
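/*
 * Editorial note: can_add_hw starts at 1 and is cleared as soon as one
 * group fails to schedule; from then on later hardware groups are not
 * attempted on this pass, so a group that would still fit cannot jump
 * ahead of one that did not. Software-only groups can still pass
 * group_can_go_on() regardless.
 */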
  1090. list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  1091. /* Ignore events in OFF or ERROR state */
  1092. if (event->state <= PERF_EVENT_STATE_OFF)
  1093. continue;
  1094. /*
  1095. * Listen to the 'cpu' scheduling filter constraint
  1096. * of events:
  1097. */
  1098. if (event->cpu != -1 && event->cpu != smp_processor_id())
  1099. continue;
  1100. if (group_can_go_on(event, cpuctx, can_add_hw))
  1101. if (group_sched_in(event, cpuctx, ctx))
  1102. can_add_hw = 0;
  1103. }
  1104. }
  1105. static void
  1106. ctx_sched_in(struct perf_event_context *ctx,
  1107. struct perf_cpu_context *cpuctx,
  1108. enum event_type_t event_type)
  1109. {
  1110. raw_spin_lock(&ctx->lock);
  1111. ctx->is_active = 1;
  1112. if (likely(!ctx->nr_events))
  1113. goto out;
  1114. ctx->timestamp = perf_clock();
  1115. perf_disable();
  1116. /*
  1117. * First go through the list and put on any pinned groups
  1118. * in order to give them the best chance of going on.
  1119. */
  1120. if (event_type & EVENT_PINNED)
  1121. ctx_pinned_sched_in(ctx, cpuctx);
  1122. /* Then walk through the lower prio flexible groups */
  1123. if (event_type & EVENT_FLEXIBLE)
  1124. ctx_flexible_sched_in(ctx, cpuctx);
  1125. perf_enable();
  1126. out:
  1127. raw_spin_unlock(&ctx->lock);
  1128. }
  1129. static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
  1130. enum event_type_t event_type)
  1131. {
  1132. struct perf_event_context *ctx = &cpuctx->ctx;
  1133. ctx_sched_in(ctx, cpuctx, event_type);
  1134. }
  1135. static void task_ctx_sched_in(struct task_struct *task,
  1136. enum event_type_t event_type)
  1137. {
  1138. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  1139. struct perf_event_context *ctx = task->perf_event_ctxp;
  1140. if (likely(!ctx))
  1141. return;
  1142. if (cpuctx->task_ctx == ctx)
  1143. return;
  1144. ctx_sched_in(ctx, cpuctx, event_type);
  1145. cpuctx->task_ctx = ctx;
  1146. }
  1147. /*
  1148. * Called from scheduler to add the events of the current task
  1149. * with interrupts disabled.
  1150. *
  1151. * We restore the event value and then enable it.
  1152. *
  1153. * This does not protect us against NMI, but enable()
  1154. * sets the enabled bit in the control field of event _before_
1155. * accessing the event control register. If an NMI hits, then it will
  1156. * keep the event running.
  1157. */
  1158. void perf_event_task_sched_in(struct task_struct *task)
  1159. {
  1160. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  1161. struct perf_event_context *ctx = task->perf_event_ctxp;
  1162. if (likely(!ctx))
  1163. return;
  1164. if (cpuctx->task_ctx == ctx)
  1165. return;
  1166. perf_disable();
  1167. /*
  1168. * We want to keep the following priority order:
  1169. * cpu pinned (that don't need to move), task pinned,
  1170. * cpu flexible, task flexible.
  1171. */
  1172. cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
  1173. ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
  1174. cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
  1175. ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
  1176. cpuctx->task_ctx = ctx;
  1177. perf_enable();
  1178. }
  1179. #define MAX_INTERRUPTS (~0ULL)
  1180. static void perf_log_throttle(struct perf_event *event, int enable);
  1181. static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
  1182. {
  1183. u64 frequency = event->attr.sample_freq;
  1184. u64 sec = NSEC_PER_SEC;
  1185. u64 divisor, dividend;
  1186. int count_fls, nsec_fls, frequency_fls, sec_fls;
  1187. count_fls = fls64(count);
  1188. nsec_fls = fls64(nsec);
  1189. frequency_fls = fls64(frequency);
  1190. sec_fls = 30;
  1191. /*
  1192. * We got @count in @nsec, with a target of sample_freq HZ
  1193. * the target period becomes:
  1194. *
1195. *              @count * 10^9
1196. * period = -------------------
1197. *           @nsec * sample_freq
  1198. *
  1199. */
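/*
 * Worked example (illustrative, not from the original source): if the
 * event counted 5,000,000 increments in the last 10,000,000 ns and
 * sample_freq is 1000 Hz, then
 *
 *     period = 5e6 * 1e9 / (1e7 * 1000) = 500,000
 *
 * i.e. one sample every 500,000 events, which at the observed rate of
 * 5e8 events/sec gives the requested ~1000 samples/sec. The fls-based
 * reductions below only exist to keep that multiplication and division
 * within 64 bits.
 */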
  1200. /*
  1201. * Reduce accuracy by one bit such that @a and @b converge
  1202. * to a similar magnitude.
  1203. */
  1204. #define REDUCE_FLS(a, b) \
  1205. do { \
  1206. if (a##_fls > b##_fls) { \
  1207. a >>= 1; \
  1208. a##_fls--; \
  1209. } else { \
  1210. b >>= 1; \
  1211. b##_fls--; \
  1212. } \
  1213. } while (0)
  1214. /*
  1215. * Reduce accuracy until either term fits in a u64, then proceed with
  1216. * the other, so that finally we can do a u64/u64 division.
  1217. */
  1218. while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
  1219. REDUCE_FLS(nsec, frequency);
  1220. REDUCE_FLS(sec, count);
  1221. }
  1222. if (count_fls + sec_fls > 64) {
  1223. divisor = nsec * frequency;
  1224. while (count_fls + sec_fls > 64) {
  1225. REDUCE_FLS(count, sec);
  1226. divisor >>= 1;
  1227. }
  1228. dividend = count * sec;
  1229. } else {
  1230. dividend = count * sec;
  1231. while (nsec_fls + frequency_fls > 64) {
  1232. REDUCE_FLS(nsec, frequency);
  1233. dividend >>= 1;
  1234. }
  1235. divisor = nsec * frequency;
  1236. }
  1237. return div64_u64(dividend, divisor);
  1238. }
  1239. static void perf_event_stop(struct perf_event *event)
  1240. {
  1241. if (!event->pmu->stop)
  1242. return event->pmu->disable(event);
  1243. return event->pmu->stop(event);
  1244. }
  1245. static int perf_event_start(struct perf_event *event)
  1246. {
  1247. if (!event->pmu->start)
  1248. return event->pmu->enable(event);
  1249. return event->pmu->start(event);
  1250. }
  1251. static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
  1252. {
  1253. struct hw_perf_event *hwc = &event->hw;
  1254. u64 period, sample_period;
  1255. s64 delta;
  1256. period = perf_calculate_period(event, nsec, count);
  1257. delta = (s64)(period - hwc->sample_period);
  1258. delta = (delta + 7) / 8; /* low pass filter */
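/*
 * Illustration (editorial): dividing by 8 applies only 1/8th of the
 * error per tick. E.g. with a current sample_period of 100,000 and a
 * newly computed target of 180,000, delta is 80,000, so the line below
 * nudges sample_period to 110,000 rather than jumping straight to the
 * target.
 */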
  1259. sample_period = hwc->sample_period + delta;
  1260. if (!sample_period)
  1261. sample_period = 1;
  1262. hwc->sample_period = sample_period;
  1263. if (atomic64_read(&hwc->period_left) > 8*sample_period) {
  1264. perf_disable();
  1265. perf_event_stop(event);
  1266. atomic64_set(&hwc->period_left, 0);
  1267. perf_event_start(event);
  1268. perf_enable();
  1269. }
  1270. }
  1271. static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  1272. {
  1273. struct perf_event *event;
  1274. struct hw_perf_event *hwc;
  1275. u64 interrupts, now;
  1276. s64 delta;
  1277. raw_spin_lock(&ctx->lock);
  1278. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  1279. if (event->state != PERF_EVENT_STATE_ACTIVE)
  1280. continue;
  1281. if (event->cpu != -1 && event->cpu != smp_processor_id())
  1282. continue;
  1283. hwc = &event->hw;
  1284. interrupts = hwc->interrupts;
  1285. hwc->interrupts = 0;
  1286. /*
  1287. * unthrottle events on the tick
  1288. */
  1289. if (interrupts == MAX_INTERRUPTS) {
  1290. perf_log_throttle(event, 1);
  1291. perf_disable();
  1292. event->pmu->unthrottle(event);
  1293. perf_enable();
  1294. }
  1295. if (!event->attr.freq || !event->attr.sample_freq)
  1296. continue;
  1297. perf_disable();
  1298. event->pmu->read(event);
  1299. now = atomic64_read(&event->count);
  1300. delta = now - hwc->freq_count_stamp;
  1301. hwc->freq_count_stamp = now;
  1302. if (delta > 0)
  1303. perf_adjust_period(event, TICK_NSEC, delta);
  1304. perf_enable();
  1305. }
  1306. raw_spin_unlock(&ctx->lock);
  1307. }
  1308. /*
  1309. * Round-robin a context's events:
  1310. */
  1311. static void rotate_ctx(struct perf_event_context *ctx)
  1312. {
  1313. raw_spin_lock(&ctx->lock);
  1314. /* Rotate the first entry last of non-pinned groups */
  1315. list_rotate_left(&ctx->flexible_groups);
  1316. raw_spin_unlock(&ctx->lock);
  1317. }
  1318. void perf_event_task_tick(struct task_struct *curr)
  1319. {
  1320. struct perf_cpu_context *cpuctx;
  1321. struct perf_event_context *ctx;
  1322. int rotate = 0;
  1323. if (!atomic_read(&nr_events))
  1324. return;
  1325. cpuctx = &__get_cpu_var(perf_cpu_context);
  1326. if (cpuctx->ctx.nr_events &&
  1327. cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
  1328. rotate = 1;
  1329. ctx = curr->perf_event_ctxp;
  1330. if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
  1331. rotate = 1;
  1332. perf_ctx_adjust_freq(&cpuctx->ctx);
  1333. if (ctx)
  1334. perf_ctx_adjust_freq(ctx);
  1335. if (!rotate)
  1336. return;
  1337. perf_disable();
  1338. cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
  1339. if (ctx)
  1340. task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
  1341. rotate_ctx(&cpuctx->ctx);
  1342. if (ctx)
  1343. rotate_ctx(ctx);
  1344. cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
  1345. if (ctx)
  1346. task_ctx_sched_in(curr, EVENT_FLEXIBLE);
  1347. perf_enable();
  1348. }
  1349. static int event_enable_on_exec(struct perf_event *event,
  1350. struct perf_event_context *ctx)
  1351. {
  1352. if (!event->attr.enable_on_exec)
  1353. return 0;
  1354. event->attr.enable_on_exec = 0;
  1355. if (event->state >= PERF_EVENT_STATE_INACTIVE)
  1356. return 0;
  1357. __perf_event_mark_enabled(event, ctx);
  1358. return 1;
  1359. }
  1360. /*
  1361. * Enable all of a task's events that have been marked enable-on-exec.
  1362. * This expects task == current.
  1363. */
  1364. static void perf_event_enable_on_exec(struct task_struct *task)
  1365. {
  1366. struct perf_event_context *ctx;
  1367. struct perf_event *event;
  1368. unsigned long flags;
  1369. int enabled = 0;
  1370. int ret;
  1371. local_irq_save(flags);
  1372. ctx = task->perf_event_ctxp;
  1373. if (!ctx || !ctx->nr_events)
  1374. goto out;
  1375. __perf_event_task_sched_out(ctx);
  1376. raw_spin_lock(&ctx->lock);
  1377. list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
  1378. ret = event_enable_on_exec(event, ctx);
  1379. if (ret)
  1380. enabled = 1;
  1381. }
  1382. list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
  1383. ret = event_enable_on_exec(event, ctx);
  1384. if (ret)
  1385. enabled = 1;
  1386. }
  1387. /*
  1388. * Unclone this context if we enabled any event.
  1389. */
  1390. if (enabled)
  1391. unclone_ctx(ctx);
  1392. raw_spin_unlock(&ctx->lock);
  1393. perf_event_task_sched_in(task);
  1394. out:
  1395. local_irq_restore(flags);
  1396. }
  1397. /*
  1398. * Cross CPU call to read the hardware event
  1399. */
  1400. static void __perf_event_read(void *info)
  1401. {
  1402. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  1403. struct perf_event *event = info;
  1404. struct perf_event_context *ctx = event->ctx;
  1405. /*
  1406. * If this is a task context, we need to check whether it is
  1407. * the current task context of this cpu. If not it has been
  1408. * scheduled out before the smp call arrived. In that case
  1409. * event->count would have been updated to a recent sample
  1410. * when the event was scheduled out.
  1411. */
  1412. if (ctx->task && cpuctx->task_ctx != ctx)
  1413. return;
  1414. raw_spin_lock(&ctx->lock);
  1415. update_context_time(ctx);
  1416. update_event_times(event);
  1417. raw_spin_unlock(&ctx->lock);
  1418. event->pmu->read(event);
  1419. }
  1420. static u64 perf_event_read(struct perf_event *event)
  1421. {
  1422. /*
  1423. * If event is enabled and currently active on a CPU, update the
  1424. * value in the event structure:
  1425. */
  1426. if (event->state == PERF_EVENT_STATE_ACTIVE) {
  1427. smp_call_function_single(event->oncpu,
  1428. __perf_event_read, event, 1);
  1429. } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
  1430. struct perf_event_context *ctx = event->ctx;
  1431. unsigned long flags;
  1432. raw_spin_lock_irqsave(&ctx->lock, flags);
  1433. update_context_time(ctx);
  1434. update_event_times(event);
  1435. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  1436. }
  1437. return atomic64_read(&event->count);
  1438. }
  1439. /*
  1440. * Initialize the perf_event context in a task_struct:
  1441. */
  1442. static void
  1443. __perf_event_init_context(struct perf_event_context *ctx,
  1444. struct task_struct *task)
  1445. {
  1446. raw_spin_lock_init(&ctx->lock);
  1447. mutex_init(&ctx->mutex);
  1448. INIT_LIST_HEAD(&ctx->pinned_groups);
  1449. INIT_LIST_HEAD(&ctx->flexible_groups);
  1450. INIT_LIST_HEAD(&ctx->event_list);
  1451. atomic_set(&ctx->refcount, 1);
  1452. ctx->task = task;
  1453. }
  1454. static struct perf_event_context *find_get_context(pid_t pid, int cpu)
  1455. {
  1456. struct perf_event_context *ctx;
  1457. struct perf_cpu_context *cpuctx;
  1458. struct task_struct *task;
  1459. unsigned long flags;
  1460. int err;
  1461. if (pid == -1 && cpu != -1) {
  1462. /* Must be root to operate on a CPU event: */
  1463. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  1464. return ERR_PTR(-EACCES);
  1465. if (cpu < 0 || cpu >= nr_cpumask_bits)
  1466. return ERR_PTR(-EINVAL);
  1467. /*
1468. * We could be clever and allow attaching an event to an
  1469. * offline CPU and activate it when the CPU comes up, but
  1470. * that's for later.
  1471. */
  1472. if (!cpu_online(cpu))
  1473. return ERR_PTR(-ENODEV);
  1474. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1475. ctx = &cpuctx->ctx;
  1476. get_ctx(ctx);
  1477. return ctx;
  1478. }
  1479. rcu_read_lock();
  1480. if (!pid)
  1481. task = current;
  1482. else
  1483. task = find_task_by_vpid(pid);
  1484. if (task)
  1485. get_task_struct(task);
  1486. rcu_read_unlock();
  1487. if (!task)
  1488. return ERR_PTR(-ESRCH);
  1489. /*
  1490. * Can't attach events to a dying task.
  1491. */
  1492. err = -ESRCH;
  1493. if (task->flags & PF_EXITING)
  1494. goto errout;
  1495. /* Reuse ptrace permission checks for now. */
  1496. err = -EACCES;
  1497. if (!ptrace_may_access(task, PTRACE_MODE_READ))
  1498. goto errout;
  1499. retry:
  1500. ctx = perf_lock_task_context(task, &flags);
  1501. if (ctx) {
  1502. unclone_ctx(ctx);
  1503. raw_spin_unlock_irqrestore(&ctx->lock, flags);
  1504. }
  1505. if (!ctx) {
  1506. ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
  1507. err = -ENOMEM;
  1508. if (!ctx)
  1509. goto errout;
  1510. __perf_event_init_context(ctx, task);
  1511. get_ctx(ctx);
  1512. if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
  1513. /*
  1514. * We raced with some other task; use
  1515. * the context they set.
  1516. */
  1517. kfree(ctx);
  1518. goto retry;
  1519. }
  1520. get_task_struct(task);
  1521. }
  1522. put_task_struct(task);
  1523. return ctx;
  1524. errout:
  1525. put_task_struct(task);
  1526. return ERR_PTR(err);
  1527. }
  1528. static void perf_event_free_filter(struct perf_event *event);
  1529. static void free_event_rcu(struct rcu_head *head)
  1530. {
  1531. struct perf_event *event;
  1532. event = container_of(head, struct perf_event, rcu_head);
  1533. if (event->ns)
  1534. put_pid_ns(event->ns);
  1535. perf_event_free_filter(event);
  1536. kfree(event);
  1537. }
  1538. static void perf_pending_sync(struct perf_event *event);
  1539. static void free_event(struct perf_event *event)
  1540. {
  1541. perf_pending_sync(event);
  1542. if (!event->parent) {
  1543. atomic_dec(&nr_events);
  1544. if (event->attr.mmap)
  1545. atomic_dec(&nr_mmap_events);
  1546. if (event->attr.comm)
  1547. atomic_dec(&nr_comm_events);
  1548. if (event->attr.task)
  1549. atomic_dec(&nr_task_events);
  1550. }
  1551. if (event->output) {
  1552. fput(event->output->filp);
  1553. event->output = NULL;
  1554. }
  1555. if (event->destroy)
  1556. event->destroy(event);
  1557. put_ctx(event->ctx);
  1558. call_rcu(&event->rcu_head, free_event_rcu);
  1559. }
  1560. int perf_event_release_kernel(struct perf_event *event)
  1561. {
  1562. struct perf_event_context *ctx = event->ctx;
  1563. WARN_ON_ONCE(ctx->parent_ctx);
  1564. mutex_lock(&ctx->mutex);
  1565. perf_event_remove_from_context(event);
  1566. mutex_unlock(&ctx->mutex);
  1567. mutex_lock(&event->owner->perf_event_mutex);
  1568. list_del_init(&event->owner_entry);
  1569. mutex_unlock(&event->owner->perf_event_mutex);
  1570. put_task_struct(event->owner);
  1571. free_event(event);
  1572. return 0;
  1573. }
  1574. EXPORT_SYMBOL_GPL(perf_event_release_kernel);
  1575. /*
  1576. * Called when the last reference to the file is gone.
  1577. */
  1578. static int perf_release(struct inode *inode, struct file *file)
  1579. {
  1580. struct perf_event *event = file->private_data;
  1581. file->private_data = NULL;
  1582. return perf_event_release_kernel(event);
  1583. }
  1584. static int perf_event_read_size(struct perf_event *event)
  1585. {
  1586. int entry = sizeof(u64); /* value */
  1587. int size = 0;
  1588. int nr = 1;
  1589. if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1590. size += sizeof(u64);
  1591. if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1592. size += sizeof(u64);
  1593. if (event->attr.read_format & PERF_FORMAT_ID)
  1594. entry += sizeof(u64);
  1595. if (event->attr.read_format & PERF_FORMAT_GROUP) {
  1596. nr += event->group_leader->nr_siblings;
  1597. size += sizeof(u64);
  1598. }
  1599. size += entry * nr;
  1600. return size;
  1601. }
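/*
 * Example (editorial): with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * and a leader with 2 siblings, entry = 16 bytes (value + id), nr = 3
 * and size = 8 (nr field) + 8 (time_enabled) + 3 * 16 = 64 bytes,
 * i.e. eight u64s.
 */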
  1602. u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
  1603. {
  1604. struct perf_event *child;
  1605. u64 total = 0;
  1606. *enabled = 0;
  1607. *running = 0;
  1608. mutex_lock(&event->child_mutex);
  1609. total += perf_event_read(event);
  1610. *enabled += event->total_time_enabled +
  1611. atomic64_read(&event->child_total_time_enabled);
  1612. *running += event->total_time_running +
  1613. atomic64_read(&event->child_total_time_running);
  1614. list_for_each_entry(child, &event->child_list, child_list) {
  1615. total += perf_event_read(child);
  1616. *enabled += child->total_time_enabled;
  1617. *running += child->total_time_running;
  1618. }
  1619. mutex_unlock(&event->child_mutex);
  1620. return total;
  1621. }
  1622. EXPORT_SYMBOL_GPL(perf_event_read_value);
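/*
 * Layout note (editorial, summarising the read_format ABI): for
 * PERF_FORMAT_GROUP the buffer written below is
 *
 *   { u64 nr;
 *     { u64 time_enabled; }  if PERF_FORMAT_TOTAL_TIME_ENABLED
 *     { u64 time_running; }  if PERF_FORMAT_TOTAL_TIME_RUNNING
 *     { u64 value;
 *       { u64 id; }          if PERF_FORMAT_ID
 *     } cntr[nr];            leader first, then each sibling
 *   }
 */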
  1623. static int perf_event_read_group(struct perf_event *event,
  1624. u64 read_format, char __user *buf)
  1625. {
  1626. struct perf_event *leader = event->group_leader, *sub;
  1627. int n = 0, size = 0, ret = -EFAULT;
  1628. struct perf_event_context *ctx = leader->ctx;
  1629. u64 values[5];
  1630. u64 count, enabled, running;
  1631. mutex_lock(&ctx->mutex);
  1632. count = perf_event_read_value(leader, &enabled, &running);
  1633. values[n++] = 1 + leader->nr_siblings;
  1634. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1635. values[n++] = enabled;
  1636. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1637. values[n++] = running;
  1638. values[n++] = count;
  1639. if (read_format & PERF_FORMAT_ID)
  1640. values[n++] = primary_event_id(leader);
  1641. size = n * sizeof(u64);
  1642. if (copy_to_user(buf, values, size))
  1643. goto unlock;
  1644. ret = size;
  1645. list_for_each_entry(sub, &leader->sibling_list, group_entry) {
  1646. n = 0;
  1647. values[n++] = perf_event_read_value(sub, &enabled, &running);
  1648. if (read_format & PERF_FORMAT_ID)
  1649. values[n++] = primary_event_id(sub);
  1650. size = n * sizeof(u64);
  1651. if (copy_to_user(buf + ret, values, size)) {
  1652. ret = -EFAULT;
  1653. goto unlock;
  1654. }
  1655. ret += size;
  1656. }
  1657. unlock:
  1658. mutex_unlock(&ctx->mutex);
  1659. return ret;
  1660. }
  1661. static int perf_event_read_one(struct perf_event *event,
  1662. u64 read_format, char __user *buf)
  1663. {
  1664. u64 enabled, running;
  1665. u64 values[4];
  1666. int n = 0;
  1667. values[n++] = perf_event_read_value(event, &enabled, &running);
  1668. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1669. values[n++] = enabled;
  1670. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1671. values[n++] = running;
  1672. if (read_format & PERF_FORMAT_ID)
  1673. values[n++] = primary_event_id(event);
  1674. if (copy_to_user(buf, values, n * sizeof(u64)))
  1675. return -EFAULT;
  1676. return n * sizeof(u64);
  1677. }
  1678. /*
  1679. * Read the performance event - simple non blocking version for now
  1680. */
  1681. static ssize_t
  1682. perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
  1683. {
  1684. u64 read_format = event->attr.read_format;
  1685. int ret;
  1686. /*
1687. * Return end-of-file for a read on an event that is in
  1688. * error state (i.e. because it was pinned but it couldn't be
  1689. * scheduled on to the CPU at some point).
  1690. */
  1691. if (event->state == PERF_EVENT_STATE_ERROR)
  1692. return 0;
  1693. if (count < perf_event_read_size(event))
  1694. return -ENOSPC;
  1695. WARN_ON_ONCE(event->ctx->parent_ctx);
  1696. if (read_format & PERF_FORMAT_GROUP)
  1697. ret = perf_event_read_group(event, read_format, buf);
  1698. else
  1699. ret = perf_event_read_one(event, read_format, buf);
  1700. return ret;
  1701. }
  1702. static ssize_t
  1703. perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  1704. {
  1705. struct perf_event *event = file->private_data;
  1706. return perf_read_hw(event, buf, count);
  1707. }
  1708. static unsigned int perf_poll(struct file *file, poll_table *wait)
  1709. {
  1710. struct perf_event *event = file->private_data;
  1711. struct perf_mmap_data *data;
  1712. unsigned int events = POLL_HUP;
  1713. rcu_read_lock();
  1714. data = rcu_dereference(event->data);
  1715. if (data)
  1716. events = atomic_xchg(&data->poll, 0);
  1717. rcu_read_unlock();
  1718. poll_wait(file, &event->waitq, wait);
  1719. return events;
  1720. }
  1721. static void perf_event_reset(struct perf_event *event)
  1722. {
  1723. (void)perf_event_read(event);
  1724. atomic64_set(&event->count, 0);
  1725. perf_event_update_userpage(event);
  1726. }
  1727. /*
  1728. * Holding the top-level event's child_mutex means that any
  1729. * descendant process that has inherited this event will block
  1730. * in sync_child_event if it goes to exit, thus satisfying the
  1731. * task existence requirements of perf_event_enable/disable.
  1732. */
  1733. static void perf_event_for_each_child(struct perf_event *event,
  1734. void (*func)(struct perf_event *))
  1735. {
  1736. struct perf_event *child;
  1737. WARN_ON_ONCE(event->ctx->parent_ctx);
  1738. mutex_lock(&event->child_mutex);
  1739. func(event);
  1740. list_for_each_entry(child, &event->child_list, child_list)
  1741. func(child);
  1742. mutex_unlock(&event->child_mutex);
  1743. }
  1744. static void perf_event_for_each(struct perf_event *event,
  1745. void (*func)(struct perf_event *))
  1746. {
  1747. struct perf_event_context *ctx = event->ctx;
  1748. struct perf_event *sibling;
  1749. WARN_ON_ONCE(ctx->parent_ctx);
  1750. mutex_lock(&ctx->mutex);
  1751. event = event->group_leader;
  1752. perf_event_for_each_child(event, func);
  1753. func(event);
  1754. list_for_each_entry(sibling, &event->sibling_list, group_entry)
1755. perf_event_for_each_child(sibling, func);	/* each sibling, not the leader again */
  1756. mutex_unlock(&ctx->mutex);
  1757. }
  1758. static int perf_event_period(struct perf_event *event, u64 __user *arg)
  1759. {
  1760. struct perf_event_context *ctx = event->ctx;
  1761. unsigned long size;
  1762. int ret = 0;
  1763. u64 value;
  1764. if (!event->attr.sample_period)
  1765. return -EINVAL;
  1766. size = copy_from_user(&value, arg, sizeof(value));
  1767. if (size != sizeof(value))
  1768. return -EFAULT;
  1769. if (!value)
  1770. return -EINVAL;
  1771. raw_spin_lock_irq(&ctx->lock);
  1772. if (event->attr.freq) {
  1773. if (value > sysctl_perf_event_sample_rate) {
  1774. ret = -EINVAL;
  1775. goto unlock;
  1776. }
  1777. event->attr.sample_freq = value;
  1778. } else {
  1779. event->attr.sample_period = value;
  1780. event->hw.sample_period = value;
  1781. }
  1782. unlock:
  1783. raw_spin_unlock_irq(&ctx->lock);
  1784. return ret;
  1785. }
  1786. static int perf_event_set_output(struct perf_event *event, int output_fd);
  1787. static int perf_event_set_filter(struct perf_event *event, void __user *arg);
  1788. static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1789. {
  1790. struct perf_event *event = file->private_data;
  1791. void (*func)(struct perf_event *);
  1792. u32 flags = arg;
  1793. switch (cmd) {
  1794. case PERF_EVENT_IOC_ENABLE:
  1795. func = perf_event_enable;
  1796. break;
  1797. case PERF_EVENT_IOC_DISABLE:
  1798. func = perf_event_disable;
  1799. break;
  1800. case PERF_EVENT_IOC_RESET:
  1801. func = perf_event_reset;
  1802. break;
  1803. case PERF_EVENT_IOC_REFRESH:
  1804. return perf_event_refresh(event, arg);
  1805. case PERF_EVENT_IOC_PERIOD:
  1806. return perf_event_period(event, (u64 __user *)arg);
  1807. case PERF_EVENT_IOC_SET_OUTPUT:
  1808. return perf_event_set_output(event, arg);
  1809. case PERF_EVENT_IOC_SET_FILTER:
  1810. return perf_event_set_filter(event, (void __user *)arg);
  1811. default:
  1812. return -ENOTTY;
  1813. }
  1814. if (flags & PERF_IOC_FLAG_GROUP)
  1815. perf_event_for_each(event, func);
  1816. else
  1817. perf_event_for_each_child(event, func);
  1818. return 0;
  1819. }
  1820. int perf_event_task_enable(void)
  1821. {
  1822. struct perf_event *event;
  1823. mutex_lock(&current->perf_event_mutex);
  1824. list_for_each_entry(event, &current->perf_event_list, owner_entry)
  1825. perf_event_for_each_child(event, perf_event_enable);
  1826. mutex_unlock(&current->perf_event_mutex);
  1827. return 0;
  1828. }
  1829. int perf_event_task_disable(void)
  1830. {
  1831. struct perf_event *event;
  1832. mutex_lock(&current->perf_event_mutex);
  1833. list_for_each_entry(event, &current->perf_event_list, owner_entry)
  1834. perf_event_for_each_child(event, perf_event_disable);
  1835. mutex_unlock(&current->perf_event_mutex);
  1836. return 0;
  1837. }
  1838. #ifndef PERF_EVENT_INDEX_OFFSET
  1839. # define PERF_EVENT_INDEX_OFFSET 0
  1840. #endif
  1841. static int perf_event_index(struct perf_event *event)
  1842. {
  1843. if (event->state != PERF_EVENT_STATE_ACTIVE)
  1844. return 0;
  1845. return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
  1846. }
  1847. /*
  1848. * Callers need to ensure there can be no nesting of this function, otherwise
1849. * the seqlock logic goes bad. We cannot serialize this because the arch
  1850. * code calls this from NMI context.
  1851. */
  1852. void perf_event_update_userpage(struct perf_event *event)
  1853. {
  1854. struct perf_event_mmap_page *userpg;
  1855. struct perf_mmap_data *data;
  1856. rcu_read_lock();
  1857. data = rcu_dereference(event->data);
  1858. if (!data)
  1859. goto unlock;
  1860. userpg = data->user_page;
  1861. /*
  1862. * Disable preemption so as to not let the corresponding user-space
  1863. * spin too long if we get preempted.
  1864. */
  1865. preempt_disable();
  1866. ++userpg->lock;
  1867. barrier();
  1868. userpg->index = perf_event_index(event);
  1869. userpg->offset = atomic64_read(&event->count);
  1870. if (event->state == PERF_EVENT_STATE_ACTIVE)
  1871. userpg->offset -= atomic64_read(&event->hw.prev_count);
  1872. userpg->time_enabled = event->total_time_enabled +
  1873. atomic64_read(&event->child_total_time_enabled);
  1874. userpg->time_running = event->total_time_running +
  1875. atomic64_read(&event->child_total_time_running);
  1876. barrier();
  1877. ++userpg->lock;
  1878. preempt_enable();
  1879. unlock:
  1880. rcu_read_unlock();
  1881. }
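/*
 * Reader-side sketch (editorial; roughly mirroring the usage example in
 * the struct perf_event_mmap_page comment): user space pairs the two
 * ->lock increments above like a seqcount and retries on mismatch:
 *
 *   u32 seq;
 *   u64 count;
 *   do {
 *           seq = pc->lock;
 *           barrier();
 *           count = pc->offset;     // plus a pmc read when pc->index != 0
 *           barrier();
 *   } while (pc->lock != seq);      // kernel updated it meanwhile, retry
 *
 * An odd ->lock value likewise indicates an update in progress.
 */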
  1882. static unsigned long perf_data_size(struct perf_mmap_data *data)
  1883. {
  1884. return data->nr_pages << (PAGE_SHIFT + data->data_order);
  1885. }
  1886. #ifndef CONFIG_PERF_USE_VMALLOC
  1887. /*
  1888. * Back perf_mmap() with regular GFP_KERNEL-0 pages.
  1889. */
  1890. static struct page *
  1891. perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
  1892. {
  1893. if (pgoff > data->nr_pages)
  1894. return NULL;
  1895. if (pgoff == 0)
  1896. return virt_to_page(data->user_page);
  1897. return virt_to_page(data->data_pages[pgoff - 1]);
  1898. }
  1899. static struct perf_mmap_data *
  1900. perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
  1901. {
  1902. struct perf_mmap_data *data;
  1903. unsigned long size;
  1904. int i;
  1905. WARN_ON(atomic_read(&event->mmap_count));
  1906. size = sizeof(struct perf_mmap_data);
  1907. size += nr_pages * sizeof(void *);
  1908. data = kzalloc(size, GFP_KERNEL);
  1909. if (!data)
  1910. goto fail;
  1911. data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
  1912. if (!data->user_page)
  1913. goto fail_user_page;
  1914. for (i = 0; i < nr_pages; i++) {
  1915. data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
  1916. if (!data->data_pages[i])
  1917. goto fail_data_pages;
  1918. }
  1919. data->data_order = 0;
  1920. data->nr_pages = nr_pages;
  1921. return data;
  1922. fail_data_pages:
  1923. for (i--; i >= 0; i--)
  1924. free_page((unsigned long)data->data_pages[i]);
  1925. free_page((unsigned long)data->user_page);
  1926. fail_user_page:
  1927. kfree(data);
  1928. fail:
  1929. return NULL;
  1930. }
  1931. static void perf_mmap_free_page(unsigned long addr)
  1932. {
  1933. struct page *page = virt_to_page((void *)addr);
  1934. page->mapping = NULL;
  1935. __free_page(page);
  1936. }
  1937. static void perf_mmap_data_free(struct perf_mmap_data *data)
  1938. {
  1939. int i;
  1940. perf_mmap_free_page((unsigned long)data->user_page);
  1941. for (i = 0; i < data->nr_pages; i++)
  1942. perf_mmap_free_page((unsigned long)data->data_pages[i]);
  1943. kfree(data);
  1944. }
  1945. #else
  1946. /*
  1947. * Back perf_mmap() with vmalloc memory.
  1948. *
  1949. * Required for architectures that have d-cache aliasing issues.
  1950. */
  1951. static struct page *
  1952. perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
  1953. {
  1954. if (pgoff > (1UL << data->data_order))
  1955. return NULL;
  1956. return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
  1957. }
  1958. static void perf_mmap_unmark_page(void *addr)
  1959. {
  1960. struct page *page = vmalloc_to_page(addr);
  1961. page->mapping = NULL;
  1962. }
  1963. static void perf_mmap_data_free_work(struct work_struct *work)
  1964. {
  1965. struct perf_mmap_data *data;
  1966. void *base;
  1967. int i, nr;
  1968. data = container_of(work, struct perf_mmap_data, work);
  1969. nr = 1 << data->data_order;
  1970. base = data->user_page;
  1971. for (i = 0; i < nr + 1; i++)
  1972. perf_mmap_unmark_page(base + (i * PAGE_SIZE));
  1973. vfree(base);
  1974. kfree(data);
  1975. }
  1976. static void perf_mmap_data_free(struct perf_mmap_data *data)
  1977. {
  1978. schedule_work(&data->work);
  1979. }
  1980. static struct perf_mmap_data *
  1981. perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
  1982. {
  1983. struct perf_mmap_data *data;
  1984. unsigned long size;
  1985. void *all_buf;
  1986. WARN_ON(atomic_read(&event->mmap_count));
  1987. size = sizeof(struct perf_mmap_data);
  1988. size += sizeof(void *);
  1989. data = kzalloc(size, GFP_KERNEL);
  1990. if (!data)
  1991. goto fail;
  1992. INIT_WORK(&data->work, perf_mmap_data_free_work);
  1993. all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
  1994. if (!all_buf)
  1995. goto fail_all_buf;
  1996. data->user_page = all_buf;
  1997. data->data_pages[0] = all_buf + PAGE_SIZE;
  1998. data->data_order = ilog2(nr_pages);
  1999. data->nr_pages = 1;
  2000. return data;
  2001. fail_all_buf:
  2002. kfree(data);
  2003. fail:
  2004. return NULL;
  2005. }
  2006. #endif
  2007. static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  2008. {
  2009. struct perf_event *event = vma->vm_file->private_data;
  2010. struct perf_mmap_data *data;
  2011. int ret = VM_FAULT_SIGBUS;
  2012. if (vmf->flags & FAULT_FLAG_MKWRITE) {
  2013. if (vmf->pgoff == 0)
  2014. ret = 0;
  2015. return ret;
  2016. }
  2017. rcu_read_lock();
  2018. data = rcu_dereference(event->data);
  2019. if (!data)
  2020. goto unlock;
  2021. if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
  2022. goto unlock;
  2023. vmf->page = perf_mmap_to_page(data, vmf->pgoff);
  2024. if (!vmf->page)
  2025. goto unlock;
  2026. get_page(vmf->page);
  2027. vmf->page->mapping = vma->vm_file->f_mapping;
  2028. vmf->page->index = vmf->pgoff;
  2029. ret = 0;
  2030. unlock:
  2031. rcu_read_unlock();
  2032. return ret;
  2033. }
  2034. static void
  2035. perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
  2036. {
  2037. long max_size = perf_data_size(data);
  2038. atomic_set(&data->lock, -1);
  2039. if (event->attr.watermark) {
  2040. data->watermark = min_t(long, max_size,
  2041. event->attr.wakeup_watermark);
  2042. }
  2043. if (!data->watermark)
  2044. data->watermark = max_size / 2;
  2045. rcu_assign_pointer(event->data, data);
  2046. }
  2047. static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
  2048. {
  2049. struct perf_mmap_data *data;
  2050. data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
  2051. perf_mmap_data_free(data);
  2052. }
  2053. static void perf_mmap_data_release(struct perf_event *event)
  2054. {
  2055. struct perf_mmap_data *data = event->data;
  2056. WARN_ON(atomic_read(&event->mmap_count));
  2057. rcu_assign_pointer(event->data, NULL);
  2058. call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
  2059. }
  2060. static void perf_mmap_open(struct vm_area_struct *vma)
  2061. {
  2062. struct perf_event *event = vma->vm_file->private_data;
  2063. atomic_inc(&event->mmap_count);
  2064. }
  2065. static void perf_mmap_close(struct vm_area_struct *vma)
  2066. {
  2067. struct perf_event *event = vma->vm_file->private_data;
  2068. WARN_ON_ONCE(event->ctx->parent_ctx);
  2069. if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
  2070. unsigned long size = perf_data_size(event->data);
  2071. struct user_struct *user = current_user();
  2072. atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
  2073. vma->vm_mm->locked_vm -= event->data->nr_locked;
  2074. perf_mmap_data_release(event);
  2075. mutex_unlock(&event->mmap_mutex);
  2076. }
  2077. }
  2078. static const struct vm_operations_struct perf_mmap_vmops = {
  2079. .open = perf_mmap_open,
  2080. .close = perf_mmap_close,
  2081. .fault = perf_mmap_fault,
  2082. .page_mkwrite = perf_mmap_fault,
  2083. };
  2084. static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  2085. {
  2086. struct perf_event *event = file->private_data;
  2087. unsigned long user_locked, user_lock_limit;
  2088. struct user_struct *user = current_user();
  2089. unsigned long locked, lock_limit;
  2090. struct perf_mmap_data *data;
  2091. unsigned long vma_size;
  2092. unsigned long nr_pages;
  2093. long user_extra, extra;
  2094. int ret = 0;
  2095. if (!(vma->vm_flags & VM_SHARED))
  2096. return -EINVAL;
  2097. vma_size = vma->vm_end - vma->vm_start;
  2098. nr_pages = (vma_size / PAGE_SIZE) - 1;
  2099. /*
  2100. * If we have data pages ensure they're a power-of-two number, so we
  2101. * can do bitmasks instead of modulo.
  2102. */
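/*
 * Example (editorial): with nr_pages = 8 the data-area indexing in
 * perf_output_copy() becomes (offset >> PAGE_SHIFT) & 7 instead of a
 * modulo; a request for, say, 6 data pages is rejected below.
 */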
  2103. if (nr_pages != 0 && !is_power_of_2(nr_pages))
  2104. return -EINVAL;
  2105. if (vma_size != PAGE_SIZE * (1 + nr_pages))
  2106. return -EINVAL;
  2107. if (vma->vm_pgoff != 0)
  2108. return -EINVAL;
  2109. WARN_ON_ONCE(event->ctx->parent_ctx);
  2110. mutex_lock(&event->mmap_mutex);
  2111. if (event->output) {
  2112. ret = -EINVAL;
  2113. goto unlock;
  2114. }
  2115. if (atomic_inc_not_zero(&event->mmap_count)) {
  2116. if (nr_pages != event->data->nr_pages)
  2117. ret = -EINVAL;
  2118. goto unlock;
  2119. }
  2120. user_extra = nr_pages + 1;
  2121. user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
  2122. /*
  2123. * Increase the limit linearly with more CPUs:
  2124. */
  2125. user_lock_limit *= num_online_cpus();
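/*
 * Example (editorial; assumes 4 KiB pages): sysctl_perf_event_mlock is
 * in KiB, so a value of 512 allows 512 >> (12 - 10) = 128 pinned pages
 * per user, which the line above scales by the number of online CPUs
 * (e.g. 512 pages on a 4-CPU box).
 */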
  2126. user_locked = atomic_long_read(&user->locked_vm) + user_extra;
  2127. extra = 0;
  2128. if (user_locked > user_lock_limit)
  2129. extra = user_locked - user_lock_limit;
  2130. lock_limit = rlimit(RLIMIT_MEMLOCK);
  2131. lock_limit >>= PAGE_SHIFT;
  2132. locked = vma->vm_mm->locked_vm + extra;
  2133. if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
  2134. !capable(CAP_IPC_LOCK)) {
  2135. ret = -EPERM;
  2136. goto unlock;
  2137. }
  2138. WARN_ON(event->data);
  2139. data = perf_mmap_data_alloc(event, nr_pages);
  2140. ret = -ENOMEM;
  2141. if (!data)
  2142. goto unlock;
  2143. ret = 0;
  2144. perf_mmap_data_init(event, data);
  2145. atomic_set(&event->mmap_count, 1);
  2146. atomic_long_add(user_extra, &user->locked_vm);
  2147. vma->vm_mm->locked_vm += extra;
  2148. event->data->nr_locked = extra;
  2149. if (vma->vm_flags & VM_WRITE)
  2150. event->data->writable = 1;
  2151. unlock:
  2152. mutex_unlock(&event->mmap_mutex);
  2153. vma->vm_flags |= VM_RESERVED;
  2154. vma->vm_ops = &perf_mmap_vmops;
  2155. return ret;
  2156. }
  2157. static int perf_fasync(int fd, struct file *filp, int on)
  2158. {
  2159. struct inode *inode = filp->f_path.dentry->d_inode;
  2160. struct perf_event *event = filp->private_data;
  2161. int retval;
  2162. mutex_lock(&inode->i_mutex);
  2163. retval = fasync_helper(fd, filp, on, &event->fasync);
  2164. mutex_unlock(&inode->i_mutex);
  2165. if (retval < 0)
  2166. return retval;
  2167. return 0;
  2168. }
  2169. static const struct file_operations perf_fops = {
  2170. .release = perf_release,
  2171. .read = perf_read,
  2172. .poll = perf_poll,
  2173. .unlocked_ioctl = perf_ioctl,
  2174. .compat_ioctl = perf_ioctl,
  2175. .mmap = perf_mmap,
  2176. .fasync = perf_fasync,
  2177. };
  2178. /*
  2179. * Perf event wakeup
  2180. *
  2181. * If there's data, ensure we set the poll() state and publish everything
  2182. * to user-space before waking everybody up.
  2183. */
  2184. void perf_event_wakeup(struct perf_event *event)
  2185. {
  2186. wake_up_all(&event->waitq);
  2187. if (event->pending_kill) {
  2188. kill_fasync(&event->fasync, SIGIO, event->pending_kill);
  2189. event->pending_kill = 0;
  2190. }
  2191. }
  2192. /*
  2193. * Pending wakeups
  2194. *
2195. * Handle the case where we need to wake up from NMI (or rq->lock) context.
  2196. *
  2197. * The NMI bit means we cannot possibly take locks. Therefore, maintain a
  2198. * single linked list and use cmpxchg() to add entries lockless.
  2199. */
  2200. static void perf_pending_event(struct perf_pending_entry *entry)
  2201. {
  2202. struct perf_event *event = container_of(entry,
  2203. struct perf_event, pending);
  2204. if (event->pending_disable) {
  2205. event->pending_disable = 0;
  2206. __perf_event_disable(event);
  2207. }
  2208. if (event->pending_wakeup) {
  2209. event->pending_wakeup = 0;
  2210. perf_event_wakeup(event);
  2211. }
  2212. }
  2213. #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
  2214. static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
  2215. PENDING_TAIL,
  2216. };
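/*
 * Editorial note: entry->next == NULL means "not queued" (see the
 * cmpxchg() guard in perf_pending_queue() below, which makes
 * double-queueing a no-op), while PENDING_TAIL terminates the per-CPU
 * list so that a queued entry never has a NULL ->next.
 */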
  2217. static void perf_pending_queue(struct perf_pending_entry *entry,
  2218. void (*func)(struct perf_pending_entry *))
  2219. {
  2220. struct perf_pending_entry **head;
  2221. if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
  2222. return;
  2223. entry->func = func;
  2224. head = &get_cpu_var(perf_pending_head);
  2225. do {
  2226. entry->next = *head;
  2227. } while (cmpxchg(head, entry->next, entry) != entry->next);
  2228. set_perf_event_pending();
  2229. put_cpu_var(perf_pending_head);
  2230. }
  2231. static int __perf_pending_run(void)
  2232. {
  2233. struct perf_pending_entry *list;
  2234. int nr = 0;
  2235. list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
  2236. while (list != PENDING_TAIL) {
  2237. void (*func)(struct perf_pending_entry *);
  2238. struct perf_pending_entry *entry = list;
  2239. list = list->next;
  2240. func = entry->func;
  2241. entry->next = NULL;
  2242. /*
  2243. * Ensure we observe the unqueue before we issue the wakeup,
  2244. * so that we won't be waiting forever.
  2245. * -- see perf_not_pending().
  2246. */
  2247. smp_wmb();
  2248. func(entry);
  2249. nr++;
  2250. }
  2251. return nr;
  2252. }
  2253. static inline int perf_not_pending(struct perf_event *event)
  2254. {
  2255. /*
  2256. * If we flush on whatever cpu we run, there is a chance we don't
  2257. * need to wait.
  2258. */
  2259. get_cpu();
  2260. __perf_pending_run();
  2261. put_cpu();
  2262. /*
  2263. * Ensure we see the proper queue state before going to sleep
2264. * so that we do not miss the wakeup. -- see perf_pending_queue()
  2265. */
  2266. smp_rmb();
  2267. return event->pending.next == NULL;
  2268. }
  2269. static void perf_pending_sync(struct perf_event *event)
  2270. {
  2271. wait_event(event->waitq, perf_not_pending(event));
  2272. }
  2273. void perf_event_do_pending(void)
  2274. {
  2275. __perf_pending_run();
  2276. }
  2277. /*
  2278. * Callchain support -- arch specific
  2279. */
  2280. __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
  2281. {
  2282. return NULL;
  2283. }
  2284. #ifdef CONFIG_EVENT_TRACING
  2285. __weak
  2286. void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
  2287. {
  2288. }
  2289. #endif
  2290. /*
  2291. * Output
  2292. */
  2293. static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
  2294. unsigned long offset, unsigned long head)
  2295. {
  2296. unsigned long mask;
  2297. if (!data->writable)
  2298. return true;
  2299. mask = perf_data_size(data) - 1;
  2300. offset = (offset - tail) & mask;
  2301. head = (head - tail) & mask;
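/*
 * Editorial note: the two subtractions above re-base offset and head
 * relative to tail, modulo the buffer size, so the check below simply
 * asks whether the proposed write [offset, head) would run past the
 * reader's tail and overwrite data user space has not consumed yet.
 */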
  2302. if ((int)(head - offset) < 0)
  2303. return false;
  2304. return true;
  2305. }
  2306. static void perf_output_wakeup(struct perf_output_handle *handle)
  2307. {
  2308. atomic_set(&handle->data->poll, POLL_IN);
  2309. if (handle->nmi) {
  2310. handle->event->pending_wakeup = 1;
  2311. perf_pending_queue(&handle->event->pending,
  2312. perf_pending_event);
  2313. } else
  2314. perf_event_wakeup(handle->event);
  2315. }
  2316. /*
  2317. * Curious locking construct.
  2318. *
  2319. * We need to ensure a later event_id doesn't publish a head when a former
  2320. * event_id isn't done writing. However since we need to deal with NMIs we
  2321. * cannot fully serialize things.
  2322. *
  2323. * What we do is serialize between CPUs so we only have to deal with NMI
  2324. * nesting on a single CPU.
  2325. *
  2326. * We only publish the head (and generate a wakeup) when the outer-most
  2327. * event_id completes.
  2328. */
  2329. static void perf_output_lock(struct perf_output_handle *handle)
  2330. {
  2331. struct perf_mmap_data *data = handle->data;
  2332. int cur, cpu = get_cpu();
  2333. handle->locked = 0;
  2334. for (;;) {
  2335. cur = atomic_cmpxchg(&data->lock, -1, cpu);
  2336. if (cur == -1) {
  2337. handle->locked = 1;
  2338. break;
  2339. }
  2340. if (cur == cpu)
  2341. break;
  2342. cpu_relax();
  2343. }
  2344. }
  2345. static void perf_output_unlock(struct perf_output_handle *handle)
  2346. {
  2347. struct perf_mmap_data *data = handle->data;
  2348. unsigned long head;
  2349. int cpu;
  2350. data->done_head = data->head;
  2351. if (!handle->locked)
  2352. goto out;
  2353. again:
  2354. /*
  2355. * The xchg implies a full barrier that ensures all writes are done
  2356. * before we publish the new head, matched by a rmb() in userspace when
  2357. * reading this position.
  2358. */
  2359. while ((head = atomic_long_xchg(&data->done_head, 0)))
  2360. data->user_page->data_head = head;
  2361. /*
  2362. * NMI can happen here, which means we can miss a done_head update.
  2363. */
  2364. cpu = atomic_xchg(&data->lock, -1);
  2365. WARN_ON_ONCE(cpu != smp_processor_id());
  2366. /*
2367. * Therefore we have to check below whether we did indeed miss one.
  2368. */
  2369. if (unlikely(atomic_long_read(&data->done_head))) {
  2370. /*
  2371. * Since we had it locked, we can lock it again.
  2372. */
  2373. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  2374. cpu_relax();
  2375. goto again;
  2376. }
  2377. if (atomic_xchg(&data->wakeup, 0))
  2378. perf_output_wakeup(handle);
  2379. out:
  2380. put_cpu();
  2381. }
  2382. void perf_output_copy(struct perf_output_handle *handle,
  2383. const void *buf, unsigned int len)
  2384. {
  2385. unsigned int pages_mask;
  2386. unsigned long offset;
  2387. unsigned int size;
  2388. void **pages;
  2389. offset = handle->offset;
  2390. pages_mask = handle->data->nr_pages - 1;
  2391. pages = handle->data->data_pages;
  2392. do {
  2393. unsigned long page_offset;
  2394. unsigned long page_size;
  2395. int nr;
  2396. nr = (offset >> PAGE_SHIFT) & pages_mask;
  2397. page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
  2398. page_offset = offset & (page_size - 1);
  2399. size = min_t(unsigned int, page_size - page_offset, len);
  2400. memcpy(pages[nr] + page_offset, buf, size);
  2401. len -= size;
  2402. buf += size;
  2403. offset += size;
  2404. } while (len);
  2405. handle->offset = offset;
  2406. /*
  2407. * Check we didn't copy past our reservation window, taking the
  2408. * possible unsigned int wrap into account.
  2409. */
  2410. WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
  2411. }
  2412. int perf_output_begin(struct perf_output_handle *handle,
  2413. struct perf_event *event, unsigned int size,
  2414. int nmi, int sample)
  2415. {
  2416. struct perf_event *output_event;
  2417. struct perf_mmap_data *data;
  2418. unsigned long tail, offset, head;
  2419. int have_lost;
  2420. struct {
  2421. struct perf_event_header header;
  2422. u64 id;
  2423. u64 lost;
  2424. } lost_event;
  2425. rcu_read_lock();
  2426. /*
  2427. * For inherited events we send all the output towards the parent.
  2428. */
  2429. if (event->parent)
  2430. event = event->parent;
  2431. output_event = rcu_dereference(event->output);
  2432. if (output_event)
  2433. event = output_event;
  2434. data = rcu_dereference(event->data);
  2435. if (!data)
  2436. goto out;
  2437. handle->data = data;
  2438. handle->event = event;
  2439. handle->nmi = nmi;
  2440. handle->sample = sample;
  2441. if (!data->nr_pages)
  2442. goto fail;
  2443. have_lost = atomic_read(&data->lost);
  2444. if (have_lost)
  2445. size += sizeof(lost_event);
  2446. perf_output_lock(handle);
  2447. do {
  2448. /*
  2449. * Userspace could choose to issue a mb() before updating the
2450. * tail pointer, so that all reads are completed before the
  2451. * write is issued.
  2452. */
  2453. tail = ACCESS_ONCE(data->user_page->data_tail);
  2454. smp_rmb();
  2455. offset = head = atomic_long_read(&data->head);
  2456. head += size;
  2457. if (unlikely(!perf_output_space(data, tail, offset, head)))
  2458. goto fail;
  2459. } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
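/*
 * Editorial note: the loop above is a lock-free reservation; the
 * cmpxchg() on data->head either claims [offset, head) for this writer
 * or retries after a concurrent (e.g. NMI) writer moved head.
 */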
  2460. handle->offset = offset;
  2461. handle->head = head;
  2462. if (head - tail > data->watermark)
  2463. atomic_set(&data->wakeup, 1);
  2464. if (have_lost) {
  2465. lost_event.header.type = PERF_RECORD_LOST;
  2466. lost_event.header.misc = 0;
  2467. lost_event.header.size = sizeof(lost_event);
  2468. lost_event.id = event->id;
  2469. lost_event.lost = atomic_xchg(&data->lost, 0);
  2470. perf_output_put(handle, lost_event);
  2471. }
  2472. return 0;
  2473. fail:
  2474. atomic_inc(&data->lost);
  2475. perf_output_unlock(handle);
  2476. out:
  2477. rcu_read_unlock();
  2478. return -ENOSPC;
  2479. }
  2480. void perf_output_end(struct perf_output_handle *handle)
  2481. {
  2482. struct perf_event *event = handle->event;
  2483. struct perf_mmap_data *data = handle->data;
  2484. int wakeup_events = event->attr.wakeup_events;
  2485. if (handle->sample && wakeup_events) {
  2486. int events = atomic_inc_return(&data->events);
  2487. if (events >= wakeup_events) {
  2488. atomic_sub(wakeup_events, &data->events);
  2489. atomic_set(&data->wakeup, 1);
  2490. }
  2491. }
  2492. perf_output_unlock(handle);
  2493. rcu_read_unlock();
  2494. }
  2495. static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
  2496. {
  2497. /*
  2498. * only top level events have the pid namespace they were created in
  2499. */
  2500. if (event->parent)
  2501. event = event->parent;
  2502. return task_tgid_nr_ns(p, event->ns);
  2503. }
  2504. static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
  2505. {
  2506. /*
  2507. * only top level events have the pid namespace they were created in
  2508. */
  2509. if (event->parent)
  2510. event = event->parent;
  2511. return task_pid_nr_ns(p, event->ns);
  2512. }
  2513. static void perf_output_read_one(struct perf_output_handle *handle,
  2514. struct perf_event *event)
  2515. {
  2516. u64 read_format = event->attr.read_format;
  2517. u64 values[4];
  2518. int n = 0;
  2519. values[n++] = atomic64_read(&event->count);
  2520. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
  2521. values[n++] = event->total_time_enabled +
  2522. atomic64_read(&event->child_total_time_enabled);
  2523. }
  2524. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
  2525. values[n++] = event->total_time_running +
  2526. atomic64_read(&event->child_total_time_running);
  2527. }
  2528. if (read_format & PERF_FORMAT_ID)
  2529. values[n++] = primary_event_id(event);
  2530. perf_output_copy(handle, values, n * sizeof(u64));
  2531. }
  2532. /*
  2533. * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  2534. */
  2535. static void perf_output_read_group(struct perf_output_handle *handle,
  2536. struct perf_event *event)
  2537. {
  2538. struct perf_event *leader = event->group_leader, *sub;
  2539. u64 read_format = event->attr.read_format;
  2540. u64 values[5];
  2541. int n = 0;
  2542. values[n++] = 1 + leader->nr_siblings;
  2543. if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  2544. values[n++] = leader->total_time_enabled;
  2545. if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  2546. values[n++] = leader->total_time_running;
  2547. if (leader != event)
  2548. leader->pmu->read(leader);
  2549. values[n++] = atomic64_read(&leader->count);
  2550. if (read_format & PERF_FORMAT_ID)
  2551. values[n++] = primary_event_id(leader);
  2552. perf_output_copy(handle, values, n * sizeof(u64));
  2553. list_for_each_entry(sub, &leader->sibling_list, group_entry) {
  2554. n = 0;
  2555. if (sub != event)
  2556. sub->pmu->read(sub);
  2557. values[n++] = atomic64_read(&sub->count);
  2558. if (read_format & PERF_FORMAT_ID)
  2559. values[n++] = primary_event_id(sub);
  2560. perf_output_copy(handle, values, n * sizeof(u64));
  2561. }
  2562. }
  2563. static void perf_output_read(struct perf_output_handle *handle,
  2564. struct perf_event *event)
  2565. {
  2566. if (event->attr.read_format & PERF_FORMAT_GROUP)
  2567. perf_output_read_group(handle, event);
  2568. else
  2569. perf_output_read_one(handle, event);
  2570. }
  2571. void perf_output_sample(struct perf_output_handle *handle,
  2572. struct perf_event_header *header,
  2573. struct perf_sample_data *data,
  2574. struct perf_event *event)
  2575. {
  2576. u64 sample_type = data->type;
  2577. perf_output_put(handle, *header);
  2578. if (sample_type & PERF_SAMPLE_IP)
  2579. perf_output_put(handle, data->ip);
  2580. if (sample_type & PERF_SAMPLE_TID)
  2581. perf_output_put(handle, data->tid_entry);
  2582. if (sample_type & PERF_SAMPLE_TIME)
  2583. perf_output_put(handle, data->time);
  2584. if (sample_type & PERF_SAMPLE_ADDR)
  2585. perf_output_put(handle, data->addr);
  2586. if (sample_type & PERF_SAMPLE_ID)
  2587. perf_output_put(handle, data->id);
  2588. if (sample_type & PERF_SAMPLE_STREAM_ID)
  2589. perf_output_put(handle, data->stream_id);
  2590. if (sample_type & PERF_SAMPLE_CPU)
  2591. perf_output_put(handle, data->cpu_entry);
  2592. if (sample_type & PERF_SAMPLE_PERIOD)
  2593. perf_output_put(handle, data->period);
  2594. if (sample_type & PERF_SAMPLE_READ)
  2595. perf_output_read(handle, event);
  2596. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2597. if (data->callchain) {
  2598. int size = 1;
  2599. if (data->callchain)
  2600. size += data->callchain->nr;
  2601. size *= sizeof(u64);
  2602. perf_output_copy(handle, data->callchain, size);
  2603. } else {
  2604. u64 nr = 0;
  2605. perf_output_put(handle, nr);
  2606. }
  2607. }
  2608. if (sample_type & PERF_SAMPLE_RAW) {
  2609. if (data->raw) {
  2610. perf_output_put(handle, data->raw->size);
  2611. perf_output_copy(handle, data->raw->data,
  2612. data->raw->size);
  2613. } else {
  2614. struct {
  2615. u32 size;
  2616. u32 data;
  2617. } raw = {
  2618. .size = sizeof(u32),
  2619. .data = 0,
  2620. };
  2621. perf_output_put(handle, raw);
  2622. }
  2623. }
  2624. }
  2625. void perf_prepare_sample(struct perf_event_header *header,
  2626. struct perf_sample_data *data,
  2627. struct perf_event *event,
  2628. struct pt_regs *regs)
  2629. {
  2630. u64 sample_type = event->attr.sample_type;
  2631. data->type = sample_type;
  2632. header->type = PERF_RECORD_SAMPLE;
  2633. header->size = sizeof(*header);
  2634. header->misc = 0;
  2635. header->misc |= perf_misc_flags(regs);
  2636. if (sample_type & PERF_SAMPLE_IP) {
  2637. data->ip = perf_instruction_pointer(regs);
  2638. header->size += sizeof(data->ip);
  2639. }
  2640. if (sample_type & PERF_SAMPLE_TID) {
  2641. /* namespace issues */
  2642. data->tid_entry.pid = perf_event_pid(event, current);
  2643. data->tid_entry.tid = perf_event_tid(event, current);
  2644. header->size += sizeof(data->tid_entry);
  2645. }
  2646. if (sample_type & PERF_SAMPLE_TIME) {
  2647. data->time = perf_clock();
  2648. header->size += sizeof(data->time);
  2649. }
  2650. if (sample_type & PERF_SAMPLE_ADDR)
  2651. header->size += sizeof(data->addr);
  2652. if (sample_type & PERF_SAMPLE_ID) {
  2653. data->id = primary_event_id(event);
  2654. header->size += sizeof(data->id);
  2655. }
  2656. if (sample_type & PERF_SAMPLE_STREAM_ID) {
  2657. data->stream_id = event->id;
  2658. header->size += sizeof(data->stream_id);
  2659. }
  2660. if (sample_type & PERF_SAMPLE_CPU) {
  2661. data->cpu_entry.cpu = raw_smp_processor_id();
  2662. data->cpu_entry.reserved = 0;
  2663. header->size += sizeof(data->cpu_entry);
  2664. }
  2665. if (sample_type & PERF_SAMPLE_PERIOD)
  2666. header->size += sizeof(data->period);
  2667. if (sample_type & PERF_SAMPLE_READ)
  2668. header->size += perf_event_read_size(event);
  2669. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2670. int size = 1;
  2671. data->callchain = perf_callchain(regs);
  2672. if (data->callchain)
  2673. size += data->callchain->nr;
  2674. header->size += size * sizeof(u64);
  2675. }
  2676. if (sample_type & PERF_SAMPLE_RAW) {
  2677. int size = sizeof(u32);
  2678. if (data->raw)
  2679. size += data->raw->size;
  2680. else
  2681. size += sizeof(u32);
  2682. WARN_ON_ONCE(size & (sizeof(u64)-1));
  2683. header->size += size;
  2684. }
  2685. }
  2686. static void perf_event_output(struct perf_event *event, int nmi,
  2687. struct perf_sample_data *data,
  2688. struct pt_regs *regs)
  2689. {
  2690. struct perf_output_handle handle;
  2691. struct perf_event_header header;
  2692. perf_prepare_sample(&header, data, event, regs);
  2693. if (perf_output_begin(&handle, event, header.size, nmi, 1))
  2694. return;
  2695. perf_output_sample(&handle, &header, data, event);
  2696. perf_output_end(&handle);
  2697. }
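/*
 * perf_event_output() is the default overflow action: size the record
 * with perf_prepare_sample(), reserve that much space in the ring
 * buffer with perf_output_begin(), emit the record and commit it with
 * perf_output_end(). If perf_output_begin() fails (typically because
 * the buffer is full) the sample is dropped; the output code accounts
 * such drops so user space can detect lost records.
 */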
  2698. /*
  2699. * read event_id
  2700. */
  2701. struct perf_read_event {
  2702. struct perf_event_header header;
  2703. u32 pid;
  2704. u32 tid;
  2705. };
  2706. static void
  2707. perf_event_read_event(struct perf_event *event,
  2708. struct task_struct *task)
  2709. {
  2710. struct perf_output_handle handle;
  2711. struct perf_read_event read_event = {
  2712. .header = {
  2713. .type = PERF_RECORD_READ,
  2714. .misc = 0,
  2715. .size = sizeof(read_event) + perf_event_read_size(event),
  2716. },
  2717. .pid = perf_event_pid(event, task),
  2718. .tid = perf_event_tid(event, task),
  2719. };
  2720. int ret;
  2721. ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
  2722. if (ret)
  2723. return;
  2724. perf_output_put(&handle, read_event);
  2725. perf_output_read(&handle, event);
  2726. perf_output_end(&handle);
  2727. }
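/*
 * PERF_RECORD_READ is what backs attr.inherit_stat: when an inherited
 * child event is torn down, sync_child_event() (further below) calls
 * perf_event_read_event() so the parent's stream carries the child's
 * final counter value tagged with the child's pid/tid.
 */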
  2728. /*
  2729. * task tracking -- fork/exit
  2730. *
  2731. * enabled by: attr.comm | attr.mmap | attr.task
  2732. */
  2733. struct perf_task_event {
  2734. struct task_struct *task;
  2735. struct perf_event_context *task_ctx;
  2736. struct {
  2737. struct perf_event_header header;
  2738. u32 pid;
  2739. u32 ppid;
  2740. u32 tid;
  2741. u32 ptid;
  2742. u64 time;
  2743. } event_id;
  2744. };
  2745. static void perf_event_task_output(struct perf_event *event,
  2746. struct perf_task_event *task_event)
  2747. {
  2748. struct perf_output_handle handle;
  2749. int size;
  2750. struct task_struct *task = task_event->task;
  2751. int ret;
  2752. size = task_event->event_id.header.size;
  2753. ret = perf_output_begin(&handle, event, size, 0, 0);
  2754. if (ret)
  2755. return;
  2756. task_event->event_id.pid = perf_event_pid(event, task);
  2757. task_event->event_id.ppid = perf_event_pid(event, current);
  2758. task_event->event_id.tid = perf_event_tid(event, task);
  2759. task_event->event_id.ptid = perf_event_tid(event, current);
  2760. perf_output_put(&handle, task_event->event_id);
  2761. perf_output_end(&handle);
  2762. }
  2763. static int perf_event_task_match(struct perf_event *event)
  2764. {
  2765. if (event->state < PERF_EVENT_STATE_INACTIVE)
  2766. return 0;
  2767. if (event->cpu != -1 && event->cpu != smp_processor_id())
  2768. return 0;
  2769. if (event->attr.comm || event->attr.mmap || event->attr.task)
  2770. return 1;
  2771. return 0;
  2772. }
  2773. static void perf_event_task_ctx(struct perf_event_context *ctx,
  2774. struct perf_task_event *task_event)
  2775. {
  2776. struct perf_event *event;
  2777. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  2778. if (perf_event_task_match(event))
  2779. perf_event_task_output(event, task_event);
  2780. }
  2781. }
  2782. static void perf_event_task_event(struct perf_task_event *task_event)
  2783. {
  2784. struct perf_cpu_context *cpuctx;
  2785. struct perf_event_context *ctx = task_event->task_ctx;
  2786. rcu_read_lock();
  2787. cpuctx = &get_cpu_var(perf_cpu_context);
  2788. perf_event_task_ctx(&cpuctx->ctx, task_event);
  2789. if (!ctx)
  2790. ctx = rcu_dereference(current->perf_event_ctxp);
  2791. if (ctx)
  2792. perf_event_task_ctx(ctx, task_event);
  2793. put_cpu_var(perf_cpu_context);
  2794. rcu_read_unlock();
  2795. }
  2796. static void perf_event_task(struct task_struct *task,
  2797. struct perf_event_context *task_ctx,
  2798. int new)
  2799. {
  2800. struct perf_task_event task_event;
  2801. if (!atomic_read(&nr_comm_events) &&
  2802. !atomic_read(&nr_mmap_events) &&
  2803. !atomic_read(&nr_task_events))
  2804. return;
  2805. task_event = (struct perf_task_event){
  2806. .task = task,
  2807. .task_ctx = task_ctx,
  2808. .event_id = {
  2809. .header = {
  2810. .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
  2811. .misc = 0,
  2812. .size = sizeof(task_event.event_id),
  2813. },
  2814. /* .pid */
  2815. /* .ppid */
  2816. /* .tid */
  2817. /* .ptid */
  2818. .time = perf_clock(),
  2819. },
  2820. };
  2821. perf_event_task_event(&task_event);
  2822. }
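/*
 * Fork and exit both funnel through perf_event_task(); only the record
 * type differs (PERF_RECORD_FORK vs PERF_RECORD_EXIT). The record is
 * delivered to the current CPU context and, unless an explicit task_ctx
 * was supplied, to the current task's context as well.
 */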
  2823. void perf_event_fork(struct task_struct *task)
  2824. {
  2825. perf_event_task(task, NULL, 1);
  2826. }
  2827. /*
  2828. * comm tracking
  2829. */
  2830. struct perf_comm_event {
  2831. struct task_struct *task;
  2832. char *comm;
  2833. int comm_size;
  2834. struct {
  2835. struct perf_event_header header;
  2836. u32 pid;
  2837. u32 tid;
  2838. } event_id;
  2839. };
  2840. static void perf_event_comm_output(struct perf_event *event,
  2841. struct perf_comm_event *comm_event)
  2842. {
  2843. struct perf_output_handle handle;
  2844. int size = comm_event->event_id.header.size;
  2845. int ret = perf_output_begin(&handle, event, size, 0, 0);
  2846. if (ret)
  2847. return;
  2848. comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
  2849. comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
  2850. perf_output_put(&handle, comm_event->event_id);
  2851. perf_output_copy(&handle, comm_event->comm,
  2852. comm_event->comm_size);
  2853. perf_output_end(&handle);
  2854. }
  2855. static int perf_event_comm_match(struct perf_event *event)
  2856. {
  2857. if (event->state < PERF_EVENT_STATE_INACTIVE)
  2858. return 0;
  2859. if (event->cpu != -1 && event->cpu != smp_processor_id())
  2860. return 0;
  2861. if (event->attr.comm)
  2862. return 1;
  2863. return 0;
  2864. }
  2865. static void perf_event_comm_ctx(struct perf_event_context *ctx,
  2866. struct perf_comm_event *comm_event)
  2867. {
  2868. struct perf_event *event;
  2869. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  2870. if (perf_event_comm_match(event))
  2871. perf_event_comm_output(event, comm_event);
  2872. }
  2873. }
  2874. static void perf_event_comm_event(struct perf_comm_event *comm_event)
  2875. {
  2876. struct perf_cpu_context *cpuctx;
  2877. struct perf_event_context *ctx;
  2878. unsigned int size;
  2879. char comm[TASK_COMM_LEN];
  2880. memset(comm, 0, sizeof(comm));
  2881. strlcpy(comm, comm_event->task->comm, sizeof(comm));
  2882. size = ALIGN(strlen(comm)+1, sizeof(u64));
  2883. comm_event->comm = comm;
  2884. comm_event->comm_size = size;
  2885. comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
  2886. rcu_read_lock();
  2887. cpuctx = &get_cpu_var(perf_cpu_context);
  2888. perf_event_comm_ctx(&cpuctx->ctx, comm_event);
  2889. ctx = rcu_dereference(current->perf_event_ctxp);
  2890. if (ctx)
  2891. perf_event_comm_ctx(ctx, comm_event);
  2892. put_cpu_var(perf_cpu_context);
  2893. rcu_read_unlock();
  2894. }
  2895. void perf_event_comm(struct task_struct *task)
  2896. {
  2897. struct perf_comm_event comm_event;
  2898. if (task->perf_event_ctxp)
  2899. perf_event_enable_on_exec(task);
  2900. if (!atomic_read(&nr_comm_events))
  2901. return;
  2902. comm_event = (struct perf_comm_event){
  2903. .task = task,
  2904. /* .comm */
  2905. /* .comm_size */
  2906. .event_id = {
  2907. .header = {
  2908. .type = PERF_RECORD_COMM,
  2909. .misc = 0,
  2910. /* .size */
  2911. },
  2912. /* .pid */
  2913. /* .tid */
  2914. },
  2915. };
  2916. perf_event_comm_event(&comm_event);
  2917. }
  2918. /*
  2919. * mmap tracking
  2920. */
  2921. struct perf_mmap_event {
  2922. struct vm_area_struct *vma;
  2923. const char *file_name;
  2924. int file_size;
  2925. struct {
  2926. struct perf_event_header header;
  2927. u32 pid;
  2928. u32 tid;
  2929. u64 start;
  2930. u64 len;
  2931. u64 pgoff;
  2932. } event_id;
  2933. };
  2934. static void perf_event_mmap_output(struct perf_event *event,
  2935. struct perf_mmap_event *mmap_event)
  2936. {
  2937. struct perf_output_handle handle;
  2938. int size = mmap_event->event_id.header.size;
  2939. int ret = perf_output_begin(&handle, event, size, 0, 0);
  2940. if (ret)
  2941. return;
  2942. mmap_event->event_id.pid = perf_event_pid(event, current);
  2943. mmap_event->event_id.tid = perf_event_tid(event, current);
  2944. perf_output_put(&handle, mmap_event->event_id);
  2945. perf_output_copy(&handle, mmap_event->file_name,
  2946. mmap_event->file_size);
  2947. perf_output_end(&handle);
  2948. }
  2949. static int perf_event_mmap_match(struct perf_event *event,
  2950. struct perf_mmap_event *mmap_event)
  2951. {
  2952. if (event->state < PERF_EVENT_STATE_INACTIVE)
  2953. return 0;
  2954. if (event->cpu != -1 && event->cpu != smp_processor_id())
  2955. return 0;
  2956. if (event->attr.mmap)
  2957. return 1;
  2958. return 0;
  2959. }
  2960. static void perf_event_mmap_ctx(struct perf_event_context *ctx,
  2961. struct perf_mmap_event *mmap_event)
  2962. {
  2963. struct perf_event *event;
  2964. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  2965. if (perf_event_mmap_match(event, mmap_event))
  2966. perf_event_mmap_output(event, mmap_event);
  2967. }
  2968. }
  2969. static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
  2970. {
  2971. struct perf_cpu_context *cpuctx;
  2972. struct perf_event_context *ctx;
  2973. struct vm_area_struct *vma = mmap_event->vma;
  2974. struct file *file = vma->vm_file;
  2975. unsigned int size;
  2976. char tmp[16];
  2977. char *buf = NULL;
  2978. const char *name;
  2979. memset(tmp, 0, sizeof(tmp));
  2980. if (file) {
  2981. /*
  2982. * d_path works from the end of the buffer backwards, so we
  2983. * need to add enough zero bytes after the string to handle
  2984. * the 64bit alignment we do later.
  2985. */
  2986. buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
  2987. if (!buf) {
  2988. name = strncpy(tmp, "//enomem", sizeof(tmp));
  2989. goto got_name;
  2990. }
  2991. name = d_path(&file->f_path, buf, PATH_MAX);
  2992. if (IS_ERR(name)) {
  2993. name = strncpy(tmp, "//toolong", sizeof(tmp));
  2994. goto got_name;
  2995. }
  2996. } else {
  2997. if (arch_vma_name(mmap_event->vma)) {
  2998. name = strncpy(tmp, arch_vma_name(mmap_event->vma),
  2999. sizeof(tmp));
  3000. goto got_name;
  3001. }
  3002. if (!vma->vm_mm) {
  3003. name = strncpy(tmp, "[vdso]", sizeof(tmp));
  3004. goto got_name;
  3005. }
  3006. name = strncpy(tmp, "//anon", sizeof(tmp));
  3007. goto got_name;
  3008. }
  3009. got_name:
  3010. size = ALIGN(strlen(name)+1, sizeof(u64));
  3011. mmap_event->file_name = name;
  3012. mmap_event->file_size = size;
  3013. mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  3014. rcu_read_lock();
  3015. cpuctx = &get_cpu_var(perf_cpu_context);
  3016. perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
  3017. ctx = rcu_dereference(current->perf_event_ctxp);
  3018. if (ctx)
  3019. perf_event_mmap_ctx(ctx, mmap_event);
  3020. put_cpu_var(perf_cpu_context);
  3021. rcu_read_unlock();
  3022. kfree(buf);
  3023. }
  3024. void __perf_event_mmap(struct vm_area_struct *vma)
  3025. {
  3026. struct perf_mmap_event mmap_event;
  3027. if (!atomic_read(&nr_mmap_events))
  3028. return;
  3029. mmap_event = (struct perf_mmap_event){
  3030. .vma = vma,
  3031. /* .file_name */
  3032. /* .file_size */
  3033. .event_id = {
  3034. .header = {
  3035. .type = PERF_RECORD_MMAP,
  3036. .misc = 0,
  3037. /* .size */
  3038. },
  3039. /* .pid */
  3040. /* .tid */
  3041. .start = vma->vm_start,
  3042. .len = vma->vm_end - vma->vm_start,
  3043. .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
  3044. },
  3045. };
  3046. perf_event_mmap_event(&mmap_event);
  3047. }
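/*
 * PERF_RECORD_MMAP describes a mapping (start, length, pgoff and file
 * name); profiling tools use these records to resolve sampled
 * instruction pointers back to the DSOs and symbols they fall in.
 */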
  3048. /*
  3049. * IRQ throttle logging
  3050. */
  3051. static void perf_log_throttle(struct perf_event *event, int enable)
  3052. {
  3053. struct perf_output_handle handle;
  3054. int ret;
  3055. struct {
  3056. struct perf_event_header header;
  3057. u64 time;
  3058. u64 id;
  3059. u64 stream_id;
  3060. } throttle_event = {
  3061. .header = {
  3062. .type = PERF_RECORD_THROTTLE,
  3063. .misc = 0,
  3064. .size = sizeof(throttle_event),
  3065. },
  3066. .time = perf_clock(),
  3067. .id = primary_event_id(event),
  3068. .stream_id = event->id,
  3069. };
  3070. if (enable)
  3071. throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
  3072. ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
  3073. if (ret)
  3074. return;
  3075. perf_output_put(&handle, throttle_event);
  3076. perf_output_end(&handle);
  3077. }
  3078. /*
  3079. * Generic event overflow handling, sampling.
  3080. */
  3081. static int __perf_event_overflow(struct perf_event *event, int nmi,
  3082. int throttle, struct perf_sample_data *data,
  3083. struct pt_regs *regs)
  3084. {
  3085. int events = atomic_read(&event->event_limit);
  3086. struct hw_perf_event *hwc = &event->hw;
  3087. int ret = 0;
  3088. throttle = (throttle && event->pmu->unthrottle != NULL);
  3089. if (!throttle) {
  3090. hwc->interrupts++;
  3091. } else {
  3092. if (hwc->interrupts != MAX_INTERRUPTS) {
  3093. hwc->interrupts++;
  3094. if (HZ * hwc->interrupts >
  3095. (u64)sysctl_perf_event_sample_rate) {
  3096. hwc->interrupts = MAX_INTERRUPTS;
  3097. perf_log_throttle(event, 0);
  3098. ret = 1;
  3099. }
  3100. } else {
  3101. /*
3102. * Keep re-disabling the event even though on the previous
3103. * pass we disabled it - just in case we raced with a
  3104. * sched-in and the event got enabled again:
  3105. */
  3106. ret = 1;
  3107. }
  3108. }
  3109. if (event->attr.freq) {
  3110. u64 now = perf_clock();
  3111. s64 delta = now - hwc->freq_time_stamp;
  3112. hwc->freq_time_stamp = now;
  3113. if (delta > 0 && delta < 2*TICK_NSEC)
  3114. perf_adjust_period(event, delta, hwc->last_period);
  3115. }
  3116. /*
  3117. * XXX event_limit might not quite work as expected on inherited
  3118. * events
  3119. */
  3120. event->pending_kill = POLL_IN;
  3121. if (events && atomic_dec_and_test(&event->event_limit)) {
  3122. ret = 1;
  3123. event->pending_kill = POLL_HUP;
  3124. if (nmi) {
  3125. event->pending_disable = 1;
  3126. perf_pending_queue(&event->pending,
  3127. perf_pending_event);
  3128. } else
  3129. perf_event_disable(event);
  3130. }
  3131. if (event->overflow_handler)
  3132. event->overflow_handler(event, nmi, data, regs);
  3133. else
  3134. perf_event_output(event, nmi, data, regs);
  3135. return ret;
  3136. }
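/*
 * Rough throttling arithmetic, assuming hwc->interrupts is reset each
 * timer tick (as done by the frequency handling elsewhere in this
 * file): an event is throttled once it overflows more than
 * sysctl_perf_event_sample_rate / HZ times within one tick. With
 * HZ == 1000 and a sample rate of 100000 that is about 100 overflows
 * per tick; PERF_RECORD_THROTTLE is logged and further overflows are
 * suppressed until the event is unthrottled again.
 */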
  3137. int perf_event_overflow(struct perf_event *event, int nmi,
  3138. struct perf_sample_data *data,
  3139. struct pt_regs *regs)
  3140. {
  3141. return __perf_event_overflow(event, nmi, 1, data, regs);
  3142. }
  3143. /*
  3144. * Generic software event infrastructure
  3145. */
  3146. /*
  3147. * We directly increment event->count and keep a second value in
3148. * event->hw.period_left to count intervals. This period value
3149. * is kept in the range [-sample_period, 0] so that we can use the
3150. * sign as a trigger.
  3151. */
  3152. static u64 perf_swevent_set_period(struct perf_event *event)
  3153. {
  3154. struct hw_perf_event *hwc = &event->hw;
  3155. u64 period = hwc->last_period;
  3156. u64 nr, offset;
  3157. s64 old, val;
  3158. hwc->last_period = hwc->sample_period;
  3159. again:
  3160. old = val = atomic64_read(&hwc->period_left);
  3161. if (val < 0)
  3162. return 0;
  3163. nr = div64_u64(period + val, period);
  3164. offset = nr * period;
  3165. val -= offset;
  3166. if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
  3167. goto again;
  3168. return nr;
  3169. }
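/*
 * Worked example of the period_left arithmetic (illustrative numbers):
 * with sample_period == 100, period_left starts at -100 and each
 * perf_swevent_add() adds the event count. Once period_left reaches a
 * non-negative value, say 30, perf_swevent_set_period() computes
 * nr = (100 + 30) / 100 = 1 pending overflow and rewinds period_left
 * to -70, so the next overflow fires after another 70 events.
 */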
  3170. static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
  3171. int nmi, struct perf_sample_data *data,
  3172. struct pt_regs *regs)
  3173. {
  3174. struct hw_perf_event *hwc = &event->hw;
  3175. int throttle = 0;
  3176. data->period = event->hw.last_period;
  3177. if (!overflow)
  3178. overflow = perf_swevent_set_period(event);
  3179. if (hwc->interrupts == MAX_INTERRUPTS)
  3180. return;
  3181. for (; overflow; overflow--) {
  3182. if (__perf_event_overflow(event, nmi, throttle,
  3183. data, regs)) {
  3184. /*
  3185. * We inhibit the overflow from happening when
  3186. * hwc->interrupts == MAX_INTERRUPTS.
  3187. */
  3188. break;
  3189. }
  3190. throttle = 1;
  3191. }
  3192. }
  3193. static void perf_swevent_unthrottle(struct perf_event *event)
  3194. {
  3195. /*
  3196. * Nothing to do, we already reset hwc->interrupts.
  3197. */
  3198. }
  3199. static void perf_swevent_add(struct perf_event *event, u64 nr,
  3200. int nmi, struct perf_sample_data *data,
  3201. struct pt_regs *regs)
  3202. {
  3203. struct hw_perf_event *hwc = &event->hw;
  3204. atomic64_add(nr, &event->count);
  3205. if (!regs)
  3206. return;
  3207. if (!hwc->sample_period)
  3208. return;
  3209. if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
  3210. return perf_swevent_overflow(event, 1, nmi, data, regs);
  3211. if (atomic64_add_negative(nr, &hwc->period_left))
  3212. return;
  3213. perf_swevent_overflow(event, 0, nmi, data, regs);
  3214. }
  3215. static int perf_swevent_is_counting(struct perf_event *event)
  3216. {
  3217. /*
  3218. * The event is active, we're good!
  3219. */
  3220. if (event->state == PERF_EVENT_STATE_ACTIVE)
  3221. return 1;
  3222. /*
  3223. * The event is off/error, not counting.
  3224. */
  3225. if (event->state != PERF_EVENT_STATE_INACTIVE)
  3226. return 0;
  3227. /*
  3228. * The event is inactive, if the context is active
  3229. * we're part of a group that didn't make it on the 'pmu',
  3230. * not counting.
  3231. */
  3232. if (event->ctx->is_active)
  3233. return 0;
  3234. /*
  3235. * We're inactive and the context is too, this means the
  3236. * task is scheduled out, we're counting events that happen
  3237. * to us, like migration events.
  3238. */
  3239. return 1;
  3240. }
  3241. static int perf_tp_event_match(struct perf_event *event,
  3242. struct perf_sample_data *data);
  3243. static int perf_exclude_event(struct perf_event *event,
  3244. struct pt_regs *regs)
  3245. {
  3246. if (regs) {
  3247. if (event->attr.exclude_user && user_mode(regs))
  3248. return 1;
  3249. if (event->attr.exclude_kernel && !user_mode(regs))
  3250. return 1;
  3251. }
  3252. return 0;
  3253. }
  3254. static int perf_swevent_match(struct perf_event *event,
  3255. enum perf_type_id type,
  3256. u32 event_id,
  3257. struct perf_sample_data *data,
  3258. struct pt_regs *regs)
  3259. {
  3260. if (event->cpu != -1 && event->cpu != smp_processor_id())
  3261. return 0;
  3262. if (!perf_swevent_is_counting(event))
  3263. return 0;
  3264. if (event->attr.type != type)
  3265. return 0;
  3266. if (event->attr.config != event_id)
  3267. return 0;
  3268. if (perf_exclude_event(event, regs))
  3269. return 0;
  3270. if (event->attr.type == PERF_TYPE_TRACEPOINT &&
  3271. !perf_tp_event_match(event, data))
  3272. return 0;
  3273. return 1;
  3274. }
  3275. static void perf_swevent_ctx_event(struct perf_event_context *ctx,
  3276. enum perf_type_id type,
  3277. u32 event_id, u64 nr, int nmi,
  3278. struct perf_sample_data *data,
  3279. struct pt_regs *regs)
  3280. {
  3281. struct perf_event *event;
  3282. list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  3283. if (perf_swevent_match(event, type, event_id, data, regs))
  3284. perf_swevent_add(event, nr, nmi, data, regs);
  3285. }
  3286. }
  3287. int perf_swevent_get_recursion_context(void)
  3288. {
  3289. struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
  3290. int rctx;
  3291. if (in_nmi())
  3292. rctx = 3;
  3293. else if (in_irq())
  3294. rctx = 2;
  3295. else if (in_softirq())
  3296. rctx = 1;
  3297. else
  3298. rctx = 0;
  3299. if (cpuctx->recursion[rctx]) {
  3300. put_cpu_var(perf_cpu_context);
  3301. return -1;
  3302. }
  3303. cpuctx->recursion[rctx]++;
  3304. barrier();
  3305. return rctx;
  3306. }
  3307. EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  3308. void perf_swevent_put_recursion_context(int rctx)
  3309. {
  3310. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  3311. barrier();
  3312. cpuctx->recursion[rctx]--;
  3313. put_cpu_var(perf_cpu_context);
  3314. }
  3315. EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
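/*
 * The recursion[] counters give each execution context on this CPU
 * (task, softirq, hardirq, NMI) its own guard: if handling a software
 * event would re-enter the swevent code at the same level,
 * perf_swevent_get_recursion_context() returns -1 and the nested event
 * is dropped rather than recursing.
 */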
  3316. static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
  3317. u64 nr, int nmi,
  3318. struct perf_sample_data *data,
  3319. struct pt_regs *regs)
  3320. {
  3321. struct perf_cpu_context *cpuctx;
  3322. struct perf_event_context *ctx;
  3323. cpuctx = &__get_cpu_var(perf_cpu_context);
  3324. rcu_read_lock();
  3325. perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
  3326. nr, nmi, data, regs);
  3327. /*
  3328. * doesn't really matter which of the child contexts the
3329. * event ends up in.
  3330. */
  3331. ctx = rcu_dereference(current->perf_event_ctxp);
  3332. if (ctx)
  3333. perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
  3334. rcu_read_unlock();
  3335. }
  3336. void __perf_sw_event(u32 event_id, u64 nr, int nmi,
  3337. struct pt_regs *regs, u64 addr)
  3338. {
  3339. struct perf_sample_data data;
  3340. int rctx;
  3341. rctx = perf_swevent_get_recursion_context();
  3342. if (rctx < 0)
  3343. return;
  3344. perf_sample_data_init(&data, addr);
  3345. do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
  3346. perf_swevent_put_recursion_context(rctx);
  3347. }
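/*
 * Callers normally go through the perf_sw_event() wrapper (see
 * perf_event.h), which only calls __perf_sw_event() when at least one
 * event of that type is enabled. An illustrative call from a fault
 * handler (sketch, not a verbatim call site) looks like:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */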
  3348. static void perf_swevent_read(struct perf_event *event)
  3349. {
  3350. }
  3351. static int perf_swevent_enable(struct perf_event *event)
  3352. {
  3353. struct hw_perf_event *hwc = &event->hw;
  3354. if (hwc->sample_period) {
  3355. hwc->last_period = hwc->sample_period;
  3356. perf_swevent_set_period(event);
  3357. }
  3358. return 0;
  3359. }
  3360. static void perf_swevent_disable(struct perf_event *event)
  3361. {
  3362. }
  3363. static const struct pmu perf_ops_generic = {
  3364. .enable = perf_swevent_enable,
  3365. .disable = perf_swevent_disable,
  3366. .read = perf_swevent_read,
  3367. .unthrottle = perf_swevent_unthrottle,
  3368. };
  3369. /*
  3370. * hrtimer based swevent callback
  3371. */
  3372. static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
  3373. {
  3374. enum hrtimer_restart ret = HRTIMER_RESTART;
  3375. struct perf_sample_data data;
  3376. struct pt_regs *regs;
  3377. struct perf_event *event;
  3378. u64 period;
  3379. event = container_of(hrtimer, struct perf_event, hw.hrtimer);
  3380. event->pmu->read(event);
  3381. perf_sample_data_init(&data, 0);
  3382. data.period = event->hw.last_period;
  3383. regs = get_irq_regs();
  3384. /*
  3385. * In case we exclude kernel IPs or are somehow not in interrupt
  3386. * context, provide the next best thing, the user IP.
  3387. */
  3388. if ((event->attr.exclude_kernel || !regs) &&
  3389. !event->attr.exclude_user)
  3390. regs = task_pt_regs(current);
  3391. if (regs) {
  3392. if (!(event->attr.exclude_idle && current->pid == 0))
  3393. if (perf_event_overflow(event, 0, &data, regs))
  3394. ret = HRTIMER_NORESTART;
  3395. }
  3396. period = max_t(u64, 10000, event->hw.sample_period);
  3397. hrtimer_forward_now(hrtimer, ns_to_ktime(period));
  3398. return ret;
  3399. }
  3400. static void perf_swevent_start_hrtimer(struct perf_event *event)
  3401. {
  3402. struct hw_perf_event *hwc = &event->hw;
  3403. hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  3404. hwc->hrtimer.function = perf_swevent_hrtimer;
  3405. if (hwc->sample_period) {
  3406. u64 period;
  3407. if (hwc->remaining) {
  3408. if (hwc->remaining < 0)
  3409. period = 10000;
  3410. else
  3411. period = hwc->remaining;
  3412. hwc->remaining = 0;
  3413. } else {
  3414. period = max_t(u64, 10000, hwc->sample_period);
  3415. }
  3416. __hrtimer_start_range_ns(&hwc->hrtimer,
  3417. ns_to_ktime(period), 0,
  3418. HRTIMER_MODE_REL, 0);
  3419. }
  3420. }
  3421. static void perf_swevent_cancel_hrtimer(struct perf_event *event)
  3422. {
  3423. struct hw_perf_event *hwc = &event->hw;
  3424. if (hwc->sample_period) {
  3425. ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
  3426. hwc->remaining = ktime_to_ns(remaining);
  3427. hrtimer_cancel(&hwc->hrtimer);
  3428. }
  3429. }
  3430. /*
  3431. * Software event: cpu wall time clock
  3432. */
  3433. static void cpu_clock_perf_event_update(struct perf_event *event)
  3434. {
  3435. int cpu = raw_smp_processor_id();
  3436. s64 prev;
  3437. u64 now;
  3438. now = cpu_clock(cpu);
  3439. prev = atomic64_xchg(&event->hw.prev_count, now);
  3440. atomic64_add(now - prev, &event->count);
  3441. }
  3442. static int cpu_clock_perf_event_enable(struct perf_event *event)
  3443. {
  3444. struct hw_perf_event *hwc = &event->hw;
  3445. int cpu = raw_smp_processor_id();
  3446. atomic64_set(&hwc->prev_count, cpu_clock(cpu));
  3447. perf_swevent_start_hrtimer(event);
  3448. return 0;
  3449. }
  3450. static void cpu_clock_perf_event_disable(struct perf_event *event)
  3451. {
  3452. perf_swevent_cancel_hrtimer(event);
  3453. cpu_clock_perf_event_update(event);
  3454. }
  3455. static void cpu_clock_perf_event_read(struct perf_event *event)
  3456. {
  3457. cpu_clock_perf_event_update(event);
  3458. }
  3459. static const struct pmu perf_ops_cpu_clock = {
  3460. .enable = cpu_clock_perf_event_enable,
  3461. .disable = cpu_clock_perf_event_disable,
  3462. .read = cpu_clock_perf_event_read,
  3463. };
  3464. /*
  3465. * Software event: task time clock
  3466. */
  3467. static void task_clock_perf_event_update(struct perf_event *event, u64 now)
  3468. {
  3469. u64 prev;
  3470. s64 delta;
  3471. prev = atomic64_xchg(&event->hw.prev_count, now);
  3472. delta = now - prev;
  3473. atomic64_add(delta, &event->count);
  3474. }
  3475. static int task_clock_perf_event_enable(struct perf_event *event)
  3476. {
  3477. struct hw_perf_event *hwc = &event->hw;
  3478. u64 now;
  3479. now = event->ctx->time;
  3480. atomic64_set(&hwc->prev_count, now);
  3481. perf_swevent_start_hrtimer(event);
  3482. return 0;
  3483. }
  3484. static void task_clock_perf_event_disable(struct perf_event *event)
  3485. {
  3486. perf_swevent_cancel_hrtimer(event);
  3487. task_clock_perf_event_update(event, event->ctx->time);
  3488. }
  3489. static void task_clock_perf_event_read(struct perf_event *event)
  3490. {
  3491. u64 time;
  3492. if (!in_nmi()) {
  3493. update_context_time(event->ctx);
  3494. time = event->ctx->time;
  3495. } else {
  3496. u64 now = perf_clock();
  3497. u64 delta = now - event->ctx->timestamp;
  3498. time = event->ctx->time + delta;
  3499. }
  3500. task_clock_perf_event_update(event, time);
  3501. }
  3502. static const struct pmu perf_ops_task_clock = {
  3503. .enable = task_clock_perf_event_enable,
  3504. .disable = task_clock_perf_event_disable,
  3505. .read = task_clock_perf_event_read,
  3506. };
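/*
 * The two clock "PMUs" differ only in their time base: cpu-clock reads
 * cpu_clock() and keeps counting wall time on the CPU, while task-clock
 * uses ctx->time, which only advances while the monitored task's
 * context is scheduled in. Both sample via the hrtimer helpers above
 * rather than via a hardware overflow interrupt.
 */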
  3507. #ifdef CONFIG_EVENT_TRACING
  3508. void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
  3509. int entry_size, struct pt_regs *regs)
  3510. {
  3511. struct perf_sample_data data;
  3512. struct perf_raw_record raw = {
  3513. .size = entry_size,
  3514. .data = record,
  3515. };
  3516. perf_sample_data_init(&data, addr);
  3517. data.raw = &raw;
  3518. /* Trace events already protected against recursion */
  3519. do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
  3520. &data, regs);
  3521. }
  3522. EXPORT_SYMBOL_GPL(perf_tp_event);
  3523. static int perf_tp_event_match(struct perf_event *event,
  3524. struct perf_sample_data *data)
  3525. {
  3526. void *record = data->raw->data;
  3527. if (likely(!event->filter) || filter_match_preds(event->filter, record))
  3528. return 1;
  3529. return 0;
  3530. }
  3531. static void tp_perf_event_destroy(struct perf_event *event)
  3532. {
  3533. perf_trace_disable(event->attr.config);
  3534. }
  3535. static const struct pmu *tp_perf_event_init(struct perf_event *event)
  3536. {
  3537. /*
3538. * Raw tracepoint data is a severe data leak; only allow root to
  3539. * have these.
  3540. */
  3541. if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
  3542. perf_paranoid_tracepoint_raw() &&
  3543. !capable(CAP_SYS_ADMIN))
  3544. return ERR_PTR(-EPERM);
  3545. if (perf_trace_enable(event->attr.config))
  3546. return NULL;
  3547. event->destroy = tp_perf_event_destroy;
  3548. return &perf_ops_generic;
  3549. }
  3550. static int perf_event_set_filter(struct perf_event *event, void __user *arg)
  3551. {
  3552. char *filter_str;
  3553. int ret;
  3554. if (event->attr.type != PERF_TYPE_TRACEPOINT)
  3555. return -EINVAL;
  3556. filter_str = strndup_user(arg, PAGE_SIZE);
  3557. if (IS_ERR(filter_str))
  3558. return PTR_ERR(filter_str);
  3559. ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
  3560. kfree(filter_str);
  3561. return ret;
  3562. }
  3563. static void perf_event_free_filter(struct perf_event *event)
  3564. {
  3565. ftrace_profile_free_filter(event);
  3566. }
  3567. #else
  3568. static int perf_tp_event_match(struct perf_event *event,
  3569. struct perf_sample_data *data)
  3570. {
  3571. return 1;
  3572. }
  3573. static const struct pmu *tp_perf_event_init(struct perf_event *event)
  3574. {
  3575. return NULL;
  3576. }
  3577. static int perf_event_set_filter(struct perf_event *event, void __user *arg)
  3578. {
  3579. return -ENOENT;
  3580. }
  3581. static void perf_event_free_filter(struct perf_event *event)
  3582. {
  3583. }
  3584. #endif /* CONFIG_EVENT_TRACING */
  3585. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  3586. static void bp_perf_event_destroy(struct perf_event *event)
  3587. {
  3588. release_bp_slot(event);
  3589. }
  3590. static const struct pmu *bp_perf_event_init(struct perf_event *bp)
  3591. {
  3592. int err;
  3593. err = register_perf_hw_breakpoint(bp);
  3594. if (err)
  3595. return ERR_PTR(err);
  3596. bp->destroy = bp_perf_event_destroy;
  3597. return &perf_ops_bp;
  3598. }
  3599. void perf_bp_event(struct perf_event *bp, void *data)
  3600. {
  3601. struct perf_sample_data sample;
  3602. struct pt_regs *regs = data;
  3603. perf_sample_data_init(&sample, bp->attr.bp_addr);
  3604. if (!perf_exclude_event(bp, regs))
  3605. perf_swevent_add(bp, 1, 1, &sample, regs);
  3606. }
  3607. #else
  3608. static const struct pmu *bp_perf_event_init(struct perf_event *bp)
  3609. {
  3610. return NULL;
  3611. }
  3612. void perf_bp_event(struct perf_event *bp, void *regs)
  3613. {
  3614. }
  3615. #endif
  3616. atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
  3617. static void sw_perf_event_destroy(struct perf_event *event)
  3618. {
  3619. u64 event_id = event->attr.config;
  3620. WARN_ON(event->parent);
  3621. atomic_dec(&perf_swevent_enabled[event_id]);
  3622. }
  3623. static const struct pmu *sw_perf_event_init(struct perf_event *event)
  3624. {
  3625. const struct pmu *pmu = NULL;
  3626. u64 event_id = event->attr.config;
  3627. /*
  3628. * Software events (currently) can't in general distinguish
  3629. * between user, kernel and hypervisor events.
  3630. * However, context switches and cpu migrations are considered
  3631. * to be kernel events, and page faults are never hypervisor
  3632. * events.
  3633. */
  3634. switch (event_id) {
  3635. case PERF_COUNT_SW_CPU_CLOCK:
  3636. pmu = &perf_ops_cpu_clock;
  3637. break;
  3638. case PERF_COUNT_SW_TASK_CLOCK:
  3639. /*
  3640. * If the user instantiates this as a per-cpu event,
  3641. * use the cpu_clock event instead.
  3642. */
  3643. if (event->ctx->task)
  3644. pmu = &perf_ops_task_clock;
  3645. else
  3646. pmu = &perf_ops_cpu_clock;
  3647. break;
  3648. case PERF_COUNT_SW_PAGE_FAULTS:
  3649. case PERF_COUNT_SW_PAGE_FAULTS_MIN:
  3650. case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
  3651. case PERF_COUNT_SW_CONTEXT_SWITCHES:
  3652. case PERF_COUNT_SW_CPU_MIGRATIONS:
  3653. case PERF_COUNT_SW_ALIGNMENT_FAULTS:
  3654. case PERF_COUNT_SW_EMULATION_FAULTS:
  3655. if (!event->parent) {
  3656. atomic_inc(&perf_swevent_enabled[event_id]);
  3657. event->destroy = sw_perf_event_destroy;
  3658. }
  3659. pmu = &perf_ops_generic;
  3660. break;
  3661. }
  3662. return pmu;
  3663. }
  3664. /*
3665. * Allocate and initialize an event structure
  3666. */
  3667. static struct perf_event *
  3668. perf_event_alloc(struct perf_event_attr *attr,
  3669. int cpu,
  3670. struct perf_event_context *ctx,
  3671. struct perf_event *group_leader,
  3672. struct perf_event *parent_event,
  3673. perf_overflow_handler_t overflow_handler,
  3674. gfp_t gfpflags)
  3675. {
  3676. const struct pmu *pmu;
  3677. struct perf_event *event;
  3678. struct hw_perf_event *hwc;
  3679. long err;
  3680. event = kzalloc(sizeof(*event), gfpflags);
  3681. if (!event)
  3682. return ERR_PTR(-ENOMEM);
  3683. /*
  3684. * Single events are their own group leaders, with an
  3685. * empty sibling list:
  3686. */
  3687. if (!group_leader)
  3688. group_leader = event;
  3689. mutex_init(&event->child_mutex);
  3690. INIT_LIST_HEAD(&event->child_list);
  3691. INIT_LIST_HEAD(&event->group_entry);
  3692. INIT_LIST_HEAD(&event->event_entry);
  3693. INIT_LIST_HEAD(&event->sibling_list);
  3694. init_waitqueue_head(&event->waitq);
  3695. mutex_init(&event->mmap_mutex);
  3696. event->cpu = cpu;
  3697. event->attr = *attr;
  3698. event->group_leader = group_leader;
  3699. event->pmu = NULL;
  3700. event->ctx = ctx;
  3701. event->oncpu = -1;
  3702. event->parent = parent_event;
  3703. event->ns = get_pid_ns(current->nsproxy->pid_ns);
  3704. event->id = atomic64_inc_return(&perf_event_id);
  3705. event->state = PERF_EVENT_STATE_INACTIVE;
  3706. if (!overflow_handler && parent_event)
  3707. overflow_handler = parent_event->overflow_handler;
  3708. event->overflow_handler = overflow_handler;
  3709. if (attr->disabled)
  3710. event->state = PERF_EVENT_STATE_OFF;
  3711. pmu = NULL;
  3712. hwc = &event->hw;
  3713. hwc->sample_period = attr->sample_period;
  3714. if (attr->freq && attr->sample_freq)
  3715. hwc->sample_period = 1;
  3716. hwc->last_period = hwc->sample_period;
  3717. atomic64_set(&hwc->period_left, hwc->sample_period);
  3718. /*
  3719. * we currently do not support PERF_FORMAT_GROUP on inherited events
  3720. */
  3721. if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
  3722. goto done;
  3723. switch (attr->type) {
  3724. case PERF_TYPE_RAW:
  3725. case PERF_TYPE_HARDWARE:
  3726. case PERF_TYPE_HW_CACHE:
  3727. pmu = hw_perf_event_init(event);
  3728. break;
  3729. case PERF_TYPE_SOFTWARE:
  3730. pmu = sw_perf_event_init(event);
  3731. break;
  3732. case PERF_TYPE_TRACEPOINT:
  3733. pmu = tp_perf_event_init(event);
  3734. break;
  3735. case PERF_TYPE_BREAKPOINT:
  3736. pmu = bp_perf_event_init(event);
  3737. break;
  3738. default:
  3739. break;
  3740. }
  3741. done:
  3742. err = 0;
  3743. if (!pmu)
  3744. err = -EINVAL;
  3745. else if (IS_ERR(pmu))
  3746. err = PTR_ERR(pmu);
  3747. if (err) {
  3748. if (event->ns)
  3749. put_pid_ns(event->ns);
  3750. kfree(event);
  3751. return ERR_PTR(err);
  3752. }
  3753. event->pmu = pmu;
  3754. if (!event->parent) {
  3755. atomic_inc(&nr_events);
  3756. if (event->attr.mmap)
  3757. atomic_inc(&nr_mmap_events);
  3758. if (event->attr.comm)
  3759. atomic_inc(&nr_comm_events);
  3760. if (event->attr.task)
  3761. atomic_inc(&nr_task_events);
  3762. }
  3763. return event;
  3764. }
  3765. static int perf_copy_attr(struct perf_event_attr __user *uattr,
  3766. struct perf_event_attr *attr)
  3767. {
  3768. u32 size;
  3769. int ret;
  3770. if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
  3771. return -EFAULT;
  3772. /*
3773. * zero the full structure, so that a short copy from user space leaves the uncopied tail zeroed.
  3774. */
  3775. memset(attr, 0, sizeof(*attr));
  3776. ret = get_user(size, &uattr->size);
  3777. if (ret)
  3778. return ret;
  3779. if (size > PAGE_SIZE) /* silly large */
  3780. goto err_size;
  3781. if (!size) /* abi compat */
  3782. size = PERF_ATTR_SIZE_VER0;
  3783. if (size < PERF_ATTR_SIZE_VER0)
  3784. goto err_size;
  3785. /*
  3786. * If we're handed a bigger struct than we know of,
  3787. * ensure all the unknown bits are 0 - i.e. new
  3788. * user-space does not rely on any kernel feature
3789. * extensions we don't know about yet.
  3790. */
  3791. if (size > sizeof(*attr)) {
  3792. unsigned char __user *addr;
  3793. unsigned char __user *end;
  3794. unsigned char val;
  3795. addr = (void __user *)uattr + sizeof(*attr);
  3796. end = (void __user *)uattr + size;
  3797. for (; addr < end; addr++) {
  3798. ret = get_user(val, addr);
  3799. if (ret)
  3800. return ret;
  3801. if (val)
  3802. goto err_size;
  3803. }
  3804. size = sizeof(*attr);
  3805. }
  3806. ret = copy_from_user(attr, uattr, size);
  3807. if (ret)
  3808. return -EFAULT;
  3809. /*
  3810. * If the type exists, the corresponding creation will verify
  3811. * the attr->config.
  3812. */
  3813. if (attr->type >= PERF_TYPE_MAX)
  3814. return -EINVAL;
  3815. if (attr->__reserved_1)
  3816. return -EINVAL;
  3817. if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
  3818. return -EINVAL;
  3819. if (attr->read_format & ~(PERF_FORMAT_MAX-1))
  3820. return -EINVAL;
  3821. out:
  3822. return ret;
  3823. err_size:
  3824. put_user(sizeof(*attr), &uattr->size);
  3825. ret = -E2BIG;
  3826. goto out;
  3827. }
  3828. static int perf_event_set_output(struct perf_event *event, int output_fd)
  3829. {
  3830. struct perf_event *output_event = NULL;
  3831. struct file *output_file = NULL;
  3832. struct perf_event *old_output;
  3833. int fput_needed = 0;
  3834. int ret = -EINVAL;
  3835. if (!output_fd)
  3836. goto set;
  3837. output_file = fget_light(output_fd, &fput_needed);
  3838. if (!output_file)
  3839. return -EBADF;
  3840. if (output_file->f_op != &perf_fops)
  3841. goto out;
  3842. output_event = output_file->private_data;
  3843. /* Don't chain output fds */
  3844. if (output_event->output)
  3845. goto out;
  3846. /* Don't set an output fd when we already have an output channel */
  3847. if (event->data)
  3848. goto out;
  3849. atomic_long_inc(&output_file->f_count);
  3850. set:
  3851. mutex_lock(&event->mmap_mutex);
  3852. old_output = event->output;
  3853. rcu_assign_pointer(event->output, output_event);
  3854. mutex_unlock(&event->mmap_mutex);
  3855. if (old_output) {
  3856. /*
  3857. * we need to make sure no existing perf_output_*()
  3858. * is still referencing this event.
  3859. */
  3860. synchronize_rcu();
  3861. fput(old_output->filp);
  3862. }
  3863. ret = 0;
  3864. out:
  3865. fput_light(output_file, fput_needed);
  3866. return ret;
  3867. }
  3868. /**
  3869. * sys_perf_event_open - open a performance event, associate it to a task/cpu
  3870. *
  3871. * @attr_uptr: event_id type attributes for monitoring/sampling
  3872. * @pid: target pid
  3873. * @cpu: target cpu
3874. * @group_fd: group leader event fd
 * @flags: PERF_FLAG_FD_NO_GROUP and/or PERF_FLAG_FD_OUTPUT
3875. */
  3876. SYSCALL_DEFINE5(perf_event_open,
  3877. struct perf_event_attr __user *, attr_uptr,
  3878. pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
  3879. {
  3880. struct perf_event *event, *group_leader;
  3881. struct perf_event_attr attr;
  3882. struct perf_event_context *ctx;
  3883. struct file *event_file = NULL;
  3884. struct file *group_file = NULL;
  3885. int fput_needed = 0;
  3886. int fput_needed2 = 0;
  3887. int err;
  3888. /* for future expandability... */
  3889. if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
  3890. return -EINVAL;
  3891. err = perf_copy_attr(attr_uptr, &attr);
  3892. if (err)
  3893. return err;
  3894. if (!attr.exclude_kernel) {
  3895. if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
  3896. return -EACCES;
  3897. }
  3898. if (attr.freq) {
  3899. if (attr.sample_freq > sysctl_perf_event_sample_rate)
  3900. return -EINVAL;
  3901. }
  3902. /*
  3903. * Get the target context (task or percpu):
  3904. */
  3905. ctx = find_get_context(pid, cpu);
  3906. if (IS_ERR(ctx))
  3907. return PTR_ERR(ctx);
  3908. /*
  3909. * Look up the group leader (we will attach this event to it):
  3910. */
  3911. group_leader = NULL;
  3912. if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
  3913. err = -EINVAL;
  3914. group_file = fget_light(group_fd, &fput_needed);
  3915. if (!group_file)
  3916. goto err_put_context;
  3917. if (group_file->f_op != &perf_fops)
  3918. goto err_put_context;
  3919. group_leader = group_file->private_data;
  3920. /*
  3921. * Do not allow a recursive hierarchy (this new sibling
  3922. * becoming part of another group-sibling):
  3923. */
  3924. if (group_leader->group_leader != group_leader)
  3925. goto err_put_context;
  3926. /*
  3927. * Do not allow to attach to a group in a different
  3928. * task or CPU context:
  3929. */
  3930. if (group_leader->ctx != ctx)
  3931. goto err_put_context;
  3932. /*
  3933. * Only a group leader can be exclusive or pinned
  3934. */
  3935. if (attr.exclusive || attr.pinned)
  3936. goto err_put_context;
  3937. }
  3938. event = perf_event_alloc(&attr, cpu, ctx, group_leader,
  3939. NULL, NULL, GFP_KERNEL);
  3940. err = PTR_ERR(event);
  3941. if (IS_ERR(event))
  3942. goto err_put_context;
  3943. err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
  3944. if (err < 0)
  3945. goto err_free_put_context;
  3946. event_file = fget_light(err, &fput_needed2);
  3947. if (!event_file)
  3948. goto err_free_put_context;
  3949. if (flags & PERF_FLAG_FD_OUTPUT) {
  3950. err = perf_event_set_output(event, group_fd);
  3951. if (err)
  3952. goto err_fput_free_put_context;
  3953. }
  3954. event->filp = event_file;
  3955. WARN_ON_ONCE(ctx->parent_ctx);
  3956. mutex_lock(&ctx->mutex);
  3957. perf_install_in_context(ctx, event, cpu);
  3958. ++ctx->generation;
  3959. mutex_unlock(&ctx->mutex);
  3960. event->owner = current;
  3961. get_task_struct(current);
  3962. mutex_lock(&current->perf_event_mutex);
  3963. list_add_tail(&event->owner_entry, &current->perf_event_list);
  3964. mutex_unlock(&current->perf_event_mutex);
  3965. err_fput_free_put_context:
  3966. fput_light(event_file, fput_needed2);
  3967. err_free_put_context:
  3968. if (err < 0)
  3969. kfree(event);
  3970. err_put_context:
  3971. if (err < 0)
  3972. put_ctx(ctx);
  3973. fput_light(group_file, fput_needed);
  3974. return err;
  3975. }
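/*
 * Minimal user-space usage sketch (illustrative only, not kernel code;
 * error handling omitted). Counting one task's user-level instructions
 * could look roughly like:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	u64 count;
 *	int fd = syscall(__NR_perf_event_open, &attr,
 *			 0,	// pid 0: the calling task
 *			 -1,	// cpu -1: on any cpu
 *			 -1,	// group_fd -1: no group leader
 *			 0);	// flags
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... run the workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */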
  3976. /**
  3977. * perf_event_create_kernel_counter
  3978. *
  3979. * @attr: attributes of the counter to create
3980. * @cpu: cpu on which the counter is bound
3981. * @pid: task to profile
 * @overflow_handler: callback invoked when the counter overflows
  3982. */
  3983. struct perf_event *
  3984. perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
  3985. pid_t pid,
  3986. perf_overflow_handler_t overflow_handler)
  3987. {
  3988. struct perf_event *event;
  3989. struct perf_event_context *ctx;
  3990. int err;
  3991. /*
  3992. * Get the target context (task or percpu):
  3993. */
  3994. ctx = find_get_context(pid, cpu);
  3995. if (IS_ERR(ctx)) {
  3996. err = PTR_ERR(ctx);
  3997. goto err_exit;
  3998. }
  3999. event = perf_event_alloc(attr, cpu, ctx, NULL,
  4000. NULL, overflow_handler, GFP_KERNEL);
  4001. if (IS_ERR(event)) {
  4002. err = PTR_ERR(event);
  4003. goto err_put_context;
  4004. }
  4005. event->filp = NULL;
  4006. WARN_ON_ONCE(ctx->parent_ctx);
  4007. mutex_lock(&ctx->mutex);
  4008. perf_install_in_context(ctx, event, cpu);
  4009. ++ctx->generation;
  4010. mutex_unlock(&ctx->mutex);
  4011. event->owner = current;
  4012. get_task_struct(current);
  4013. mutex_lock(&current->perf_event_mutex);
  4014. list_add_tail(&event->owner_entry, &current->perf_event_list);
  4015. mutex_unlock(&current->perf_event_mutex);
  4016. return event;
  4017. err_put_context:
  4018. put_ctx(ctx);
  4019. err_exit:
  4020. return ERR_PTR(err);
  4021. }
  4022. EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
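/*
 * In-kernel users (the hw_breakpoint layer, for example) create events
 * through perf_event_create_kernel_counter() instead of the syscall.
 * An illustrative sketch of such a caller, assuming attr has already
 * been filled in and "my_overflow" is the caller's own handler:
 *
 *	static void my_overflow(struct perf_event *event, int nmi,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		// react to the overflow
 *	}
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, -1,
 *						 my_overflow);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 * The handler replaces the default perf_event_output() path in
 * __perf_event_overflow(); passing a valid cpu with pid == -1 yields a
 * per-cpu counter, while cpu == -1 with a pid yields a per-task one.
 */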
  4023. /*
4024. * inherit an event from parent task to child task:
  4025. */
  4026. static struct perf_event *
  4027. inherit_event(struct perf_event *parent_event,
  4028. struct task_struct *parent,
  4029. struct perf_event_context *parent_ctx,
  4030. struct task_struct *child,
  4031. struct perf_event *group_leader,
  4032. struct perf_event_context *child_ctx)
  4033. {
  4034. struct perf_event *child_event;
  4035. /*
  4036. * Instead of creating recursive hierarchies of events,
  4037. * we link inherited events back to the original parent,
4038. * which is guaranteed to have a filp that we use as the reference
  4039. * count:
  4040. */
  4041. if (parent_event->parent)
  4042. parent_event = parent_event->parent;
  4043. child_event = perf_event_alloc(&parent_event->attr,
  4044. parent_event->cpu, child_ctx,
  4045. group_leader, parent_event,
  4046. NULL, GFP_KERNEL);
  4047. if (IS_ERR(child_event))
  4048. return child_event;
  4049. get_ctx(child_ctx);
  4050. /*
  4051. * Make the child state follow the state of the parent event,
  4052. * not its attr.disabled bit. We hold the parent's mutex,
  4053. * so we won't race with perf_event_{en, dis}able_family.
  4054. */
  4055. if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
  4056. child_event->state = PERF_EVENT_STATE_INACTIVE;
  4057. else
  4058. child_event->state = PERF_EVENT_STATE_OFF;
  4059. if (parent_event->attr.freq) {
  4060. u64 sample_period = parent_event->hw.sample_period;
  4061. struct hw_perf_event *hwc = &child_event->hw;
  4062. hwc->sample_period = sample_period;
  4063. hwc->last_period = sample_period;
  4064. atomic64_set(&hwc->period_left, sample_period);
  4065. }
  4066. child_event->overflow_handler = parent_event->overflow_handler;
  4067. /*
  4068. * Link it up in the child's context:
  4069. */
  4070. add_event_to_ctx(child_event, child_ctx);
  4071. /*
  4072. * Get a reference to the parent filp - we will fput it
  4073. * when the child event exits. This is safe to do because
  4074. * we are in the parent and we know that the filp still
  4075. * exists and has a nonzero count:
  4076. */
  4077. atomic_long_inc(&parent_event->filp->f_count);
  4078. /*
  4079. * Link this into the parent event's child list
  4080. */
  4081. WARN_ON_ONCE(parent_event->ctx->parent_ctx);
  4082. mutex_lock(&parent_event->child_mutex);
  4083. list_add_tail(&child_event->child_list, &parent_event->child_list);
  4084. mutex_unlock(&parent_event->child_mutex);
  4085. return child_event;
  4086. }
  4087. static int inherit_group(struct perf_event *parent_event,
  4088. struct task_struct *parent,
  4089. struct perf_event_context *parent_ctx,
  4090. struct task_struct *child,
  4091. struct perf_event_context *child_ctx)
  4092. {
  4093. struct perf_event *leader;
  4094. struct perf_event *sub;
  4095. struct perf_event *child_ctr;
  4096. leader = inherit_event(parent_event, parent, parent_ctx,
  4097. child, NULL, child_ctx);
  4098. if (IS_ERR(leader))
  4099. return PTR_ERR(leader);
  4100. list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
  4101. child_ctr = inherit_event(sub, parent, parent_ctx,
  4102. child, leader, child_ctx);
  4103. if (IS_ERR(child_ctr))
  4104. return PTR_ERR(child_ctr);
  4105. }
  4106. return 0;
  4107. }
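/*
 * Inheritance in short: when a task with attr.inherit events forks,
 * inherit_group()/inherit_event() clone the whole group into the
 * child's context, linking each clone back to the original parent
 * event. When the child exits, sync_child_event() below folds the
 * child's count and times back into the parent, so the parent's fd
 * reports the aggregate over the whole task tree.
 */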
  4108. static void sync_child_event(struct perf_event *child_event,
  4109. struct task_struct *child)
  4110. {
  4111. struct perf_event *parent_event = child_event->parent;
  4112. u64 child_val;
  4113. if (child_event->attr.inherit_stat)
  4114. perf_event_read_event(child_event, child);
  4115. child_val = atomic64_read(&child_event->count);
  4116. /*
  4117. * Add back the child's count to the parent's count:
  4118. */
  4119. atomic64_add(child_val, &parent_event->count);
  4120. atomic64_add(child_event->total_time_enabled,
  4121. &parent_event->child_total_time_enabled);
  4122. atomic64_add(child_event->total_time_running,
  4123. &parent_event->child_total_time_running);
  4124. /*
  4125. * Remove this event from the parent's list
  4126. */
  4127. WARN_ON_ONCE(parent_event->ctx->parent_ctx);
  4128. mutex_lock(&parent_event->child_mutex);
  4129. list_del_init(&child_event->child_list);
  4130. mutex_unlock(&parent_event->child_mutex);
  4131. /*
  4132. * Release the parent event, if this was the last
  4133. * reference to it.
  4134. */
  4135. fput(parent_event->filp);
  4136. }
  4137. static void
  4138. __perf_event_exit_task(struct perf_event *child_event,
  4139. struct perf_event_context *child_ctx,
  4140. struct task_struct *child)
  4141. {
  4142. struct perf_event *parent_event;
  4143. perf_event_remove_from_context(child_event);
  4144. parent_event = child_event->parent;
  4145. /*
  4146. * It can happen that parent exits first, and has events
  4147. * that are still around due to the child reference. These
  4148. * events need to be zapped - but otherwise linger.
  4149. */
  4150. if (parent_event) {
  4151. sync_child_event(child_event, child);
  4152. free_event(child_event);
  4153. }
  4154. }
  4155. /*
  4156. * When a child task exits, feed back event values to parent events.
  4157. */
  4158. void perf_event_exit_task(struct task_struct *child)
  4159. {
  4160. struct perf_event *child_event, *tmp;
  4161. struct perf_event_context *child_ctx;
  4162. unsigned long flags;
  4163. if (likely(!child->perf_event_ctxp)) {
  4164. perf_event_task(child, NULL, 0);
  4165. return;
  4166. }
  4167. local_irq_save(flags);
  4168. /*
  4169. * We can't reschedule here because interrupts are disabled,
  4170. * and either child is current or it is a task that can't be
  4171. * scheduled, so we are now safe from rescheduling changing
  4172. * our context.
  4173. */
  4174. child_ctx = child->perf_event_ctxp;
  4175. __perf_event_task_sched_out(child_ctx);
  4176. /*
  4177. * Take the context lock here so that if find_get_context is
  4178. * reading child->perf_event_ctxp, we wait until it has
  4179. * incremented the context's refcount before we do put_ctx below.
  4180. */
  4181. raw_spin_lock(&child_ctx->lock);
  4182. child->perf_event_ctxp = NULL;
  4183. /*
4184. * If this context is a clone, unclone it so it can't get
  4185. * swapped to another process while we're removing all
  4186. * the events from it.
  4187. */
  4188. unclone_ctx(child_ctx);
  4189. update_context_time(child_ctx);
  4190. raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
  4191. /*
  4192. * Report the task dead after unscheduling the events so that we
  4193. * won't get any samples after PERF_RECORD_EXIT. We can however still
  4194. * get a few PERF_RECORD_READ events.
  4195. */
  4196. perf_event_task(child, child_ctx, 0);
  4197. /*
  4198. * We can recurse on the same lock type through:
  4199. *
  4200. * __perf_event_exit_task()
  4201. * sync_child_event()
  4202. * fput(parent_event->filp)
  4203. * perf_release()
  4204. * mutex_lock(&ctx->mutex)
  4205. *
4206. * But since it's the parent context it won't be the same instance.
  4207. */
  4208. mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
  4209. again:
  4210. list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
  4211. group_entry)
  4212. __perf_event_exit_task(child_event, child_ctx, child);
  4213. list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
  4214. group_entry)
  4215. __perf_event_exit_task(child_event, child_ctx, child);
  4216. /*
  4217. * If the last event was a group event, it will have appended all
  4218. * its siblings to the list, but we obtained 'tmp' before that which
  4219. * will still point to the list head terminating the iteration.
  4220. */
  4221. if (!list_empty(&child_ctx->pinned_groups) ||
  4222. !list_empty(&child_ctx->flexible_groups))
  4223. goto again;
  4224. mutex_unlock(&child_ctx->mutex);
  4225. put_ctx(child_ctx);
  4226. }
  4227. static void perf_free_event(struct perf_event *event,
  4228. struct perf_event_context *ctx)
  4229. {
  4230. struct perf_event *parent = event->parent;
  4231. if (WARN_ON_ONCE(!parent))
  4232. return;
  4233. mutex_lock(&parent->child_mutex);
  4234. list_del_init(&event->child_list);
  4235. mutex_unlock(&parent->child_mutex);
  4236. fput(parent->filp);
  4237. list_del_event(event, ctx);
  4238. free_event(event);
  4239. }
  4240. /*
4241. * Free an unexposed, unused context as created by inheritance in
4242. * perf_event_init_task() below; used by fork() in case of failure.
  4243. */
  4244. void perf_event_free_task(struct task_struct *task)
  4245. {
  4246. struct perf_event_context *ctx = task->perf_event_ctxp;
  4247. struct perf_event *event, *tmp;
  4248. if (!ctx)
  4249. return;
  4250. mutex_lock(&ctx->mutex);
  4251. again:
  4252. list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
  4253. perf_free_event(event, ctx);
  4254. list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
  4255. group_entry)
  4256. perf_free_event(event, ctx);
  4257. if (!list_empty(&ctx->pinned_groups) ||
  4258. !list_empty(&ctx->flexible_groups))
  4259. goto again;
  4260. mutex_unlock(&ctx->mutex);
  4261. put_ctx(ctx);
  4262. }
  4263. static int
  4264. inherit_task_group(struct perf_event *event, struct task_struct *parent,
  4265. struct perf_event_context *parent_ctx,
  4266. struct task_struct *child,
  4267. int *inherited_all)
  4268. {
  4269. int ret;
  4270. struct perf_event_context *child_ctx = child->perf_event_ctxp;
  4271. if (!event->attr.inherit) {
  4272. *inherited_all = 0;
  4273. return 0;
  4274. }
  4275. if (!child_ctx) {
  4276. /*
  4277. * This is executed from the parent task context, so
  4278. * inherit events that have been marked for cloning.
  4279. * First allocate and initialize a context for the
  4280. * child.
  4281. */
  4282. child_ctx = kzalloc(sizeof(struct perf_event_context),
  4283. GFP_KERNEL);
  4284. if (!child_ctx)
  4285. return -ENOMEM;
  4286. __perf_event_init_context(child_ctx, child);
  4287. child->perf_event_ctxp = child_ctx;
  4288. get_task_struct(child);
  4289. }
  4290. ret = inherit_group(event, parent, parent_ctx,
  4291. child, child_ctx);
  4292. if (ret)
  4293. *inherited_all = 0;
  4294. return ret;
  4295. }
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
        int ret = 0;

        child->perf_event_ctxp = NULL;

        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);

        if (likely(!parent->perf_event_ctxp))
                return 0;

        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent);

        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * it non-NULL earlier, the only reason for it to become NULL
         * is if we exit, and since we're currently in the middle of
         * a fork we can't be exiting at the same time.
         */

        /*
         * Lock the parent list. No need to lock the child - not PID
         * hashed yet and not running, so nobody can access it.
         */
        mutex_lock(&parent_ctx->mutex);

        /*
         * We don't have to disable NMIs - we are only looking at
         * the list, not manipulating it:
         */
        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx, child,
                                         &inherited_all);
                if (ret)
                        break;
        }

        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx, child,
                                         &inherited_all);
                if (ret)
                        break;
        }

        child_ctx = child->perf_event_ctxp;

        if (child_ctx && inherited_all) {
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
                 * Note that if the parent is a clone, it could get
                 * uncloned at any point, but that doesn't matter
                 * because the list of events and the generation
                 * count can't have changed since we took the mutex.
                 */
                cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
                } else {
                        child_ctx->parent_ctx = parent_ctx;
                        child_ctx->parent_gen = parent_ctx->generation;
                }
                get_ctx(child_ctx->parent_ctx);
        }

        mutex_unlock(&parent_ctx->mutex);

        perf_unpin_context(parent_ctx);

        return ret;
}
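
/*
 * Hedged usage sketch (not part of this file): copy_process() in
 * kernel/fork.c calls perf_event_init_task() while setting up a new
 * task, roughly like this (the error label is illustrative only):
 *
 *      retval = perf_event_init_task(p);
 *      if (retval)
 *              goto bad_fork_cleanup_...;
 *
 * The (parent_ctx, parent_gen) pair recorded above is what lets the
 * context-switch path later recognize the parent and child contexts as
 * equivalent clones and swap them cheaply, instead of scheduling all
 * events out and back in.
 */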
static void __init perf_event_init_all_cpus(void)
{
        int cpu;
        struct perf_cpu_context *cpuctx;

        for_each_possible_cpu(cpu) {
                cpuctx = &per_cpu(perf_cpu_context, cpu);
                __perf_event_init_context(&cpuctx->ctx, NULL);
        }
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx;

        cpuctx = &per_cpu(perf_cpu_context, cpu);

        spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
        spin_unlock(&perf_resource_lock);
}
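
/*
 * Worked example (illustrative numbers only): with perf_max_events == 32
 * and perf_reserved_percpu == 4, a CPU coming online starts with
 *
 *      max_pertask = 32 - 4 = 28
 *
 * i.e. up to 28 counters may be claimed by per-task events while 4
 * remain reserved for per-CPU users.
 */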
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = &cpuctx->ctx;
        struct perf_event *event, *tmp;

        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                __perf_event_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
                __perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_event_context *ctx = &cpuctx->ctx;

        mutex_lock(&ctx->mutex);
        smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
        mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                perf_event_init_cpu(cpu);
                break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                perf_event_exit_cpu(cpu);
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
        .notifier_call          = perf_cpu_notify,
        .priority               = 20,
};
void __init perf_event_init(void)
{
        perf_event_init_all_cpus();
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
                        (void *)(long)smp_processor_id());
        register_cpu_notifier(&perf_cpu_nb);
}
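
/*
 * Note on the sequence above: the boot CPU is already online before the
 * notifier is registered, so it would never see the hotplug callbacks on
 * its own.  The two direct perf_cpu_notify() calls replay CPU_UP_PREPARE
 * and CPU_ONLINE for the boot CPU by hand, and register_cpu_notifier()
 * then covers CPUs hotplugged later.
 */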
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
                                        struct sysdev_class_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
                        struct sysdev_class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct perf_cpu_context *cpuctx;
        unsigned long val;
        int err, cpu, mpt;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > perf_max_events)
                return -EINVAL;

        spin_lock(&perf_resource_lock);
        perf_reserved_percpu = val;
        for_each_online_cpu(cpu) {
                cpuctx = &per_cpu(perf_cpu_context, cpu);
                raw_spin_lock_irq(&cpuctx->ctx.lock);
                mpt = min(perf_max_events - cpuctx->ctx.nr_events,
                          perf_max_events - perf_reserved_percpu);
                cpuctx->max_pertask = mpt;
                raw_spin_unlock_irq(&cpuctx->ctx.lock);
        }
        spin_unlock(&perf_resource_lock);

        return count;
}
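
/*
 * Worked example (illustrative numbers only): with perf_max_events == 32,
 * a CPU that already has ctx.nr_events == 10, and a newly written
 * reserve_percpu value of 8:
 *
 *      mpt = min(32 - 10, 32 - 8) = min(22, 24) = 22
 *
 * so the per-task limit on that CPU is capped by whichever constraint is
 * tighter, the events already present or the requested reservation.
 */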
static ssize_t perf_show_overcommit(struct sysdev_class *class,
                                    struct sysdev_class_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class,
                    struct sysdev_class_attribute *attr,
                    const char *buf, size_t count)
{
        unsigned long val;
        int err;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > 1)
                return -EINVAL;

        spin_lock(&perf_resource_lock);
        perf_overcommit = val;
        spin_unlock(&perf_resource_lock);

        return count;
}
static SYSDEV_CLASS_ATTR(
                                reserve_percpu,
                                0644,
                                perf_show_reserve_percpu,
                                perf_set_reserve_percpu
                        );

static SYSDEV_CLASS_ATTR(
                                overcommit,
                                0644,
                                perf_show_overcommit,
                                perf_set_overcommit
                        );

static struct attribute *perfclass_attrs[] = {
        &attr_reserve_percpu.attr,
        &attr_overcommit.attr,
        NULL
};

static struct attribute_group perfclass_attr_group = {
        .attrs                  = perfclass_attrs,
        .name                   = "perf_events",
};

static int __init perf_event_sysfs_init(void)
{
        return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
                                  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
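
/*
 * Hedged usage sketch from userspace, assuming the cpu sysdev class is
 * exposed at the usual /sys/devices/system/cpu location (paths and the
 * "0" output are for illustration only):
 *
 *      $ cat /sys/devices/system/cpu/perf_events/reserve_percpu
 *      0
 *      $ echo 4 > /sys/devices/system/cpu/perf_events/reserve_percpu
 *      $ echo 1 > /sys/devices/system/cpu/perf_events/overcommit
 *
 * The files come from the attribute_group above (named "perf_events"),
 * with the 0644 mode given to SYSDEV_CLASS_ATTR.
 */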