perf_counter.c

  1. /*
  2. * Performance counter core code
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7. * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8. *
  9. * For licensing details see kernel-base/COPYING
  10. */
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/cpu.h>
  14. #include <linux/smp.h>
  15. #include <linux/file.h>
  16. #include <linux/poll.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/dcache.h>
  19. #include <linux/percpu.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/hardirq.h>
  23. #include <linux/rculist.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/syscalls.h>
  26. #include <linux/anon_inodes.h>
  27. #include <linux/kernel_stat.h>
  28. #include <linux/perf_counter.h>
  29. #include <asm/irq_regs.h>
  30. /*
  31. * Each CPU has a list of per CPU counters:
  32. */
  33. DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
  34. int perf_max_counters __read_mostly = 1;
  35. static int perf_reserved_percpu __read_mostly;
  36. static int perf_overcommit __read_mostly = 1;
  37. static atomic_t nr_counters __read_mostly;
  38. static atomic_t nr_mmap_counters __read_mostly;
  39. static atomic_t nr_comm_counters __read_mostly;
  40. /*
  41. * perf counter paranoia level:
  42. * 0 - not paranoid
  43. * 1 - disallow CPU counters for unprivileged users
  44. * 2 - disallow kernel profiling for unprivileged users
  45. */
  46. int sysctl_perf_counter_paranoid __read_mostly;
  47. static inline bool perf_paranoid_cpu(void)
  48. {
  49. return sysctl_perf_counter_paranoid > 0;
  50. }
  51. static inline bool perf_paranoid_kernel(void)
  52. {
  53. return sysctl_perf_counter_paranoid > 1;
  54. }
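/*
 * Illustrative use of the helpers above (a sketch, not a call site shown in
 * this excerpt): code that creates counters can gate unprivileged access on
 * the paranoia level, e.g.
 *
 *	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 *
 * where capable(CAP_SYS_ADMIN) stands for whatever privilege check the
 * caller applies.
 */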
  55. int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
  56. /*
  57. * max perf counter sample rate
  58. */
  59. int sysctl_perf_counter_sample_rate __read_mostly = 100000;
  60. static atomic64_t perf_counter_id;
  61. /*
  62. * Lock for (sysadmin-configurable) counter reservations:
  63. */
  64. static DEFINE_SPINLOCK(perf_resource_lock);
  65. /*
  66. * Architecture provided APIs - weak aliases:
  67. */
  68. extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
  69. {
  70. return NULL;
  71. }
  72. void __weak hw_perf_disable(void) { barrier(); }
  73. void __weak hw_perf_enable(void) { barrier(); }
  74. void __weak hw_perf_counter_setup(int cpu) { barrier(); }
  75. int __weak
  76. hw_perf_group_sched_in(struct perf_counter *group_leader,
  77. struct perf_cpu_context *cpuctx,
  78. struct perf_counter_context *ctx, int cpu)
  79. {
  80. return 0;
  81. }
  82. void __weak perf_counter_print_debug(void) { }
  83. static DEFINE_PER_CPU(int, disable_count);
  84. void __perf_disable(void)
  85. {
  86. __get_cpu_var(disable_count)++;
  87. }
  88. bool __perf_enable(void)
  89. {
  90. return !--__get_cpu_var(disable_count);
  91. }
  92. void perf_disable(void)
  93. {
  94. __perf_disable();
  95. hw_perf_disable();
  96. }
  97. void perf_enable(void)
  98. {
  99. if (__perf_enable())
  100. hw_perf_enable();
  101. }
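/*
 * Example of the nesting behaviour above: perf_disable()/perf_enable()
 * pairs nest per CPU, and only the outermost perf_enable() re-enables the
 * hardware:
 *
 *	perf_disable();
 *	perf_disable();
 *	perf_enable();		disable_count 2 -> 1, hardware stays disabled
 *	perf_enable();		disable_count 1 -> 0, hw_perf_enable() runs
 */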
  102. static void get_ctx(struct perf_counter_context *ctx)
  103. {
  104. WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
  105. }
  106. static void free_ctx(struct rcu_head *head)
  107. {
  108. struct perf_counter_context *ctx;
  109. ctx = container_of(head, struct perf_counter_context, rcu_head);
  110. kfree(ctx);
  111. }
  112. static void put_ctx(struct perf_counter_context *ctx)
  113. {
  114. if (atomic_dec_and_test(&ctx->refcount)) {
  115. if (ctx->parent_ctx)
  116. put_ctx(ctx->parent_ctx);
  117. if (ctx->task)
  118. put_task_struct(ctx->task);
  119. call_rcu(&ctx->rcu_head, free_ctx);
  120. }
  121. }
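/*
 * Lifetime note: a context found under rcu_read_lock() is only used after
 * get_ctx()/atomic_inc_not_zero() succeeds, and the final put_ctx() frees
 * it via call_rcu(), so RCU readers that still hold a stale pointer never
 * see the memory disappear underneath them.
 */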
  122. /*
  123. * Get the perf_counter_context for a task and lock it.
  124. * This has to cope with the fact that until it is locked,
  125. * the context could get moved to another task.
  126. */
  127. static struct perf_counter_context *
  128. perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  129. {
  130. struct perf_counter_context *ctx;
  131. rcu_read_lock();
  132. retry:
  133. ctx = rcu_dereference(task->perf_counter_ctxp);
  134. if (ctx) {
  135. /*
  136. * If this context is a clone of another, it might
  137. * get swapped for another underneath us by
  138. * perf_counter_task_sched_out, though the
  139. * rcu_read_lock() protects us from any context
  140. * getting freed. Lock the context and check if it
  141. * got swapped before we could get the lock, and retry
  142. * if so. If we locked the right context, then it
  143. * can't get swapped on us any more.
  144. */
  145. spin_lock_irqsave(&ctx->lock, *flags);
  146. if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
  147. spin_unlock_irqrestore(&ctx->lock, *flags);
  148. goto retry;
  149. }
  150. if (!atomic_inc_not_zero(&ctx->refcount)) {
  151. spin_unlock_irqrestore(&ctx->lock, *flags);
  152. ctx = NULL;
  153. }
  154. }
  155. rcu_read_unlock();
  156. return ctx;
  157. }
  158. /*
  159. * Get the context for a task and increment its pin_count so it
  160. * can't get swapped to another task. This also increments its
  161. * reference count so that the context can't get freed.
  162. */
  163. static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
  164. {
  165. struct perf_counter_context *ctx;
  166. unsigned long flags;
  167. ctx = perf_lock_task_context(task, &flags);
  168. if (ctx) {
  169. ++ctx->pin_count;
  170. spin_unlock_irqrestore(&ctx->lock, flags);
  171. }
  172. return ctx;
  173. }
  174. static void perf_unpin_context(struct perf_counter_context *ctx)
  175. {
  176. unsigned long flags;
  177. spin_lock_irqsave(&ctx->lock, flags);
  178. --ctx->pin_count;
  179. spin_unlock_irqrestore(&ctx->lock, flags);
  180. put_ctx(ctx);
  181. }
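/*
 * Typical pairing (illustrative sketch):
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		... work that may sleep or drop locks ...
 *		perf_unpin_context(ctx);
 *	}
 *
 * perf_unpin_context() undoes both the pin_count and the reference taken
 * by perf_pin_task_context().
 */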
  182. /*
  183. * Add a counter to the lists for its context.
  184. * Must be called with ctx->mutex and ctx->lock held.
  185. */
  186. static void
  187. list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
  188. {
  189. struct perf_counter *group_leader = counter->group_leader;
  190. /*
  191. * Depending on whether it is a standalone or sibling counter,
  192. * add it straight to the context's counter list, or to the group
  193. * leader's sibling list:
  194. */
  195. if (group_leader == counter)
  196. list_add_tail(&counter->list_entry, &ctx->counter_list);
  197. else {
  198. list_add_tail(&counter->list_entry, &group_leader->sibling_list);
  199. group_leader->nr_siblings++;
  200. }
  201. list_add_rcu(&counter->event_entry, &ctx->event_list);
  202. ctx->nr_counters++;
  203. if (counter->attr.inherit_stat)
  204. ctx->nr_stat++;
  205. }
  206. /*
  207. * Remove a counter from the lists for its context.
  208. * Must be called with ctx->mutex and ctx->lock held.
  209. */
  210. static void
  211. list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
  212. {
  213. struct perf_counter *sibling, *tmp;
  214. if (list_empty(&counter->list_entry))
  215. return;
  216. ctx->nr_counters--;
  217. if (counter->attr.inherit_stat)
  218. ctx->nr_stat--;
  219. list_del_init(&counter->list_entry);
  220. list_del_rcu(&counter->event_entry);
  221. if (counter->group_leader != counter)
  222. counter->group_leader->nr_siblings--;
  223. /*
  224. * If this was a group counter with sibling counters then
  225. * upgrade the siblings to singleton counters by adding them
  226. * to the context list directly:
  227. */
  228. list_for_each_entry_safe(sibling, tmp,
  229. &counter->sibling_list, list_entry) {
  230. list_move_tail(&sibling->list_entry, &ctx->counter_list);
  231. sibling->group_leader = sibling;
  232. }
  233. }
  234. static void
  235. counter_sched_out(struct perf_counter *counter,
  236. struct perf_cpu_context *cpuctx,
  237. struct perf_counter_context *ctx)
  238. {
  239. if (counter->state != PERF_COUNTER_STATE_ACTIVE)
  240. return;
  241. counter->state = PERF_COUNTER_STATE_INACTIVE;
  242. counter->tstamp_stopped = ctx->time;
  243. counter->pmu->disable(counter);
  244. counter->oncpu = -1;
  245. if (!is_software_counter(counter))
  246. cpuctx->active_oncpu--;
  247. ctx->nr_active--;
  248. if (counter->attr.exclusive || !cpuctx->active_oncpu)
  249. cpuctx->exclusive = 0;
  250. }
  251. static void
  252. group_sched_out(struct perf_counter *group_counter,
  253. struct perf_cpu_context *cpuctx,
  254. struct perf_counter_context *ctx)
  255. {
  256. struct perf_counter *counter;
  257. if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
  258. return;
  259. counter_sched_out(group_counter, cpuctx, ctx);
  260. /*
  261. * Schedule out siblings (if any):
  262. */
  263. list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
  264. counter_sched_out(counter, cpuctx, ctx);
  265. if (group_counter->attr.exclusive)
  266. cpuctx->exclusive = 0;
  267. }
  268. /*
  269. * Cross CPU call to remove a performance counter
  270. *
  271. * We disable the counter on the hardware level first. After that we
  272. * remove it from the context list.
  273. */
  274. static void __perf_counter_remove_from_context(void *info)
  275. {
  276. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  277. struct perf_counter *counter = info;
  278. struct perf_counter_context *ctx = counter->ctx;
  279. /*
  280. * If this is a task context, we need to check whether it is
  281. * the current task context of this cpu. If not it has been
  282. * scheduled out before the smp call arrived.
  283. */
  284. if (ctx->task && cpuctx->task_ctx != ctx)
  285. return;
  286. spin_lock(&ctx->lock);
  287. /*
  288. * Protect the list operation against NMI by disabling the
  289. * counters on a global level.
  290. */
  291. perf_disable();
  292. counter_sched_out(counter, cpuctx, ctx);
  293. list_del_counter(counter, ctx);
  294. if (!ctx->task) {
  295. /*
  296. * Allow more per task counters with respect to the
  297. * reservation:
  298. */
  299. cpuctx->max_pertask =
  300. min(perf_max_counters - ctx->nr_counters,
  301. perf_max_counters - perf_reserved_percpu);
  302. }
  303. perf_enable();
  304. spin_unlock(&ctx->lock);
  305. }
  306. /*
  307. * Remove the counter from a task's (or a CPU's) list of counters.
  308. *
  309. * Must be called with ctx->mutex held.
  310. *
  311. * CPU counters are removed with a smp call. For task counters we only
  312. * call when the task is on a CPU.
  313. *
  314. * If counter->ctx is a cloned context, callers must make sure that
  315. * every task struct that counter->ctx->task could possibly point to
  316. * remains valid. This is OK when called from perf_release since
  317. * that only calls us on the top-level context, which can't be a clone.
  318. * When called from perf_counter_exit_task, it's OK because the
  319. * context has been detached from its task.
  320. */
  321. static void perf_counter_remove_from_context(struct perf_counter *counter)
  322. {
  323. struct perf_counter_context *ctx = counter->ctx;
  324. struct task_struct *task = ctx->task;
  325. if (!task) {
  326. /*
  327. * Per cpu counters are removed via an smp call and
  328. * the removal is always successful.
  329. */
  330. smp_call_function_single(counter->cpu,
  331. __perf_counter_remove_from_context,
  332. counter, 1);
  333. return;
  334. }
  335. retry:
  336. task_oncpu_function_call(task, __perf_counter_remove_from_context,
  337. counter);
  338. spin_lock_irq(&ctx->lock);
  339. /*
  340. * If the context is active we need to retry the smp call.
  341. */
  342. if (ctx->nr_active && !list_empty(&counter->list_entry)) {
  343. spin_unlock_irq(&ctx->lock);
  344. goto retry;
  345. }
  346. /*
  347. * The lock prevents this context from being scheduled in, so we
  348. * can remove the counter safely if the call above did not
  349. * succeed.
  350. */
  351. if (!list_empty(&counter->list_entry)) {
  352. list_del_counter(counter, ctx);
  353. }
  354. spin_unlock_irq(&ctx->lock);
  355. }
  356. static inline u64 perf_clock(void)
  357. {
  358. return cpu_clock(smp_processor_id());
  359. }
  360. /*
  361. * Update the record of the current time in a context.
  362. */
  363. static void update_context_time(struct perf_counter_context *ctx)
  364. {
  365. u64 now = perf_clock();
  366. ctx->time += now - ctx->timestamp;
  367. ctx->timestamp = now;
  368. }
  369. /*
  370. * Update the total_time_enabled and total_time_running fields for a counter.
  371. */
  372. static void update_counter_times(struct perf_counter *counter)
  373. {
  374. struct perf_counter_context *ctx = counter->ctx;
  375. u64 run_end;
  376. if (counter->state < PERF_COUNTER_STATE_INACTIVE)
  377. return;
  378. counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
  379. if (counter->state == PERF_COUNTER_STATE_INACTIVE)
  380. run_end = counter->tstamp_stopped;
  381. else
  382. run_end = ctx->time;
  383. counter->total_time_running = run_end - counter->tstamp_running;
  384. }
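/*
 * Worked example for the accounting above: with ctx->time == 1000, a
 * counter with tstamp_enabled == 200, tstamp_running == 300 and
 * tstamp_stopped == 700 that is currently INACTIVE reports
 * total_time_enabled == 800 and total_time_running == 400; were it still
 * ACTIVE, run_end would be ctx->time and total_time_running == 700.
 */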
  385. /*
  386. * Update total_time_enabled and total_time_running for all counters in a group.
  387. */
  388. static void update_group_times(struct perf_counter *leader)
  389. {
  390. struct perf_counter *counter;
  391. update_counter_times(leader);
  392. list_for_each_entry(counter, &leader->sibling_list, list_entry)
  393. update_counter_times(counter);
  394. }
  395. /*
  396. * Cross CPU call to disable a performance counter
  397. */
  398. static void __perf_counter_disable(void *info)
  399. {
  400. struct perf_counter *counter = info;
  401. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  402. struct perf_counter_context *ctx = counter->ctx;
  403. /*
  404. * If this is a per-task counter, need to check whether this
  405. * counter's task is the current task on this cpu.
  406. */
  407. if (ctx->task && cpuctx->task_ctx != ctx)
  408. return;
  409. spin_lock(&ctx->lock);
  410. /*
  411. * If the counter is on, turn it off.
  412. * If it is in error state, leave it in error state.
  413. */
  414. if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
  415. update_context_time(ctx);
  416. update_counter_times(counter);
  417. if (counter == counter->group_leader)
  418. group_sched_out(counter, cpuctx, ctx);
  419. else
  420. counter_sched_out(counter, cpuctx, ctx);
  421. counter->state = PERF_COUNTER_STATE_OFF;
  422. }
  423. spin_unlock(&ctx->lock);
  424. }
  425. /*
  426. * Disable a counter.
  427. *
  428. * If counter->ctx is a cloned context, callers must make sure that
  429. * every task struct that counter->ctx->task could possibly point to
  430. * remains valid. This condition is satisfied when called through
  431. * perf_counter_for_each_child or perf_counter_for_each because they
  432. * hold the top-level counter's child_mutex, so any descendant that
  433. * goes to exit will block in sync_child_counter.
  434. * When called from perf_pending_counter it's OK because counter->ctx
  435. * is the current context on this CPU and preemption is disabled,
  436. * hence we can't get into perf_counter_task_sched_out for this context.
  437. */
  438. static void perf_counter_disable(struct perf_counter *counter)
  439. {
  440. struct perf_counter_context *ctx = counter->ctx;
  441. struct task_struct *task = ctx->task;
  442. if (!task) {
  443. /*
  444. * Disable the counter on the cpu that it's on
  445. */
  446. smp_call_function_single(counter->cpu, __perf_counter_disable,
  447. counter, 1);
  448. return;
  449. }
  450. retry:
  451. task_oncpu_function_call(task, __perf_counter_disable, counter);
  452. spin_lock_irq(&ctx->lock);
  453. /*
  454. * If the counter is still active, we need to retry the cross-call.
  455. */
  456. if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
  457. spin_unlock_irq(&ctx->lock);
  458. goto retry;
  459. }
  460. /*
  461. * Since we have the lock this context can't be scheduled
  462. * in, so we can change the state safely.
  463. */
  464. if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  465. update_counter_times(counter);
  466. counter->state = PERF_COUNTER_STATE_OFF;
  467. }
  468. spin_unlock_irq(&ctx->lock);
  469. }
  470. static int
  471. counter_sched_in(struct perf_counter *counter,
  472. struct perf_cpu_context *cpuctx,
  473. struct perf_counter_context *ctx,
  474. int cpu)
  475. {
  476. if (counter->state <= PERF_COUNTER_STATE_OFF)
  477. return 0;
  478. counter->state = PERF_COUNTER_STATE_ACTIVE;
  479. counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
  480. /*
  481. * The new state must be visible before we turn it on in the hardware:
  482. */
  483. smp_wmb();
  484. if (counter->pmu->enable(counter)) {
  485. counter->state = PERF_COUNTER_STATE_INACTIVE;
  486. counter->oncpu = -1;
  487. return -EAGAIN;
  488. }
  489. counter->tstamp_running += ctx->time - counter->tstamp_stopped;
  490. if (!is_software_counter(counter))
  491. cpuctx->active_oncpu++;
  492. ctx->nr_active++;
  493. if (counter->attr.exclusive)
  494. cpuctx->exclusive = 1;
  495. return 0;
  496. }
  497. static int
  498. group_sched_in(struct perf_counter *group_counter,
  499. struct perf_cpu_context *cpuctx,
  500. struct perf_counter_context *ctx,
  501. int cpu)
  502. {
  503. struct perf_counter *counter, *partial_group;
  504. int ret;
  505. if (group_counter->state == PERF_COUNTER_STATE_OFF)
  506. return 0;
  507. ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
  508. if (ret)
  509. return ret < 0 ? ret : 0;
  510. if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
  511. return -EAGAIN;
  512. /*
  513. * Schedule in siblings as one group (if any):
  514. */
  515. list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
  516. if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
  517. partial_group = counter;
  518. goto group_error;
  519. }
  520. }
  521. return 0;
  522. group_error:
  523. /*
  524. * Groups can be scheduled in as one unit only, so undo any
  525. * partial group before returning:
  526. */
  527. list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
  528. if (counter == partial_group)
  529. break;
  530. counter_sched_out(counter, cpuctx, ctx);
  531. }
  532. counter_sched_out(group_counter, cpuctx, ctx);
  533. return -EAGAIN;
  534. }
  535. /*
  536. * Return 1 for a group consisting entirely of software counters,
  537. * 0 if the group contains any hardware counters.
  538. */
  539. static int is_software_only_group(struct perf_counter *leader)
  540. {
  541. struct perf_counter *counter;
  542. if (!is_software_counter(leader))
  543. return 0;
  544. list_for_each_entry(counter, &leader->sibling_list, list_entry)
  545. if (!is_software_counter(counter))
  546. return 0;
  547. return 1;
  548. }
  549. /*
  550. * Work out whether we can put this counter group on the CPU now.
  551. */
  552. static int group_can_go_on(struct perf_counter *counter,
  553. struct perf_cpu_context *cpuctx,
  554. int can_add_hw)
  555. {
  556. /*
  557. * Groups consisting entirely of software counters can always go on.
  558. */
  559. if (is_software_only_group(counter))
  560. return 1;
  561. /*
  562. * If an exclusive group is already on, no other hardware
  563. * counters can go on.
  564. */
  565. if (cpuctx->exclusive)
  566. return 0;
  567. /*
  568. * If this group is exclusive and there are already
  569. * counters on the CPU, it can't go on.
  570. */
  571. if (counter->attr.exclusive && cpuctx->active_oncpu)
  572. return 0;
  573. /*
  574. * Otherwise, try to add it if all previous groups were able
  575. * to go on.
  576. */
  577. return can_add_hw;
  578. }
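/*
 * In short: software-only groups always go on; otherwise the group is
 * refused if an exclusive group already owns the CPU, or if this group is
 * exclusive while other counters are active; in all remaining cases the
 * answer is whatever can_add_hw says about the groups scheduled so far.
 */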
  579. static void add_counter_to_ctx(struct perf_counter *counter,
  580. struct perf_counter_context *ctx)
  581. {
  582. list_add_counter(counter, ctx);
  583. counter->tstamp_enabled = ctx->time;
  584. counter->tstamp_running = ctx->time;
  585. counter->tstamp_stopped = ctx->time;
  586. }
  587. /*
  588. * Cross CPU call to install and enable a performance counter
  589. *
  590. * Must be called with ctx->mutex held
  591. */
  592. static void __perf_install_in_context(void *info)
  593. {
  594. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  595. struct perf_counter *counter = info;
  596. struct perf_counter_context *ctx = counter->ctx;
  597. struct perf_counter *leader = counter->group_leader;
  598. int cpu = smp_processor_id();
  599. int err;
  600. /*
  601. * If this is a task context, we need to check whether it is
  602. * the current task context of this cpu. If not it has been
  603. * scheduled out before the smp call arrived.
  604. * Or possibly this is the right context but it isn't
  605. * on this cpu because it had no counters.
  606. */
  607. if (ctx->task && cpuctx->task_ctx != ctx) {
  608. if (cpuctx->task_ctx || ctx->task != current)
  609. return;
  610. cpuctx->task_ctx = ctx;
  611. }
  612. spin_lock(&ctx->lock);
  613. ctx->is_active = 1;
  614. update_context_time(ctx);
  615. /*
  616. * Protect the list operation against NMI by disabling the
  617. * counters on a global level. NOP for non NMI based counters.
  618. */
  619. perf_disable();
  620. add_counter_to_ctx(counter, ctx);
  621. /*
  622. * Don't put the counter on if it is disabled or if
  623. * it is in a group and the group isn't on.
  624. */
  625. if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
  626. (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
  627. goto unlock;
  628. /*
  629. * An exclusive counter can't go on if there are already active
  630. * hardware counters, and no hardware counter can go on if there
  631. * is already an exclusive counter on.
  632. */
  633. if (!group_can_go_on(counter, cpuctx, 1))
  634. err = -EEXIST;
  635. else
  636. err = counter_sched_in(counter, cpuctx, ctx, cpu);
  637. if (err) {
  638. /*
  639. * This counter couldn't go on. If it is in a group
  640. * then we have to pull the whole group off.
  641. * If the counter group is pinned then put it in error state.
  642. */
  643. if (leader != counter)
  644. group_sched_out(leader, cpuctx, ctx);
  645. if (leader->attr.pinned) {
  646. update_group_times(leader);
  647. leader->state = PERF_COUNTER_STATE_ERROR;
  648. }
  649. }
  650. if (!err && !ctx->task && cpuctx->max_pertask)
  651. cpuctx->max_pertask--;
  652. unlock:
  653. perf_enable();
  654. spin_unlock(&ctx->lock);
  655. }
  656. /*
  657. * Attach a performance counter to a context
  658. *
  659. * First we add the counter to the list with the hardware enable bit
  660. * in counter->hw_config cleared.
  661. *
  662. * If the counter is attached to a task which is on a CPU we use a smp
  663. * call to enable it in the task context. The task might have been
  664. * scheduled away, but we check this in the smp call again.
  665. *
  666. * Must be called with ctx->mutex held.
  667. */
  668. static void
  669. perf_install_in_context(struct perf_counter_context *ctx,
  670. struct perf_counter *counter,
  671. int cpu)
  672. {
  673. struct task_struct *task = ctx->task;
  674. if (!task) {
  675. /*
  676. * Per cpu counters are installed via an smp call and
  677. * the install is always successful.
  678. */
  679. smp_call_function_single(cpu, __perf_install_in_context,
  680. counter, 1);
  681. return;
  682. }
  683. retry:
  684. task_oncpu_function_call(task, __perf_install_in_context,
  685. counter);
  686. spin_lock_irq(&ctx->lock);
  687. /*
  688. * If the context is active we need to retry the smp call.
  689. */
  690. if (ctx->is_active && list_empty(&counter->list_entry)) {
  691. spin_unlock_irq(&ctx->lock);
  692. goto retry;
  693. }
  694. /*
  695. * The lock prevents this context from being scheduled in, so we
  696. * can add the counter safely if the call above did not
  697. * succeed.
  698. */
  699. if (list_empty(&counter->list_entry))
  700. add_counter_to_ctx(counter, ctx);
  701. spin_unlock_irq(&ctx->lock);
  702. }
  703. /*
  704. * Cross CPU call to enable a performance counter
  705. */
  706. static void __perf_counter_enable(void *info)
  707. {
  708. struct perf_counter *counter = info;
  709. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  710. struct perf_counter_context *ctx = counter->ctx;
  711. struct perf_counter *leader = counter->group_leader;
  712. int err;
  713. /*
  714. * If this is a per-task counter, need to check whether this
  715. * counter's task is the current task on this cpu.
  716. */
  717. if (ctx->task && cpuctx->task_ctx != ctx) {
  718. if (cpuctx->task_ctx || ctx->task != current)
  719. return;
  720. cpuctx->task_ctx = ctx;
  721. }
  722. spin_lock(&ctx->lock);
  723. ctx->is_active = 1;
  724. update_context_time(ctx);
  725. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  726. goto unlock;
  727. counter->state = PERF_COUNTER_STATE_INACTIVE;
  728. counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
  729. /*
  730. * If the counter is in a group and isn't the group leader,
  731. * then don't put it on unless the group is on.
  732. */
  733. if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
  734. goto unlock;
  735. if (!group_can_go_on(counter, cpuctx, 1)) {
  736. err = -EEXIST;
  737. } else {
  738. perf_disable();
  739. if (counter == leader)
  740. err = group_sched_in(counter, cpuctx, ctx,
  741. smp_processor_id());
  742. else
  743. err = counter_sched_in(counter, cpuctx, ctx,
  744. smp_processor_id());
  745. perf_enable();
  746. }
  747. if (err) {
  748. /*
  749. * If this counter can't go on and it's part of a
  750. * group, then the whole group has to come off.
  751. */
  752. if (leader != counter)
  753. group_sched_out(leader, cpuctx, ctx);
  754. if (leader->attr.pinned) {
  755. update_group_times(leader);
  756. leader->state = PERF_COUNTER_STATE_ERROR;
  757. }
  758. }
  759. unlock:
  760. spin_unlock(&ctx->lock);
  761. }
  762. /*
  763. * Enable a counter.
  764. *
  765. * If counter->ctx is a cloned context, callers must make sure that
  766. * every task struct that counter->ctx->task could possibly point to
  767. * remains valid. This condition is satisfied when called through
  768. * perf_counter_for_each_child or perf_counter_for_each as described
  769. * for perf_counter_disable.
  770. */
  771. static void perf_counter_enable(struct perf_counter *counter)
  772. {
  773. struct perf_counter_context *ctx = counter->ctx;
  774. struct task_struct *task = ctx->task;
  775. if (!task) {
  776. /*
  777. * Enable the counter on the cpu that it's on
  778. */
  779. smp_call_function_single(counter->cpu, __perf_counter_enable,
  780. counter, 1);
  781. return;
  782. }
  783. spin_lock_irq(&ctx->lock);
  784. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  785. goto out;
  786. /*
  787. * If the counter is in error state, clear that first.
  788. * That way, if we see the counter in error state below, we
  789. * know that it has gone back into error state, as distinct
  790. * from the task having been scheduled away before the
  791. * cross-call arrived.
  792. */
  793. if (counter->state == PERF_COUNTER_STATE_ERROR)
  794. counter->state = PERF_COUNTER_STATE_OFF;
  795. retry:
  796. spin_unlock_irq(&ctx->lock);
  797. task_oncpu_function_call(task, __perf_counter_enable, counter);
  798. spin_lock_irq(&ctx->lock);
  799. /*
  800. * If the context is active and the counter is still off,
  801. * we need to retry the cross-call.
  802. */
  803. if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
  804. goto retry;
  805. /*
  806. * Since we have the lock this context can't be scheduled
  807. * in, so we can change the state safely.
  808. */
  809. if (counter->state == PERF_COUNTER_STATE_OFF) {
  810. counter->state = PERF_COUNTER_STATE_INACTIVE;
  811. counter->tstamp_enabled =
  812. ctx->time - counter->total_time_enabled;
  813. }
  814. out:
  815. spin_unlock_irq(&ctx->lock);
  816. }
  817. static int perf_counter_refresh(struct perf_counter *counter, int refresh)
  818. {
  819. /*
  820. * not supported on inherited counters
  821. */
  822. if (counter->attr.inherit)
  823. return -EINVAL;
  824. atomic_add(refresh, &counter->event_limit);
  825. perf_counter_enable(counter);
  826. return 0;
  827. }
  828. void __perf_counter_sched_out(struct perf_counter_context *ctx,
  829. struct perf_cpu_context *cpuctx)
  830. {
  831. struct perf_counter *counter;
  832. spin_lock(&ctx->lock);
  833. ctx->is_active = 0;
  834. if (likely(!ctx->nr_counters))
  835. goto out;
  836. update_context_time(ctx);
  837. perf_disable();
  838. if (ctx->nr_active) {
  839. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  840. if (counter != counter->group_leader)
  841. counter_sched_out(counter, cpuctx, ctx);
  842. else
  843. group_sched_out(counter, cpuctx, ctx);
  844. }
  845. }
  846. perf_enable();
  847. out:
  848. spin_unlock(&ctx->lock);
  849. }
  850. /*
  851. * Test whether two contexts are equivalent, i.e. whether they
  852. * have both been cloned from the same version of the same context
  853. * and they both have the same number of enabled counters.
  854. * If the number of enabled counters is the same, then the set
  855. * of enabled counters should be the same, because these are both
  856. * inherited contexts, therefore we can't access individual counters
  857. * in them directly with an fd; we can only enable/disable all
  858. * counters via prctl, or enable/disable all counters in a family
  859. * via ioctl, which will have the same effect on both contexts.
  860. */
  861. static int context_equiv(struct perf_counter_context *ctx1,
  862. struct perf_counter_context *ctx2)
  863. {
  864. return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
  865. && ctx1->parent_gen == ctx2->parent_gen
  866. && !ctx1->pin_count && !ctx2->pin_count;
  867. }
  868. static void __perf_counter_read(void *counter);
  869. static void __perf_counter_sync_stat(struct perf_counter *counter,
  870. struct perf_counter *next_counter)
  871. {
  872. u64 value;
  873. if (!counter->attr.inherit_stat)
  874. return;
  875. /*
  876. * Update the counter value, we cannot use perf_counter_read()
  877. * because we're in the middle of a context switch and have IRQs
  878. * disabled, which upsets smp_call_function_single(), however
  879. * we know the counter must be on the current CPU, therefore we
  880. * don't need to use it.
  881. */
  882. switch (counter->state) {
  883. case PERF_COUNTER_STATE_ACTIVE:
  884. __perf_counter_read(counter);
  885. break;
  886. case PERF_COUNTER_STATE_INACTIVE:
  887. update_counter_times(counter);
  888. break;
  889. default:
  890. break;
  891. }
  892. /*
  893. * In order to keep per-task stats reliable we need to flip the counter
  894. * values when we flip the contexts.
  895. */
  896. value = atomic64_read(&next_counter->count);
  897. value = atomic64_xchg(&counter->count, value);
  898. atomic64_set(&next_counter->count, value);
  899. swap(counter->total_time_enabled, next_counter->total_time_enabled);
  900. swap(counter->total_time_running, next_counter->total_time_running);
  901. /*
  902. * Since we swizzled the values, update the user visible data too.
  903. */
  904. perf_counter_update_userpage(counter);
  905. perf_counter_update_userpage(next_counter);
  906. }
  907. #define list_next_entry(pos, member) \
  908. list_entry(pos->member.next, typeof(*pos), member)
  909. static void perf_counter_sync_stat(struct perf_counter_context *ctx,
  910. struct perf_counter_context *next_ctx)
  911. {
  912. struct perf_counter *counter, *next_counter;
  913. if (!ctx->nr_stat)
  914. return;
  915. counter = list_first_entry(&ctx->event_list,
  916. struct perf_counter, event_entry);
  917. next_counter = list_first_entry(&next_ctx->event_list,
  918. struct perf_counter, event_entry);
  919. while (&counter->event_entry != &ctx->event_list &&
  920. &next_counter->event_entry != &next_ctx->event_list) {
  921. __perf_counter_sync_stat(counter, next_counter);
  922. counter = list_next_entry(counter, event_entry);
  923. next_counter = list_next_entry(next_counter, event_entry);
  924. }
  925. }
  926. /*
  927. * Called from scheduler to remove the counters of the current task,
  928. * with interrupts disabled.
  929. *
  930. * We stop each counter and update the counter value in counter->count.
  931. *
  932. * This does not protect us against NMI, but disable()
  933. * sets the disabled bit in the control field of counter _before_
  934. * accessing the counter control register. If a NMI hits, then it will
  935. * not restart the counter.
  936. */
  937. void perf_counter_task_sched_out(struct task_struct *task,
  938. struct task_struct *next, int cpu)
  939. {
  940. struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
  941. struct perf_counter_context *ctx = task->perf_counter_ctxp;
  942. struct perf_counter_context *next_ctx;
  943. struct perf_counter_context *parent;
  944. struct pt_regs *regs;
  945. int do_switch = 1;
  946. regs = task_pt_regs(task);
  947. perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
  948. if (likely(!ctx || !cpuctx->task_ctx))
  949. return;
  950. update_context_time(ctx);
  951. rcu_read_lock();
  952. parent = rcu_dereference(ctx->parent_ctx);
  953. next_ctx = next->perf_counter_ctxp;
  954. if (parent && next_ctx &&
  955. rcu_dereference(next_ctx->parent_ctx) == parent) {
  956. /*
  957. * Looks like the two contexts are clones, so we might be
  958. * able to optimize the context switch. We lock both
  959. * contexts and check that they are clones under the
  960. * lock (including re-checking that neither has been
  961. * uncloned in the meantime). It doesn't matter which
  962. * order we take the locks because no other cpu could
  963. * be trying to lock both of these tasks.
  964. */
  965. spin_lock(&ctx->lock);
  966. spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
  967. if (context_equiv(ctx, next_ctx)) {
  968. /*
  969. * XXX do we need a memory barrier of sorts
  970. * w.r.t. rcu_dereference() of perf_counter_ctxp
  971. */
  972. task->perf_counter_ctxp = next_ctx;
  973. next->perf_counter_ctxp = ctx;
  974. ctx->task = next;
  975. next_ctx->task = task;
  976. do_switch = 0;
  977. perf_counter_sync_stat(ctx, next_ctx);
  978. }
  979. spin_unlock(&next_ctx->lock);
  980. spin_unlock(&ctx->lock);
  981. }
  982. rcu_read_unlock();
  983. if (do_switch) {
  984. __perf_counter_sched_out(ctx, cpuctx);
  985. cpuctx->task_ctx = NULL;
  986. }
  987. }
  988. /*
  989. * Called with IRQs disabled
  990. */
  991. static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
  992. {
  993. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  994. if (!cpuctx->task_ctx)
  995. return;
  996. if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  997. return;
  998. __perf_counter_sched_out(ctx, cpuctx);
  999. cpuctx->task_ctx = NULL;
  1000. }
  1001. /*
  1002. * Called with IRQs disabled
  1003. */
  1004. static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
  1005. {
  1006. __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
  1007. }
  1008. static void
  1009. __perf_counter_sched_in(struct perf_counter_context *ctx,
  1010. struct perf_cpu_context *cpuctx, int cpu)
  1011. {
  1012. struct perf_counter *counter;
  1013. int can_add_hw = 1;
  1014. spin_lock(&ctx->lock);
  1015. ctx->is_active = 1;
  1016. if (likely(!ctx->nr_counters))
  1017. goto out;
  1018. ctx->timestamp = perf_clock();
  1019. perf_disable();
  1020. /*
  1021. * First go through the list and put on any pinned groups
  1022. * in order to give them the best chance of going on.
  1023. */
  1024. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1025. if (counter->state <= PERF_COUNTER_STATE_OFF ||
  1026. !counter->attr.pinned)
  1027. continue;
  1028. if (counter->cpu != -1 && counter->cpu != cpu)
  1029. continue;
  1030. if (counter != counter->group_leader)
  1031. counter_sched_in(counter, cpuctx, ctx, cpu);
  1032. else {
  1033. if (group_can_go_on(counter, cpuctx, 1))
  1034. group_sched_in(counter, cpuctx, ctx, cpu);
  1035. }
  1036. /*
  1037. * If this pinned group hasn't been scheduled,
  1038. * put it in error state.
  1039. */
  1040. if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  1041. update_group_times(counter);
  1042. counter->state = PERF_COUNTER_STATE_ERROR;
  1043. }
  1044. }
  1045. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1046. /*
  1047. * Ignore counters in OFF or ERROR state, and
  1048. * ignore pinned counters since we did them already.
  1049. */
  1050. if (counter->state <= PERF_COUNTER_STATE_OFF ||
  1051. counter->attr.pinned)
  1052. continue;
  1053. /*
  1054. * Listen to the 'cpu' scheduling filter constraint
  1055. * of counters:
  1056. */
  1057. if (counter->cpu != -1 && counter->cpu != cpu)
  1058. continue;
  1059. if (counter != counter->group_leader) {
  1060. if (counter_sched_in(counter, cpuctx, ctx, cpu))
  1061. can_add_hw = 0;
  1062. } else {
  1063. if (group_can_go_on(counter, cpuctx, can_add_hw)) {
  1064. if (group_sched_in(counter, cpuctx, ctx, cpu))
  1065. can_add_hw = 0;
  1066. }
  1067. }
  1068. }
  1069. perf_enable();
  1070. out:
  1071. spin_unlock(&ctx->lock);
  1072. }
  1073. /*
  1074. * Called from scheduler to add the counters of the current task
  1075. * with interrupts disabled.
  1076. *
  1077. * We restore the counter value and then enable it.
  1078. *
  1079. * This does not protect us against NMI, but enable()
  1080. * sets the enabled bit in the control field of counter _before_
  1081. * accessing the counter control register. If a NMI hits, then it will
  1082. * keep the counter running.
  1083. */
  1084. void perf_counter_task_sched_in(struct task_struct *task, int cpu)
  1085. {
  1086. struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
  1087. struct perf_counter_context *ctx = task->perf_counter_ctxp;
  1088. if (likely(!ctx))
  1089. return;
  1090. if (cpuctx->task_ctx == ctx)
  1091. return;
  1092. __perf_counter_sched_in(ctx, cpuctx, cpu);
  1093. cpuctx->task_ctx = ctx;
  1094. }
  1095. static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
  1096. {
  1097. struct perf_counter_context *ctx = &cpuctx->ctx;
  1098. __perf_counter_sched_in(ctx, cpuctx, cpu);
  1099. }
  1100. #define MAX_INTERRUPTS (~0ULL)
  1101. static void perf_log_throttle(struct perf_counter *counter, int enable);
  1102. static void perf_log_period(struct perf_counter *counter, u64 period);
  1103. static void perf_adjust_period(struct perf_counter *counter, u64 events)
  1104. {
  1105. struct hw_perf_counter *hwc = &counter->hw;
  1106. u64 period, sample_period;
  1107. s64 delta;
  1108. events *= hwc->sample_period;
  1109. period = div64_u64(events, counter->attr.sample_freq);
  1110. delta = (s64)(period - hwc->sample_period);
  1111. delta = (delta + 7) / 8; /* low pass filter */
  1112. sample_period = hwc->sample_period + delta;
  1113. if (!sample_period)
  1114. sample_period = 1;
  1115. perf_log_period(counter, sample_period);
  1116. hwc->sample_period = sample_period;
  1117. }
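/*
 * Called from the timer tick: unthrottle counters that hit
 * MAX_INTERRUPTS and re-adjust the period of freq-based counters
 * according to the number of interrupts seen since the last tick.
 */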
  1118. static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
  1119. {
  1120. struct perf_counter *counter;
  1121. struct hw_perf_counter *hwc;
  1122. u64 interrupts, freq;
  1123. spin_lock(&ctx->lock);
  1124. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1125. if (counter->state != PERF_COUNTER_STATE_ACTIVE)
  1126. continue;
  1127. hwc = &counter->hw;
  1128. interrupts = hwc->interrupts;
  1129. hwc->interrupts = 0;
  1130. /*
  1131. * unthrottle counters on the tick
  1132. */
  1133. if (interrupts == MAX_INTERRUPTS) {
  1134. perf_log_throttle(counter, 1);
  1135. counter->pmu->unthrottle(counter);
  1136. interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
  1137. }
  1138. if (!counter->attr.freq || !counter->attr.sample_freq)
  1139. continue;
  1140. /*
  1141. * if the specified freq < HZ then we need to skip ticks
  1142. */
  1143. if (counter->attr.sample_freq < HZ) {
  1144. freq = counter->attr.sample_freq;
  1145. hwc->freq_count += freq;
  1146. hwc->freq_interrupts += interrupts;
  1147. if (hwc->freq_count < HZ)
  1148. continue;
  1149. interrupts = hwc->freq_interrupts;
  1150. hwc->freq_interrupts = 0;
  1151. hwc->freq_count -= HZ;
  1152. } else
  1153. freq = HZ;
  1154. perf_adjust_period(counter, freq * interrupts);
  1155. /*
  1156. * In order to avoid being stalled by an (accidental) huge
  1157. * sample period, force reset the sample period if we didn't
  1158. * get any events in this freq period.
  1159. */
  1160. if (!interrupts) {
  1161. perf_disable();
  1162. counter->pmu->disable(counter);
  1163. atomic64_set(&hwc->period_left, 0);
  1164. counter->pmu->enable(counter);
  1165. perf_enable();
  1166. }
  1167. }
  1168. spin_unlock(&ctx->lock);
  1169. }
  1170. /*
  1171. * Round-robin a context's counters:
  1172. */
  1173. static void rotate_ctx(struct perf_counter_context *ctx)
  1174. {
  1175. struct perf_counter *counter;
  1176. if (!ctx->nr_counters)
  1177. return;
  1178. spin_lock(&ctx->lock);
  1179. /*
  1180. * Rotate the first entry last (works just fine for group counters too):
  1181. */
  1182. perf_disable();
  1183. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1184. list_move_tail(&counter->list_entry, &ctx->counter_list);
  1185. break;
  1186. }
  1187. perf_enable();
  1188. spin_unlock(&ctx->lock);
  1189. }
  1190. void perf_counter_task_tick(struct task_struct *curr, int cpu)
  1191. {
  1192. struct perf_cpu_context *cpuctx;
  1193. struct perf_counter_context *ctx;
  1194. if (!atomic_read(&nr_counters))
  1195. return;
  1196. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1197. ctx = curr->perf_counter_ctxp;
  1198. perf_ctx_adjust_freq(&cpuctx->ctx);
  1199. if (ctx)
  1200. perf_ctx_adjust_freq(ctx);
  1201. perf_counter_cpu_sched_out(cpuctx);
  1202. if (ctx)
  1203. __perf_counter_task_sched_out(ctx);
  1204. rotate_ctx(&cpuctx->ctx);
  1205. if (ctx)
  1206. rotate_ctx(ctx);
  1207. perf_counter_cpu_sched_in(cpuctx, cpu);
  1208. if (ctx)
  1209. perf_counter_task_sched_in(curr, cpu);
  1210. }
  1211. /*
  1212. * Enable all of a task's counters that have been marked enable-on-exec.
  1213. * This expects task == current.
  1214. */
  1215. static void perf_counter_enable_on_exec(struct task_struct *task)
  1216. {
  1217. struct perf_counter_context *ctx;
  1218. struct perf_counter *counter;
  1219. unsigned long flags;
  1220. int enabled = 0;
  1221. local_irq_save(flags);
  1222. ctx = task->perf_counter_ctxp;
  1223. if (!ctx || !ctx->nr_counters)
  1224. goto out;
  1225. __perf_counter_task_sched_out(ctx);
  1226. spin_lock(&ctx->lock);
  1227. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1228. if (!counter->attr.enable_on_exec)
  1229. continue;
  1230. counter->attr.enable_on_exec = 0;
  1231. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  1232. continue;
  1233. counter->state = PERF_COUNTER_STATE_INACTIVE;
  1234. counter->tstamp_enabled =
  1235. ctx->time - counter->total_time_enabled;
  1236. enabled = 1;
  1237. }
  1238. /*
  1239. * Unclone this context if we enabled any counter.
  1240. */
  1241. if (enabled && ctx->parent_ctx) {
  1242. put_ctx(ctx->parent_ctx);
  1243. ctx->parent_ctx = NULL;
  1244. }
  1245. spin_unlock(&ctx->lock);
  1246. perf_counter_task_sched_in(task, smp_processor_id());
  1247. out:
  1248. local_irq_restore(flags);
  1249. }
  1250. /*
  1251. * Cross CPU call to read the hardware counter
  1252. */
  1253. static void __perf_counter_read(void *info)
  1254. {
  1255. struct perf_counter *counter = info;
  1256. struct perf_counter_context *ctx = counter->ctx;
  1257. unsigned long flags;
  1258. local_irq_save(flags);
  1259. if (ctx->is_active)
  1260. update_context_time(ctx);
  1261. counter->pmu->read(counter);
  1262. update_counter_times(counter);
  1263. local_irq_restore(flags);
  1264. }
  1265. static u64 perf_counter_read(struct perf_counter *counter)
  1266. {
  1267. /*
  1268. * If counter is enabled and currently active on a CPU, update the
  1269. * value in the counter structure:
  1270. */
  1271. if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
  1272. smp_call_function_single(counter->oncpu,
  1273. __perf_counter_read, counter, 1);
  1274. } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  1275. update_counter_times(counter);
  1276. }
  1277. return atomic64_read(&counter->count);
  1278. }
  1279. /*
  1280. * Initialize the perf_counter context in a task_struct:
  1281. */
  1282. static void
  1283. __perf_counter_init_context(struct perf_counter_context *ctx,
  1284. struct task_struct *task)
  1285. {
  1286. memset(ctx, 0, sizeof(*ctx));
  1287. spin_lock_init(&ctx->lock);
  1288. mutex_init(&ctx->mutex);
  1289. INIT_LIST_HEAD(&ctx->counter_list);
  1290. INIT_LIST_HEAD(&ctx->event_list);
  1291. atomic_set(&ctx->refcount, 1);
  1292. ctx->task = task;
  1293. }
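/*
 * Look up (and take a reference on) the counter context for a pid/cpu
 * pair: cpu != -1 selects the per-cpu context, otherwise the task
 * context for the given pid is found or allocated.
 */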
  1294. static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  1295. {
  1296. struct perf_counter_context *parent_ctx;
  1297. struct perf_counter_context *ctx;
  1298. struct perf_cpu_context *cpuctx;
  1299. struct task_struct *task;
  1300. unsigned long flags;
  1301. int err;
  1302. /*
  1303. * If cpu is not a wildcard then this is a percpu counter:
  1304. */
  1305. if (cpu != -1) {
  1306. /* Must be root to operate on a CPU counter: */
  1307. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  1308. return ERR_PTR(-EACCES);
  1309. if (cpu < 0 || cpu > num_possible_cpus())
  1310. return ERR_PTR(-EINVAL);
  1311. /*
  1312. * We could be clever and allow to attach a counter to an
  1313. * offline CPU and activate it when the CPU comes up, but
  1314. * that's for later.
  1315. */
  1316. if (!cpu_isset(cpu, cpu_online_map))
  1317. return ERR_PTR(-ENODEV);
  1318. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1319. ctx = &cpuctx->ctx;
  1320. get_ctx(ctx);
  1321. return ctx;
  1322. }
  1323. rcu_read_lock();
  1324. if (!pid)
  1325. task = current;
  1326. else
  1327. task = find_task_by_vpid(pid);
  1328. if (task)
  1329. get_task_struct(task);
  1330. rcu_read_unlock();
  1331. if (!task)
  1332. return ERR_PTR(-ESRCH);
  1333. /*
  1334. * Can't attach counters to a dying task.
  1335. */
  1336. err = -ESRCH;
  1337. if (task->flags & PF_EXITING)
  1338. goto errout;
  1339. /* Reuse ptrace permission checks for now. */
  1340. err = -EACCES;
  1341. if (!ptrace_may_access(task, PTRACE_MODE_READ))
  1342. goto errout;
  1343. retry:
  1344. ctx = perf_lock_task_context(task, &flags);
  1345. if (ctx) {
  1346. parent_ctx = ctx->parent_ctx;
  1347. if (parent_ctx) {
  1348. put_ctx(parent_ctx);
  1349. ctx->parent_ctx = NULL; /* no longer a clone */
  1350. }
  1351. spin_unlock_irqrestore(&ctx->lock, flags);
  1352. }
  1353. if (!ctx) {
  1354. ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
  1355. err = -ENOMEM;
  1356. if (!ctx)
  1357. goto errout;
  1358. __perf_counter_init_context(ctx, task);
  1359. get_ctx(ctx);
  1360. if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
  1361. /*
  1362. * We raced with some other task; use
  1363. * the context they set.
  1364. */
  1365. kfree(ctx);
  1366. goto retry;
  1367. }
  1368. get_task_struct(task);
  1369. }
  1370. put_task_struct(task);
  1371. return ctx;
  1372. errout:
  1373. put_task_struct(task);
  1374. return ERR_PTR(err);
  1375. }
  1376. static void free_counter_rcu(struct rcu_head *head)
  1377. {
  1378. struct perf_counter *counter;
  1379. counter = container_of(head, struct perf_counter, rcu_head);
  1380. if (counter->ns)
  1381. put_pid_ns(counter->ns);
  1382. kfree(counter);
  1383. }
  1384. static void perf_pending_sync(struct perf_counter *counter);
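/*
 * Release a counter: wait for any pending (NMI-queued) work, drop the
 * global bookkeeping counts, and free the counter once all RCU readers
 * are done with it.
 */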
  1385. static void free_counter(struct perf_counter *counter)
  1386. {
  1387. perf_pending_sync(counter);
  1388. if (!counter->parent) {
  1389. atomic_dec(&nr_counters);
  1390. if (counter->attr.mmap)
  1391. atomic_dec(&nr_mmap_counters);
  1392. if (counter->attr.comm)
  1393. atomic_dec(&nr_comm_counters);
  1394. }
  1395. if (counter->destroy)
  1396. counter->destroy(counter);
  1397. put_ctx(counter->ctx);
  1398. call_rcu(&counter->rcu_head, free_counter_rcu);
  1399. }
  1400. /*
  1401. * Called when the last reference to the file is gone.
  1402. */
  1403. static int perf_release(struct inode *inode, struct file *file)
  1404. {
  1405. struct perf_counter *counter = file->private_data;
  1406. struct perf_counter_context *ctx = counter->ctx;
  1407. file->private_data = NULL;
  1408. WARN_ON_ONCE(ctx->parent_ctx);
  1409. mutex_lock(&ctx->mutex);
  1410. perf_counter_remove_from_context(counter);
  1411. mutex_unlock(&ctx->mutex);
  1412. mutex_lock(&counter->owner->perf_counter_mutex);
  1413. list_del_init(&counter->owner_entry);
  1414. mutex_unlock(&counter->owner->perf_counter_mutex);
  1415. put_task_struct(counter->owner);
  1416. free_counter(counter);
  1417. return 0;
  1418. }
  1419. /*
  1420. * Read the performance counter - simple non blocking version for now
  1421. */
  1422. static ssize_t
  1423. perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
  1424. {
  1425. u64 values[4];
  1426. int n;
  1427. /*
  1428. * Return end-of-file for a read on a counter that is in
  1429. * error state (i.e. because it was pinned but it couldn't be
  1430. * scheduled on to the CPU at some point).
  1431. */
  1432. if (counter->state == PERF_COUNTER_STATE_ERROR)
  1433. return 0;
  1434. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1435. mutex_lock(&counter->child_mutex);
  1436. values[0] = perf_counter_read(counter);
  1437. n = 1;
  1438. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1439. values[n++] = counter->total_time_enabled +
  1440. atomic64_read(&counter->child_total_time_enabled);
  1441. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1442. values[n++] = counter->total_time_running +
  1443. atomic64_read(&counter->child_total_time_running);
  1444. if (counter->attr.read_format & PERF_FORMAT_ID)
  1445. values[n++] = counter->id;
  1446. mutex_unlock(&counter->child_mutex);
  1447. if (count < n * sizeof(u64))
  1448. return -EINVAL;
  1449. count = n * sizeof(u64);
  1450. if (copy_to_user(buf, values, count))
  1451. return -EFAULT;
  1452. return count;
  1453. }
  1454. static ssize_t
  1455. perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  1456. {
  1457. struct perf_counter *counter = file->private_data;
  1458. return perf_read_hw(counter, buf, count);
  1459. }
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLLHUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}
static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}
  1479. /*
  1480. * Holding the top-level counter's child_mutex means that any
  1481. * descendant process that has inherited this counter will block
  1482. * in sync_child_counter if it goes to exit, thus satisfying the
  1483. * task existence requirements of perf_counter_enable/disable.
  1484. */
  1485. static void perf_counter_for_each_child(struct perf_counter *counter,
  1486. void (*func)(struct perf_counter *))
  1487. {
  1488. struct perf_counter *child;
  1489. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1490. mutex_lock(&counter->child_mutex);
  1491. func(counter);
  1492. list_for_each_entry(child, &counter->child_list, child_list)
  1493. func(child);
  1494. mutex_unlock(&counter->child_mutex);
  1495. }
static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	perf_counter_for_each_child(counter, func);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		perf_counter_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
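/*
 * PERF_COUNTER_IOC_PERIOD: update the sample period (or, for freq-based
 * counters, the sample frequency) from user space.
 */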
  1510. static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
  1511. {
  1512. struct perf_counter_context *ctx = counter->ctx;
  1513. unsigned long size;
  1514. int ret = 0;
  1515. u64 value;
  1516. if (!counter->attr.sample_period)
  1517. return -EINVAL;
  1518. size = copy_from_user(&value, arg, sizeof(value));
  1519. if (size != sizeof(value))
  1520. return -EFAULT;
  1521. if (!value)
  1522. return -EINVAL;
  1523. spin_lock_irq(&ctx->lock);
  1524. if (counter->attr.freq) {
  1525. if (value > sysctl_perf_counter_sample_rate) {
  1526. ret = -EINVAL;
  1527. goto unlock;
  1528. }
  1529. counter->attr.sample_freq = value;
  1530. } else {
  1531. perf_log_period(counter, value);
  1532. counter->attr.sample_period = value;
  1533. counter->hw.sample_period = value;
  1534. }
  1535. unlock:
  1536. spin_unlock_irq(&ctx->lock);
  1537. return ret;
  1538. }
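/*
 * ioctl() dispatch: enable/disable/reset operate on the counter (and,
 * with PERF_IOC_FLAG_GROUP, on its whole group); refresh and period
 * take an argument and are handled directly.
 */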
  1539. static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1540. {
  1541. struct perf_counter *counter = file->private_data;
  1542. void (*func)(struct perf_counter *);
  1543. u32 flags = arg;
  1544. switch (cmd) {
  1545. case PERF_COUNTER_IOC_ENABLE:
  1546. func = perf_counter_enable;
  1547. break;
  1548. case PERF_COUNTER_IOC_DISABLE:
  1549. func = perf_counter_disable;
  1550. break;
  1551. case PERF_COUNTER_IOC_RESET:
  1552. func = perf_counter_reset;
  1553. break;
  1554. case PERF_COUNTER_IOC_REFRESH:
  1555. return perf_counter_refresh(counter, arg);
  1556. case PERF_COUNTER_IOC_PERIOD:
  1557. return perf_counter_period(counter, (u64 __user *)arg);
  1558. default:
  1559. return -ENOTTY;
  1560. }
  1561. if (flags & PERF_IOC_FLAG_GROUP)
  1562. perf_counter_for_each(counter, func);
  1563. else
  1564. perf_counter_for_each_child(counter, func);
  1565. return 0;
  1566. }
  1567. int perf_counter_task_enable(void)
  1568. {
  1569. struct perf_counter *counter;
  1570. mutex_lock(&current->perf_counter_mutex);
  1571. list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
  1572. perf_counter_for_each_child(counter, perf_counter_enable);
  1573. mutex_unlock(&current->perf_counter_mutex);
  1574. return 0;
  1575. }
  1576. int perf_counter_task_disable(void)
  1577. {
  1578. struct perf_counter *counter;
  1579. mutex_lock(&current->perf_counter_mutex);
  1580. list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
  1581. perf_counter_for_each_child(counter, perf_counter_disable);
  1582. mutex_unlock(&current->perf_counter_mutex);
  1583. return 0;
  1584. }
static int perf_counter_index(struct perf_counter *counter)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
}
  1591. /*
  1592. * Callers need to ensure there can be no nesting of this function, otherwise
  1593. * the seqlock logic goes bad. We can not serialize this because the arch
  1594. * code calls this from NMI context.
  1595. */
  1596. void perf_counter_update_userpage(struct perf_counter *counter)
  1597. {
  1598. struct perf_counter_mmap_page *userpg;
  1599. struct perf_mmap_data *data;
  1600. rcu_read_lock();
  1601. data = rcu_dereference(counter->data);
  1602. if (!data)
  1603. goto unlock;
  1604. userpg = data->user_page;
  1605. /*
  1606. * Disable preemption so as to not let the corresponding user-space
  1607. * spin too long if we get preempted.
  1608. */
  1609. preempt_disable();
  1610. ++userpg->lock;
  1611. barrier();
  1612. userpg->index = perf_counter_index(counter);
  1613. userpg->offset = atomic64_read(&counter->count);
  1614. if (counter->state == PERF_COUNTER_STATE_ACTIVE)
  1615. userpg->offset -= atomic64_read(&counter->hw.prev_count);
  1616. userpg->time_enabled = counter->total_time_enabled +
  1617. atomic64_read(&counter->child_total_time_enabled);
  1618. userpg->time_running = counter->total_time_running +
  1619. atomic64_read(&counter->child_total_time_running);
  1620. barrier();
  1621. ++userpg->lock;
  1622. preempt_enable();
  1623. unlock:
  1624. rcu_read_unlock();
  1625. }
  1626. static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1627. {
  1628. struct perf_counter *counter = vma->vm_file->private_data;
  1629. struct perf_mmap_data *data;
  1630. int ret = VM_FAULT_SIGBUS;
  1631. if (vmf->flags & FAULT_FLAG_MKWRITE) {
  1632. if (vmf->pgoff == 0)
  1633. ret = 0;
  1634. return ret;
  1635. }
  1636. rcu_read_lock();
  1637. data = rcu_dereference(counter->data);
  1638. if (!data)
  1639. goto unlock;
  1640. if (vmf->pgoff == 0) {
  1641. vmf->page = virt_to_page(data->user_page);
  1642. } else {
  1643. int nr = vmf->pgoff - 1;
  1644. if ((unsigned)nr > data->nr_pages)
  1645. goto unlock;
  1646. if (vmf->flags & FAULT_FLAG_WRITE)
  1647. goto unlock;
  1648. vmf->page = virt_to_page(data->data_pages[nr]);
  1649. }
  1650. get_page(vmf->page);
  1651. vmf->page->mapping = vma->vm_file->f_mapping;
  1652. vmf->page->index = vmf->pgoff;
  1653. ret = 0;
  1654. unlock:
  1655. rcu_read_unlock();
  1656. return ret;
  1657. }
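/*
 * Allocate the mmap()ed buffer: one zeroed page for the user-visible
 * control page plus nr_pages zeroed data pages.
 */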
  1658. static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
  1659. {
  1660. struct perf_mmap_data *data;
  1661. unsigned long size;
  1662. int i;
  1663. WARN_ON(atomic_read(&counter->mmap_count));
  1664. size = sizeof(struct perf_mmap_data);
  1665. size += nr_pages * sizeof(void *);
  1666. data = kzalloc(size, GFP_KERNEL);
  1667. if (!data)
  1668. goto fail;
  1669. data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
  1670. if (!data->user_page)
  1671. goto fail_user_page;
  1672. for (i = 0; i < nr_pages; i++) {
  1673. data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
  1674. if (!data->data_pages[i])
  1675. goto fail_data_pages;
  1676. }
  1677. data->nr_pages = nr_pages;
  1678. atomic_set(&data->lock, -1);
  1679. rcu_assign_pointer(counter->data, data);
  1680. return 0;
  1681. fail_data_pages:
  1682. for (i--; i >= 0; i--)
  1683. free_page((unsigned long)data->data_pages[i]);
  1684. free_page((unsigned long)data->user_page);
  1685. fail_user_page:
  1686. kfree(data);
  1687. fail:
  1688. return -ENOMEM;
  1689. }
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page(addr);

	page->mapping = NULL;
	__free_page(page);
}
  1696. static void __perf_mmap_data_free(struct rcu_head *rcu_head)
  1697. {
  1698. struct perf_mmap_data *data;
  1699. int i;
  1700. data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
  1701. perf_mmap_free_page((unsigned long)data->user_page);
  1702. for (i = 0; i < data->nr_pages; i++)
  1703. perf_mmap_free_page((unsigned long)data->data_pages[i]);
  1704. kfree(data);
  1705. }
  1706. static void perf_mmap_data_free(struct perf_counter *counter)
  1707. {
  1708. struct perf_mmap_data *data = counter->data;
  1709. WARN_ON(atomic_read(&counter->mmap_count));
  1710. rcu_assign_pointer(counter->data, NULL);
  1711. call_rcu(&data->rcu_head, __perf_mmap_data_free);
  1712. }
  1713. static void perf_mmap_open(struct vm_area_struct *vma)
  1714. {
  1715. struct perf_counter *counter = vma->vm_file->private_data;
  1716. atomic_inc(&counter->mmap_count);
  1717. }
  1718. static void perf_mmap_close(struct vm_area_struct *vma)
  1719. {
  1720. struct perf_counter *counter = vma->vm_file->private_data;
  1721. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1722. if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
  1723. struct user_struct *user = current_user();
  1724. atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
  1725. vma->vm_mm->locked_vm -= counter->data->nr_locked;
  1726. perf_mmap_data_free(counter);
  1727. mutex_unlock(&counter->mmap_mutex);
  1728. }
  1729. }
  1730. static struct vm_operations_struct perf_mmap_vmops = {
  1731. .open = perf_mmap_open,
  1732. .close = perf_mmap_close,
  1733. .fault = perf_mmap_fault,
  1734. .page_mkwrite = perf_mmap_fault,
  1735. };
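/*
 * mmap() the counter: page 0 is the control page, the remaining pages
 * (a power of two) form the data buffer. The pages are charged against
 * the per-user and per-mm mlock limits.
 */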
  1736. static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  1737. {
  1738. struct perf_counter *counter = file->private_data;
  1739. unsigned long user_locked, user_lock_limit;
  1740. struct user_struct *user = current_user();
  1741. unsigned long locked, lock_limit;
  1742. unsigned long vma_size;
  1743. unsigned long nr_pages;
  1744. long user_extra, extra;
  1745. int ret = 0;
  1746. if (!(vma->vm_flags & VM_SHARED))
  1747. return -EINVAL;
  1748. vma_size = vma->vm_end - vma->vm_start;
  1749. nr_pages = (vma_size / PAGE_SIZE) - 1;
  1750. /*
  1751. * If we have data pages ensure they're a power-of-two number, so we
  1752. * can do bitmasks instead of modulo.
  1753. */
  1754. if (nr_pages != 0 && !is_power_of_2(nr_pages))
  1755. return -EINVAL;
  1756. if (vma_size != PAGE_SIZE * (1 + nr_pages))
  1757. return -EINVAL;
  1758. if (vma->vm_pgoff != 0)
  1759. return -EINVAL;
  1760. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1761. mutex_lock(&counter->mmap_mutex);
  1762. if (atomic_inc_not_zero(&counter->mmap_count)) {
  1763. if (nr_pages != counter->data->nr_pages)
  1764. ret = -EINVAL;
  1765. goto unlock;
  1766. }
  1767. user_extra = nr_pages + 1;
  1768. user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
  1769. /*
  1770. * Increase the limit linearly with more CPUs:
  1771. */
  1772. user_lock_limit *= num_online_cpus();
  1773. user_locked = atomic_long_read(&user->locked_vm) + user_extra;
  1774. extra = 0;
  1775. if (user_locked > user_lock_limit)
  1776. extra = user_locked - user_lock_limit;
  1777. lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
  1778. lock_limit >>= PAGE_SHIFT;
  1779. locked = vma->vm_mm->locked_vm + extra;
  1780. if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
  1781. ret = -EPERM;
  1782. goto unlock;
  1783. }
  1784. WARN_ON(counter->data);
  1785. ret = perf_mmap_data_alloc(counter, nr_pages);
  1786. if (ret)
  1787. goto unlock;
  1788. atomic_set(&counter->mmap_count, 1);
  1789. atomic_long_add(user_extra, &user->locked_vm);
  1790. vma->vm_mm->locked_vm += extra;
  1791. counter->data->nr_locked = extra;
  1792. if (vma->vm_flags & VM_WRITE)
  1793. counter->data->writable = 1;
  1794. unlock:
  1795. mutex_unlock(&counter->mmap_mutex);
  1796. vma->vm_flags |= VM_RESERVED;
  1797. vma->vm_ops = &perf_mmap_vmops;
  1798. return ret;
  1799. }
  1800. static int perf_fasync(int fd, struct file *filp, int on)
  1801. {
  1802. struct inode *inode = filp->f_path.dentry->d_inode;
  1803. struct perf_counter *counter = filp->private_data;
  1804. int retval;
  1805. mutex_lock(&inode->i_mutex);
  1806. retval = fasync_helper(fd, filp, on, &counter->fasync);
  1807. mutex_unlock(&inode->i_mutex);
  1808. if (retval < 0)
  1809. return retval;
  1810. return 0;
  1811. }
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
  1821. /*
  1822. * Perf counter wakeup
  1823. *
  1824. * If there's data, ensure we set the poll() state and publish everything
  1825. * to user-space before waking everybody up.
  1826. */
  1827. void perf_counter_wakeup(struct perf_counter *counter)
  1828. {
  1829. wake_up_all(&counter->waitq);
  1830. if (counter->pending_kill) {
  1831. kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
  1832. counter->pending_kill = 0;
  1833. }
  1834. }
  1835. /*
  1836. * Pending wakeups
  1837. *
  1838. * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
  1839. *
  1840. * The NMI bit means we cannot possibly take locks. Therefore, maintain a
  1841. * single linked list and use cmpxchg() to add entries lockless.
  1842. */
  1843. static void perf_pending_counter(struct perf_pending_entry *entry)
  1844. {
  1845. struct perf_counter *counter = container_of(entry,
  1846. struct perf_counter, pending);
  1847. if (counter->pending_disable) {
  1848. counter->pending_disable = 0;
  1849. perf_counter_disable(counter);
  1850. }
  1851. if (counter->pending_wakeup) {
  1852. counter->pending_wakeup = 0;
  1853. perf_counter_wakeup(counter);
  1854. }
  1855. }
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};
  1860. static void perf_pending_queue(struct perf_pending_entry *entry,
  1861. void (*func)(struct perf_pending_entry *))
  1862. {
  1863. struct perf_pending_entry **head;
  1864. if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
  1865. return;
  1866. entry->func = func;
  1867. head = &get_cpu_var(perf_pending_head);
  1868. do {
  1869. entry->next = *head;
  1870. } while (cmpxchg(head, entry->next, entry) != entry->next);
  1871. set_perf_counter_pending();
  1872. put_cpu_var(perf_pending_head);
  1873. }
  1874. static int __perf_pending_run(void)
  1875. {
  1876. struct perf_pending_entry *list;
  1877. int nr = 0;
  1878. list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
  1879. while (list != PENDING_TAIL) {
  1880. void (*func)(struct perf_pending_entry *);
  1881. struct perf_pending_entry *entry = list;
  1882. list = list->next;
  1883. func = entry->func;
  1884. entry->next = NULL;
  1885. /*
  1886. * Ensure we observe the unqueue before we issue the wakeup,
  1887. * so that we won't be waiting forever.
  1888. * -- see perf_not_pending().
  1889. */
  1890. smp_wmb();
  1891. func(entry);
  1892. nr++;
  1893. }
  1894. return nr;
  1895. }
  1896. static inline int perf_not_pending(struct perf_counter *counter)
  1897. {
  1898. /*
  1899. * If we flush on whatever cpu we run, there is a chance we don't
  1900. * need to wait.
  1901. */
  1902. get_cpu();
  1903. __perf_pending_run();
  1904. put_cpu();
  1905. /*
  1906. * Ensure we see the proper queue state before going to sleep
  1907. * so that we do not miss the wakeup. -- see perf_pending_handle()
  1908. */
  1909. smp_rmb();
  1910. return counter->pending.next == NULL;
  1911. }
  1912. static void perf_pending_sync(struct perf_counter *counter)
  1913. {
  1914. wait_event(counter->waitq, perf_not_pending(counter));
  1915. }
  1916. void perf_counter_do_pending(void)
  1917. {
  1918. __perf_pending_run();
  1919. }
/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}
  1927. /*
  1928. * Output
  1929. */
struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned long		head;
	unsigned long		offset;
	int			nmi;
	int			sample;
	int			locked;
	unsigned long		flags;
};
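/*
 * For user-writable buffers, make sure the new reservation
 * [offset, head) does not overwrite data the consumer has not yet
 * read, as published through data_tail.
 */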
  1940. static bool perf_output_space(struct perf_mmap_data *data,
  1941. unsigned int offset, unsigned int head)
  1942. {
  1943. unsigned long tail;
  1944. unsigned long mask;
  1945. if (!data->writable)
  1946. return true;
  1947. mask = (data->nr_pages << PAGE_SHIFT) - 1;
  1948. /*
  1949. * Userspace could choose to issue a mb() before updating the tail
  1950. * pointer. So that all reads will be completed before the write is
  1951. * issued.
  1952. */
  1953. tail = ACCESS_ONCE(data->user_page->data_tail);
  1954. smp_rmb();
  1955. offset = (offset - tail) & mask;
  1956. head = (head - tail) & mask;
  1957. if ((int)(head - offset) < 0)
  1958. return false;
  1959. return true;
  1960. }
  1961. static void perf_output_wakeup(struct perf_output_handle *handle)
  1962. {
  1963. atomic_set(&handle->data->poll, POLL_IN);
  1964. if (handle->nmi) {
  1965. handle->counter->pending_wakeup = 1;
  1966. perf_pending_queue(&handle->counter->pending,
  1967. perf_pending_counter);
  1968. } else
  1969. perf_counter_wakeup(handle->counter);
  1970. }
  1971. /*
  1972. * Curious locking construct.
  1973. *
  1974. * We need to ensure a later event doesn't publish a head when a former
  1975. * event isn't done writing. However since we need to deal with NMIs we
  1976. * cannot fully serialize things.
  1977. *
  1978. * What we do is serialize between CPUs so we only have to deal with NMI
  1979. * nesting on a single CPU.
  1980. *
  1981. * We only publish the head (and generate a wakeup) when the outer-most
  1982. * event completes.
  1983. */
  1984. static void perf_output_lock(struct perf_output_handle *handle)
  1985. {
  1986. struct perf_mmap_data *data = handle->data;
  1987. int cpu;
  1988. handle->locked = 0;
  1989. local_irq_save(handle->flags);
  1990. cpu = smp_processor_id();
  1991. if (in_nmi() && atomic_read(&data->lock) == cpu)
  1992. return;
  1993. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  1994. cpu_relax();
  1995. handle->locked = 1;
  1996. }
  1997. static void perf_output_unlock(struct perf_output_handle *handle)
  1998. {
  1999. struct perf_mmap_data *data = handle->data;
  2000. unsigned long head;
  2001. int cpu;
  2002. data->done_head = data->head;
  2003. if (!handle->locked)
  2004. goto out;
  2005. again:
  2006. /*
  2007. * The xchg implies a full barrier that ensures all writes are done
  2008. * before we publish the new head, matched by a rmb() in userspace when
  2009. * reading this position.
  2010. */
  2011. while ((head = atomic_long_xchg(&data->done_head, 0)))
  2012. data->user_page->data_head = head;
  2013. /*
  2014. * NMI can happen here, which means we can miss a done_head update.
  2015. */
  2016. cpu = atomic_xchg(&data->lock, -1);
  2017. WARN_ON_ONCE(cpu != smp_processor_id());
  2018. /*
  2019. * Therefore we have to validate we did not indeed do so.
  2020. */
  2021. if (unlikely(atomic_long_read(&data->done_head))) {
  2022. /*
  2023. * Since we had it locked, we can lock it again.
  2024. */
  2025. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  2026. cpu_relax();
  2027. goto again;
  2028. }
  2029. if (atomic_xchg(&data->wakeup, 0))
  2030. perf_output_wakeup(handle);
  2031. out:
  2032. local_irq_restore(handle->flags);
  2033. }
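/*
 * Copy a chunk of output into the ring-buffer pages, wrapping around
 * the (power-of-two) page array as needed.
 */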
  2034. static void perf_output_copy(struct perf_output_handle *handle,
  2035. const void *buf, unsigned int len)
  2036. {
  2037. unsigned int pages_mask;
  2038. unsigned int offset;
  2039. unsigned int size;
  2040. void **pages;
  2041. offset = handle->offset;
  2042. pages_mask = handle->data->nr_pages - 1;
  2043. pages = handle->data->data_pages;
  2044. do {
  2045. unsigned int page_offset;
  2046. int nr;
  2047. nr = (offset >> PAGE_SHIFT) & pages_mask;
  2048. page_offset = offset & (PAGE_SIZE - 1);
  2049. size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
  2050. memcpy(pages[nr] + page_offset, buf, size);
  2051. len -= size;
  2052. buf += size;
  2053. offset += size;
  2054. } while (len);
  2055. handle->offset = offset;
  2056. /*
  2057. * Check we didn't copy past our reservation window, taking the
  2058. * possible unsigned int wrap into account.
  2059. */
  2060. WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
  2061. }
  2062. #define perf_output_put(handle, x) \
  2063. perf_output_copy((handle), &(x), sizeof(x))
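/*
 * Reserve 'size' bytes in the counter's ring-buffer. On success the
 * handle describes the reservation; on failure the lost-record count
 * is bumped and -ENOSPC is returned. A pending lost-record event is
 * emitted as part of the next successful reservation.
 */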
  2064. static int perf_output_begin(struct perf_output_handle *handle,
  2065. struct perf_counter *counter, unsigned int size,
  2066. int nmi, int sample)
  2067. {
  2068. struct perf_mmap_data *data;
  2069. unsigned int offset, head;
  2070. int have_lost;
  2071. struct {
  2072. struct perf_event_header header;
  2073. u64 id;
  2074. u64 lost;
  2075. } lost_event;
  2076. /*
  2077. * For inherited counters we send all the output towards the parent.
  2078. */
  2079. if (counter->parent)
  2080. counter = counter->parent;
  2081. rcu_read_lock();
  2082. data = rcu_dereference(counter->data);
  2083. if (!data)
  2084. goto out;
  2085. handle->data = data;
  2086. handle->counter = counter;
  2087. handle->nmi = nmi;
  2088. handle->sample = sample;
  2089. if (!data->nr_pages)
  2090. goto fail;
  2091. have_lost = atomic_read(&data->lost);
  2092. if (have_lost)
  2093. size += sizeof(lost_event);
  2094. perf_output_lock(handle);
  2095. do {
  2096. offset = head = atomic_long_read(&data->head);
  2097. head += size;
  2098. if (unlikely(!perf_output_space(data, offset, head)))
  2099. goto fail;
  2100. } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
  2101. handle->offset = offset;
  2102. handle->head = head;
  2103. if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
  2104. atomic_set(&data->wakeup, 1);
  2105. if (have_lost) {
  2106. lost_event.header.type = PERF_EVENT_LOST;
  2107. lost_event.header.misc = 0;
  2108. lost_event.header.size = sizeof(lost_event);
  2109. lost_event.id = counter->id;
  2110. lost_event.lost = atomic_xchg(&data->lost, 0);
  2111. perf_output_put(handle, lost_event);
  2112. }
  2113. return 0;
  2114. fail:
  2115. atomic_inc(&data->lost);
  2116. perf_output_unlock(handle);
  2117. out:
  2118. rcu_read_unlock();
  2119. return -ENOSPC;
  2120. }
  2121. static void perf_output_end(struct perf_output_handle *handle)
  2122. {
  2123. struct perf_counter *counter = handle->counter;
  2124. struct perf_mmap_data *data = handle->data;
  2125. int wakeup_events = counter->attr.wakeup_events;
  2126. if (handle->sample && wakeup_events) {
  2127. int events = atomic_inc_return(&data->events);
  2128. if (events >= wakeup_events) {
  2129. atomic_sub(wakeup_events, &data->events);
  2130. atomic_set(&data->wakeup, 1);
  2131. }
  2132. }
  2133. perf_output_unlock(handle);
  2134. rcu_read_unlock();
  2135. }
static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_tgid_nr_ns(p, counter->ns);
}

static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_pid_nr_ns(p, counter->ns);
}
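/*
 * Write a PERF_EVENT_SAMPLE record for this counter, including the
 * fields selected by attr.sample_type.
 */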
  2154. static void perf_counter_output(struct perf_counter *counter, int nmi,
  2155. struct perf_sample_data *data)
  2156. {
  2157. int ret;
  2158. u64 sample_type = counter->attr.sample_type;
  2159. struct perf_output_handle handle;
  2160. struct perf_event_header header;
  2161. u64 ip;
  2162. struct {
  2163. u32 pid, tid;
  2164. } tid_entry;
  2165. struct {
  2166. u64 id;
  2167. u64 counter;
  2168. } group_entry;
  2169. struct perf_callchain_entry *callchain = NULL;
  2170. int callchain_size = 0;
  2171. u64 time;
  2172. struct {
  2173. u32 cpu, reserved;
  2174. } cpu_entry;
  2175. header.type = PERF_EVENT_SAMPLE;
  2176. header.size = sizeof(header);
  2177. header.misc = 0;
  2178. header.misc |= perf_misc_flags(data->regs);
  2179. if (sample_type & PERF_SAMPLE_IP) {
  2180. ip = perf_instruction_pointer(data->regs);
  2181. header.size += sizeof(ip);
  2182. }
  2183. if (sample_type & PERF_SAMPLE_TID) {
  2184. /* namespace issues */
  2185. tid_entry.pid = perf_counter_pid(counter, current);
  2186. tid_entry.tid = perf_counter_tid(counter, current);
  2187. header.size += sizeof(tid_entry);
  2188. }
  2189. if (sample_type & PERF_SAMPLE_TIME) {
  2190. /*
  2191. * Maybe do better on x86 and provide cpu_clock_nmi()
  2192. */
  2193. time = sched_clock();
  2194. header.size += sizeof(u64);
  2195. }
  2196. if (sample_type & PERF_SAMPLE_ADDR)
  2197. header.size += sizeof(u64);
  2198. if (sample_type & PERF_SAMPLE_ID)
  2199. header.size += sizeof(u64);
  2200. if (sample_type & PERF_SAMPLE_CPU) {
  2201. header.size += sizeof(cpu_entry);
  2202. cpu_entry.cpu = raw_smp_processor_id();
  2203. }
  2204. if (sample_type & PERF_SAMPLE_PERIOD)
  2205. header.size += sizeof(u64);
  2206. if (sample_type & PERF_SAMPLE_GROUP) {
  2207. header.size += sizeof(u64) +
  2208. counter->nr_siblings * sizeof(group_entry);
  2209. }
  2210. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2211. callchain = perf_callchain(data->regs);
  2212. if (callchain) {
  2213. callchain_size = (1 + callchain->nr) * sizeof(u64);
  2214. header.size += callchain_size;
  2215. } else
  2216. header.size += sizeof(u64);
  2217. }
  2218. ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
  2219. if (ret)
  2220. return;
  2221. perf_output_put(&handle, header);
  2222. if (sample_type & PERF_SAMPLE_IP)
  2223. perf_output_put(&handle, ip);
  2224. if (sample_type & PERF_SAMPLE_TID)
  2225. perf_output_put(&handle, tid_entry);
  2226. if (sample_type & PERF_SAMPLE_TIME)
  2227. perf_output_put(&handle, time);
  2228. if (sample_type & PERF_SAMPLE_ADDR)
  2229. perf_output_put(&handle, data->addr);
  2230. if (sample_type & PERF_SAMPLE_ID)
  2231. perf_output_put(&handle, counter->id);
  2232. if (sample_type & PERF_SAMPLE_CPU)
  2233. perf_output_put(&handle, cpu_entry);
  2234. if (sample_type & PERF_SAMPLE_PERIOD)
  2235. perf_output_put(&handle, data->period);
  2236. /*
  2237. * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
  2238. */
  2239. if (sample_type & PERF_SAMPLE_GROUP) {
  2240. struct perf_counter *leader, *sub;
  2241. u64 nr = counter->nr_siblings;
  2242. perf_output_put(&handle, nr);
  2243. leader = counter->group_leader;
  2244. list_for_each_entry(sub, &leader->sibling_list, list_entry) {
  2245. if (sub != counter)
  2246. sub->pmu->read(sub);
  2247. group_entry.id = sub->id;
  2248. group_entry.counter = atomic64_read(&sub->count);
  2249. perf_output_put(&handle, group_entry);
  2250. }
  2251. }
  2252. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2253. if (callchain)
  2254. perf_output_copy(&handle, callchain, callchain_size);
  2255. else {
  2256. u64 nr = 0;
  2257. perf_output_put(&handle, nr);
  2258. }
  2259. }
  2260. perf_output_end(&handle);
  2261. }
  2262. /*
  2263. * read event
  2264. */
struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
	u64				value;
	u64				format[3];
};
  2272. static void
  2273. perf_counter_read_event(struct perf_counter *counter,
  2274. struct task_struct *task)
  2275. {
  2276. struct perf_output_handle handle;
  2277. struct perf_read_event event = {
  2278. .header = {
  2279. .type = PERF_EVENT_READ,
  2280. .misc = 0,
  2281. .size = sizeof(event) - sizeof(event.format),
  2282. },
  2283. .pid = perf_counter_pid(counter, task),
  2284. .tid = perf_counter_tid(counter, task),
  2285. .value = atomic64_read(&counter->count),
  2286. };
  2287. int ret, i = 0;
  2288. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
  2289. event.header.size += sizeof(u64);
  2290. event.format[i++] = counter->total_time_enabled;
  2291. }
  2292. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
  2293. event.header.size += sizeof(u64);
  2294. event.format[i++] = counter->total_time_running;
  2295. }
  2296. if (counter->attr.read_format & PERF_FORMAT_ID) {
  2297. u64 id;
  2298. event.header.size += sizeof(u64);
  2299. if (counter->parent)
  2300. id = counter->parent->id;
  2301. else
  2302. id = counter->id;
  2303. event.format[i++] = id;
  2304. }
  2305. ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
  2306. if (ret)
  2307. return;
  2308. perf_output_copy(&handle, &event, event.header.size);
  2309. perf_output_end(&handle);
  2310. }
  2311. /*
  2312. * fork tracking
  2313. */
  2314. struct perf_fork_event {
  2315. struct task_struct *task;
  2316. struct {
  2317. struct perf_event_header header;
  2318. u32 pid;
  2319. u32 ppid;
  2320. } event;
  2321. };
  2322. static void perf_counter_fork_output(struct perf_counter *counter,
  2323. struct perf_fork_event *fork_event)
  2324. {
  2325. struct perf_output_handle handle;
  2326. int size = fork_event->event.header.size;
  2327. struct task_struct *task = fork_event->task;
  2328. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2329. if (ret)
  2330. return;
  2331. fork_event->event.pid = perf_counter_pid(counter, task);
  2332. fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
  2333. perf_output_put(&handle, fork_event->event);
  2334. perf_output_end(&handle);
  2335. }
  2336. static int perf_counter_fork_match(struct perf_counter *counter)
  2337. {
  2338. if (counter->attr.comm || counter->attr.mmap)
  2339. return 1;
  2340. return 0;
  2341. }
  2342. static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
  2343. struct perf_fork_event *fork_event)
  2344. {
  2345. struct perf_counter *counter;
  2346. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2347. return;
  2348. rcu_read_lock();
  2349. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2350. if (perf_counter_fork_match(counter))
  2351. perf_counter_fork_output(counter, fork_event);
  2352. }
  2353. rcu_read_unlock();
  2354. }
  2355. static void perf_counter_fork_event(struct perf_fork_event *fork_event)
  2356. {
  2357. struct perf_cpu_context *cpuctx;
  2358. struct perf_counter_context *ctx;
  2359. cpuctx = &get_cpu_var(perf_cpu_context);
  2360. perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
  2361. put_cpu_var(perf_cpu_context);
  2362. rcu_read_lock();
  2363. /*
  2364. * doesn't really matter which of the child contexts the
  2365. * events ends up in.
  2366. */
  2367. ctx = rcu_dereference(current->perf_counter_ctxp);
  2368. if (ctx)
  2369. perf_counter_fork_ctx(ctx, fork_event);
  2370. rcu_read_unlock();
  2371. }
  2372. void perf_counter_fork(struct task_struct *task)
  2373. {
  2374. struct perf_fork_event fork_event;
  2375. if (!atomic_read(&nr_comm_counters) &&
  2376. !atomic_read(&nr_mmap_counters))
  2377. return;
  2378. fork_event = (struct perf_fork_event){
  2379. .task = task,
  2380. .event = {
  2381. .header = {
  2382. .type = PERF_EVENT_FORK,
  2383. .size = sizeof(fork_event.event),
  2384. },
  2385. },
  2386. };
  2387. perf_counter_fork_event(&fork_event);
  2388. }
  2389. /*
  2390. * comm tracking
  2391. */
  2392. struct perf_comm_event {
  2393. struct task_struct *task;
  2394. char *comm;
  2395. int comm_size;
  2396. struct {
  2397. struct perf_event_header header;
  2398. u32 pid;
  2399. u32 tid;
  2400. } event;
  2401. };
  2402. static void perf_counter_comm_output(struct perf_counter *counter,
  2403. struct perf_comm_event *comm_event)
  2404. {
  2405. struct perf_output_handle handle;
  2406. int size = comm_event->event.header.size;
  2407. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2408. if (ret)
  2409. return;
  2410. comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
  2411. comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
  2412. perf_output_put(&handle, comm_event->event);
  2413. perf_output_copy(&handle, comm_event->comm,
  2414. comm_event->comm_size);
  2415. perf_output_end(&handle);
  2416. }
  2417. static int perf_counter_comm_match(struct perf_counter *counter)
  2418. {
  2419. if (counter->attr.comm)
  2420. return 1;
  2421. return 0;
  2422. }
  2423. static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
  2424. struct perf_comm_event *comm_event)
  2425. {
  2426. struct perf_counter *counter;
  2427. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2428. return;
  2429. rcu_read_lock();
  2430. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2431. if (perf_counter_comm_match(counter))
  2432. perf_counter_comm_output(counter, comm_event);
  2433. }
  2434. rcu_read_unlock();
  2435. }
  2436. static void perf_counter_comm_event(struct perf_comm_event *comm_event)
  2437. {
  2438. struct perf_cpu_context *cpuctx;
  2439. struct perf_counter_context *ctx;
  2440. unsigned int size;
  2441. char *comm = comm_event->task->comm;
  2442. size = ALIGN(strlen(comm)+1, sizeof(u64));
  2443. comm_event->comm = comm;
  2444. comm_event->comm_size = size;
  2445. comm_event->event.header.size = sizeof(comm_event->event) + size;
  2446. cpuctx = &get_cpu_var(perf_cpu_context);
  2447. perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
  2448. put_cpu_var(perf_cpu_context);
  2449. rcu_read_lock();
  2450. /*
  2451. * doesn't really matter which of the child contexts the
  2452. * events ends up in.
  2453. */
  2454. ctx = rcu_dereference(current->perf_counter_ctxp);
  2455. if (ctx)
  2456. perf_counter_comm_ctx(ctx, comm_event);
  2457. rcu_read_unlock();
  2458. }
  2459. void perf_counter_comm(struct task_struct *task)
  2460. {
  2461. struct perf_comm_event comm_event;
  2462. if (task->perf_counter_ctxp)
  2463. perf_counter_enable_on_exec(task);
  2464. if (!atomic_read(&nr_comm_counters))
  2465. return;
  2466. comm_event = (struct perf_comm_event){
  2467. .task = task,
  2468. .event = {
  2469. .header = { .type = PERF_EVENT_COMM, },
  2470. },
  2471. };
  2472. perf_counter_comm_event(&comm_event);
  2473. }
  2474. /*
  2475. * mmap tracking
  2476. */
  2477. struct perf_mmap_event {
  2478. struct vm_area_struct *vma;
  2479. const char *file_name;
  2480. int file_size;
  2481. struct {
  2482. struct perf_event_header header;
  2483. u32 pid;
  2484. u32 tid;
  2485. u64 start;
  2486. u64 len;
  2487. u64 pgoff;
  2488. } event;
  2489. };
  2490. static void perf_counter_mmap_output(struct perf_counter *counter,
  2491. struct perf_mmap_event *mmap_event)
  2492. {
  2493. struct perf_output_handle handle;
  2494. int size = mmap_event->event.header.size;
  2495. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2496. if (ret)
  2497. return;
  2498. mmap_event->event.pid = perf_counter_pid(counter, current);
  2499. mmap_event->event.tid = perf_counter_tid(counter, current);
  2500. perf_output_put(&handle, mmap_event->event);
  2501. perf_output_copy(&handle, mmap_event->file_name,
  2502. mmap_event->file_size);
  2503. perf_output_end(&handle);
  2504. }
  2505. static int perf_counter_mmap_match(struct perf_counter *counter,
  2506. struct perf_mmap_event *mmap_event)
  2507. {
  2508. if (counter->attr.mmap)
  2509. return 1;
  2510. return 0;
  2511. }
  2512. static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
  2513. struct perf_mmap_event *mmap_event)
  2514. {
  2515. struct perf_counter *counter;
  2516. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2517. return;
  2518. rcu_read_lock();
  2519. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2520. if (perf_counter_mmap_match(counter, mmap_event))
  2521. perf_counter_mmap_output(counter, mmap_event);
  2522. }
  2523. rcu_read_unlock();
  2524. }
  2525. static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
  2526. {
  2527. struct perf_cpu_context *cpuctx;
  2528. struct perf_counter_context *ctx;
  2529. struct vm_area_struct *vma = mmap_event->vma;
  2530. struct file *file = vma->vm_file;
  2531. unsigned int size;
  2532. char tmp[16];
  2533. char *buf = NULL;
  2534. const char *name;
  2535. if (file) {
  2536. buf = kzalloc(PATH_MAX, GFP_KERNEL);
  2537. if (!buf) {
  2538. name = strncpy(tmp, "//enomem", sizeof(tmp));
  2539. goto got_name;
  2540. }
  2541. name = d_path(&file->f_path, buf, PATH_MAX);
  2542. if (IS_ERR(name)) {
  2543. name = strncpy(tmp, "//toolong", sizeof(tmp));
  2544. goto got_name;
  2545. }
  2546. } else {
  2547. name = arch_vma_name(mmap_event->vma);
  2548. if (name)
  2549. goto got_name;
  2550. if (!vma->vm_mm) {
  2551. name = strncpy(tmp, "[vdso]", sizeof(tmp));
  2552. goto got_name;
  2553. }
  2554. name = strncpy(tmp, "//anon", sizeof(tmp));
  2555. goto got_name;
  2556. }
  2557. got_name:
  2558. size = ALIGN(strlen(name)+1, sizeof(u64));
  2559. mmap_event->file_name = name;
  2560. mmap_event->file_size = size;
  2561. mmap_event->event.header.size = sizeof(mmap_event->event) + size;
  2562. cpuctx = &get_cpu_var(perf_cpu_context);
  2563. perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
  2564. put_cpu_var(perf_cpu_context);
  2565. rcu_read_lock();
  2566. /*
  2567. * doesn't really matter which of the child contexts the
  2568. * events ends up in.
  2569. */
  2570. ctx = rcu_dereference(current->perf_counter_ctxp);
  2571. if (ctx)
  2572. perf_counter_mmap_ctx(ctx, mmap_event);
  2573. rcu_read_unlock();
  2574. kfree(buf);
  2575. }
  2576. void __perf_counter_mmap(struct vm_area_struct *vma)
  2577. {
  2578. struct perf_mmap_event mmap_event;
  2579. if (!atomic_read(&nr_mmap_counters))
  2580. return;
  2581. mmap_event = (struct perf_mmap_event){
  2582. .vma = vma,
  2583. .event = {
  2584. .header = { .type = PERF_EVENT_MMAP, },
  2585. .start = vma->vm_start,
  2586. .len = vma->vm_end - vma->vm_start,
  2587. .pgoff = vma->vm_pgoff,
  2588. },
  2589. };
  2590. perf_counter_mmap_event(&mmap_event);
  2591. }
  2592. /*
  2593. * Log sample_period changes so that analyzing tools can re-normalize the
  2594. * event flow.
  2595. */
  2596. struct freq_event {
  2597. struct perf_event_header header;
  2598. u64 time;
  2599. u64 id;
  2600. u64 period;
  2601. };
  2602. static void perf_log_period(struct perf_counter *counter, u64 period)
  2603. {
  2604. struct perf_output_handle handle;
  2605. struct freq_event event;
  2606. int ret;
  2607. if (counter->hw.sample_period == period)
  2608. return;
  2609. if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
  2610. return;
  2611. event = (struct freq_event) {
  2612. .header = {
  2613. .type = PERF_EVENT_PERIOD,
  2614. .misc = 0,
  2615. .size = sizeof(event),
  2616. },
  2617. .time = sched_clock(),
  2618. .id = counter->id,
  2619. .period = period,
  2620. };
  2621. ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
  2622. if (ret)
  2623. return;
  2624. perf_output_put(&handle, event);
  2625. perf_output_end(&handle);
  2626. }
  2627. /*
  2628. * IRQ throttle logging
  2629. */
static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
	} throttle_event = {
		.header = {
			.type = PERF_EVENT_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time	= sched_clock(),
		.id	= counter->id,
	};

	if (enable)
		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;

	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}
  2653. /*
  2654. * Generic counter overflow handling, sampling.
  2655. */
  2656. int perf_counter_overflow(struct perf_counter *counter, int nmi,
  2657. struct perf_sample_data *data)
  2658. {
  2659. int events = atomic_read(&counter->event_limit);
  2660. int throttle = counter->pmu->unthrottle != NULL;
  2661. struct hw_perf_counter *hwc = &counter->hw;
  2662. int ret = 0;
  2663. if (!throttle) {
  2664. hwc->interrupts++;
  2665. } else {
  2666. if (hwc->interrupts != MAX_INTERRUPTS) {
  2667. hwc->interrupts++;
  2668. if (HZ * hwc->interrupts >
  2669. (u64)sysctl_perf_counter_sample_rate) {
  2670. hwc->interrupts = MAX_INTERRUPTS;
  2671. perf_log_throttle(counter, 0);
  2672. ret = 1;
  2673. }
  2674. } else {
  2675. /*
  2676. * Keep re-disabling counters even though on the previous
  2677. * pass we disabled it - just in case we raced with a
  2678. * sched-in and the counter got enabled again:
  2679. */
  2680. ret = 1;
  2681. }
  2682. }
  2683. if (counter->attr.freq) {
  2684. u64 now = sched_clock();
  2685. s64 delta = now - hwc->freq_stamp;
  2686. hwc->freq_stamp = now;
  2687. if (delta > 0 && delta < TICK_NSEC)
  2688. perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
  2689. }
  2690. /*
  2691. * XXX event_limit might not quite work as expected on inherited
  2692. * counters
  2693. */
  2694. counter->pending_kill = POLL_IN;
  2695. if (events && atomic_dec_and_test(&counter->event_limit)) {
  2696. ret = 1;
  2697. counter->pending_kill = POLL_HUP;
  2698. if (nmi) {
  2699. counter->pending_disable = 1;
  2700. perf_pending_queue(&counter->pending,
  2701. perf_pending_counter);
  2702. } else
  2703. perf_counter_disable(counter);
  2704. }
  2705. perf_counter_output(counter, nmi, data);
  2706. return ret;
  2707. }
  2708. /*
  2709. * Generic software counter infrastructure
  2710. */
static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}
  2725. static void perf_swcounter_set_period(struct perf_counter *counter)
  2726. {
  2727. struct hw_perf_counter *hwc = &counter->hw;
  2728. s64 left = atomic64_read(&hwc->period_left);
  2729. s64 period = hwc->sample_period;
  2730. if (unlikely(left <= -period)) {
  2731. left = period;
  2732. atomic64_set(&hwc->period_left, left);
  2733. hwc->last_period = period;
  2734. }
  2735. if (unlikely(left <= 0)) {
  2736. left += period;
  2737. atomic64_add(period, &hwc->period_left);
  2738. hwc->last_period = period;
  2739. }
  2740. atomic64_set(&hwc->prev_count, -left);
  2741. atomic64_set(&hwc->count, -left);
  2742. }
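/*
 * hrtimer handler for software counters that sample on a timer: read
 * the counter, deliver an overflow (unless that asks us to stop), and
 * re-arm the timer for the next period.
 */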
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct perf_counter *counter;
	u64 period;

	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->pmu->read(counter);

	data.addr = 0;
	data.regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->attr.exclude_kernel || !data.regs) &&
			!counter->attr.exclude_user)
		data.regs = task_pt_regs(current);

	if (data.regs) {
		if (perf_counter_overflow(counter, 0, &data))
			ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, counter->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}
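/*
 * A software counter crossed its sample period: fold the count,
 * start a new period and emit an overflow sample.
 */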
static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct perf_sample_data *data)
{
	data->period = counter->hw.last_period;

	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	if (perf_counter_overflow(counter, nmi, data))
		/* soft-disable the counter */
		;
}
static int perf_swcounter_is_counting(struct perf_counter *counter)
{
	struct perf_counter_context *ctx;
	unsigned long flags;
	int count;

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		return 1;

	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
		return 0;

	/*
	 * If the counter is inactive, it could be just because
	 * its task is scheduled out, or because it's in a group
	 * which could not go on the PMU.  We want to count in
	 * the first case but not the second.  If the context is
	 * currently active then an inactive software counter must
	 * be the second case.  If it's not currently active then
	 * we need to know whether the counter was active when the
	 * context was last active, which we can determine by
	 * comparing counter->tstamp_stopped with ctx->time.
	 *
	 * We are within an RCU read-side critical section,
	 * which protects the existence of *ctx.
	 */
	ctx = counter->ctx;
	spin_lock_irqsave(&ctx->lock, flags);
	count = 1;
	/* Re-check state now we have the lock */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->ctx->is_active ||
	    counter->tstamp_stopped < ctx->time)
		count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);
	return count;
}
static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_type_id type,
				u32 event, struct pt_regs *regs)
{
	if (!perf_swcounter_is_counting(counter))
		return 0;

	if (counter->attr.type != type)
		return 0;
	if (counter->attr.config != event)
		return 0;

	if (regs) {
		if (counter->attr.exclude_user && user_mode(regs))
			return 0;

		if (counter->attr.exclude_kernel && !user_mode(regs))
			return 0;
	}

	return 1;
}
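/*
 * Accumulate 'nr' events into the software counter and emit an
 * overflow sample once the (negatively seeded) count crosses zero.
 */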
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct perf_sample_data *data)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);

	if (counter->hw.sample_period && !neg && data->regs)
		perf_swcounter_overflow(counter, nmi, data);
}
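/*
 * Feed one software event to every matching counter in a context.
 */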
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_type_id type,
				     u32 event, u64 nr, int nmi,
				     struct perf_sample_data *data)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, data->regs))
			perf_swcounter_add(counter, nr, nmi, data);
	}
	rcu_read_unlock();
}
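/*
 * One recursion slot per context level (task, softirq, hardirq, NMI),
 * so that a software event raised while another one is being handled
 * at the same level is dropped instead of recursing.
 */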
static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}
static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
				    u64 nr, int nmi,
				    struct perf_sample_data *data)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);
	struct perf_counter_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, data);
	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
	rcu_read_unlock();

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}
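/*
 * Entry point for raising a software event: package regs and addr into
 * a perf_sample_data and dispatch it to the per-cpu context as well as
 * the current task's context.  Callers elsewhere in the kernel are
 * expected to reach this through the perf_swcounter_event() helper.
 */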
void __perf_swcounter_event(u32 event, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data = {
		.regs = regs,
		.addr = addr,
	};

	do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
}

static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};
/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}
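/*
 * Enabling the cpu-clock counter records the current cpu_clock() value
 * as the baseline and, if a sample period is set, arms the sampling
 * hrtimer (with the same 10us floor the handler re-arms with).
 */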
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};
/*
 * Software counter: task time clock
 */
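/*
 * The task clock counts context time (ctx->time), i.e. time that only
 * advances while the task's counter context is scheduled in, rather
 * than raw wall-clock time.
 */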
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&counter->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 now;

	now = counter->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
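/*
 * Tracepoint counters, wired up through the ftrace profiling hooks
 * (CONFIG_EVENT_PROFILE); they reuse the generic software pmu.
 */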
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct perf_sample_data data = {
		.regs = get_irq_regs(),
		.addr = 0,
	};

	if (!data.regs)
		data.regs = task_pt_regs(current);

	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->attr));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->attr);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif
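/*
 * Per-event refcounts for the software events that are counted from
 * hooks elsewhere in the kernel (page faults, context switches, cpu
 * migrations); a counter of that type bumps the refcount on creation
 * so the hooks can skip the event path when nobody is listening.
 */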
atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_counter_destroy(struct perf_counter *counter)
{
	u64 event = counter->attr.config;

	WARN_ON(counter->parent);

	atomic_dec(&perf_swcounter_enabled[event]);
}

static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
	const struct pmu *pmu = NULL;
	u64 event = counter->attr.config;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (event) {
	case PERF_COUNT_SW_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;
		break;

	case PERF_COUNT_SW_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;
		break;

	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
	case PERF_COUNT_SW_CONTEXT_SWITCHES:
	case PERF_COUNT_SW_CPU_MIGRATIONS:
		if (!counter->parent) {
			atomic_inc(&perf_swcounter_enabled[event]);
			counter->destroy = sw_perf_counter_destroy;
		}
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_attr *attr,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   struct perf_counter *parent_counter,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	counter->cpu		= cpu;
	counter->attr		= *attr;
	counter->group_leader	= group_leader;
	counter->pmu		= NULL;
	counter->ctx		= ctx;
	counter->oncpu		= -1;

	counter->parent		= parent_counter;

	counter->ns		= get_pid_ns(current->nsproxy->pid_ns);
	counter->id		= atomic64_inc_return(&perf_counter_id);

	counter->state		= PERF_COUNTER_STATE_INACTIVE;

	if (attr->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	pmu = NULL;

	hwc = &counter->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;

	atomic64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
	 */
	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
		goto done;

	switch (attr->type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		pmu = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_counter_init(counter);
		break;

	default:
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (counter->ns)
			put_pid_ns(counter->ns);
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->pmu = pmu;

	if (!counter->parent) {
		atomic_inc(&nr_counters);
		if (counter->attr.mmap)
			atomic_inc(&nr_mmap_counters);
		if (counter->attr.comm)
			atomic_inc(&nr_comm_counters);
	}

	return counter;
}
static int perf_copy_attr(struct perf_counter_attr __user *uattr,
			  struct perf_counter_attr *attr)
{
	int ret;
	u32 size;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0.
	 */
	if (size > sizeof(*attr)) {
		unsigned long val;
		unsigned long __user *addr;
		unsigned long __user *end;

		addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
				sizeof(unsigned long));
		end  = PTR_ALIGN((void __user *)uattr + size,
				sizeof(unsigned long));

		/*
		 * addr is an (unsigned long __user *), so plain pointer
		 * arithmetic already advances one word per iteration.
		 */
		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
 *
 * @attr_uptr:	event type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader counter fd
 * @flags:	reserved for future use, must be 0
 */
SYSCALL_DEFINE5(perf_counter_open,
		struct perf_counter_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_attr attr;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	ret = perf_copy_attr(attr_uptr, &attr);
	if (ret)
		return ret;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_counter_sample_rate)
			return -EINVAL;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
				     NULL, GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_ctx(ctx);

	goto out_fput;
}
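/*
 * Illustrative userspace usage of the syscall above (not kernel code;
 * a minimal sketch assuming the attr layout and __NR_perf_counter_open
 * number exported by this tree's headers):
 *
 *	struct perf_counter_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &attr, getpid(), -1, -1, 0);
 *
 * followed by ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0), a read() of the
 * u64 count, and close(fd).
 */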
/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->attr,
					   parent_counter->cpu, child_ctx,
					   group_leader, parent_counter,
					   GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en, dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	if (parent_counter->attr.freq)
		child_counter->hw.sample_period = parent_counter->hw.sample_period;

	/*
	 * Link it up in the child's context:
	 */
	add_counter_to_ctx(child_counter, child_ctx);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	return child_counter;
}
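/*
 * Inherit a whole counter group: clone the leader first, then each
 * sibling, attaching the clones to the new leader in the child context.
 */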
static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
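/*
 * Fold the counts and times accumulated by an exiting child counter
 * back into its parent and unlink it from the parent's child list.
 */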
static void sync_child_counter(struct perf_counter *child_counter,
			       struct task_struct *child)
{
	struct perf_counter *parent_counter = child_counter->parent;
	u64 child_val;

	if (child_counter->attr.inherit_stat)
		perf_counter_read_event(child_counter, child);

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx,
			 struct task_struct *child)
{
	struct perf_counter *parent_counter;

	update_counter_times(child_counter);
	perf_counter_remove_from_context(child_counter);

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, child);
		free_counter(child_counter);
	}
}
/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_counter_ctxp))
		return;

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
	__perf_counter_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
	spin_unlock(&child_ctx->lock);
	local_irq_restore(flags);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_counter_exit_task()
	 *     sync_child_counter()
	 *       fput(parent_counter->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child_counter, child_ctx, child);

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
/*
 * Free an unexposed, unused context, as created by inheritance in
 * perf_counter_init_task() below; used by fork() in the failure case.
 */
void perf_counter_free_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter *counter, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
		struct perf_counter *parent = counter->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&counter->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_counter(counter, ctx);
		free_counter(counter);
	}

	if (!list_empty(&ctx->counter_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter_context *cloned_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_counter_ctxp = NULL;

	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

	if (likely(!parent->perf_counter_ctxp))
		return 0;

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
	 */
	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
		return -ENOMEM;

	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
	get_task_struct(child);

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;

		if (!counter->attr.inherit) {
			inherited_all = 0;
			continue;
		}

		ret = inherit_group(counter, parent, parent_ctx,
				    child, child_ctx);
		if (ret) {
			inherited_all = 0;
			break;
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};
void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
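/*
 * Sysfs knobs controlling how many counters are reserved for per-cpu
 * use and whether overcommit is allowed; exposed under the
 * "perf_counters" attribute group of the cpu sysdev class
 * (i.e. /sys/devices/system/cpu/perf_counters/).
 */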
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);