perf_counter.c

  1. /*
  2. * Performance counter core code
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7. * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8. *
  9. * For licensing details see kernel-base/COPYING
  10. */
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/cpu.h>
  14. #include <linux/smp.h>
  15. #include <linux/file.h>
  16. #include <linux/poll.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/dcache.h>
  19. #include <linux/percpu.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/hardirq.h>
  23. #include <linux/rculist.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/syscalls.h>
  26. #include <linux/anon_inodes.h>
  27. #include <linux/kernel_stat.h>
  28. #include <linux/perf_counter.h>
  29. #include <asm/irq_regs.h>
  30. /*
  31. * Each CPU has a list of per CPU counters:
  32. */
  33. DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
  34. int perf_max_counters __read_mostly = 1;
  35. static int perf_reserved_percpu __read_mostly;
  36. static int perf_overcommit __read_mostly = 1;
  37. static atomic_t nr_counters __read_mostly;
  38. static atomic_t nr_mmap_counters __read_mostly;
  39. static atomic_t nr_comm_counters __read_mostly;
  40. /*
  41. * perf counter paranoia level:
  42. * 0 - not paranoid
  43. * 1 - disallow CPU counters for unprivileged users
  44. * 2 - disallow kernel profiling for unprivileged users
  45. */
  46. int sysctl_perf_counter_paranoid __read_mostly;
  47. static inline bool perf_paranoid_cpu(void)
  48. {
  49. return sysctl_perf_counter_paranoid > 0;
  50. }
  51. static inline bool perf_paranoid_kernel(void)
  52. {
  53. return sysctl_perf_counter_paranoid > 1;
  54. }
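/*
 * Illustrative sketch, not part of this file: how the two helpers above
 * are meant to gate unprivileged requests. find_get_context() below uses
 * the CPU-wide check verbatim; the kernel-profiling check is assumed to
 * sit in the counter allocation path.
 */
#if 0
static int paranoia_check_sketch(bool cpu_wide, bool wants_kernel)
{
	if (cpu_wide && perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (wants_kernel && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
		return -EACCES;
	return 0;
}
#endif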
  55. int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
  56. /*
  57. * max perf counter sample rate
  58. */
  59. int sysctl_perf_counter_sample_rate __read_mostly = 100000;
  60. static atomic64_t perf_counter_id;
  61. /*
  62. * Lock for (sysadmin-configurable) counter reservations:
  63. */
  64. static DEFINE_SPINLOCK(perf_resource_lock);
  65. /*
  66. * Architecture provided APIs - weak aliases:
  67. */
  68. extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
  69. {
  70. return NULL;
  71. }
  72. void __weak hw_perf_disable(void) { barrier(); }
  73. void __weak hw_perf_enable(void) { barrier(); }
  74. void __weak hw_perf_counter_setup(int cpu) { barrier(); }
  75. int __weak
  76. hw_perf_group_sched_in(struct perf_counter *group_leader,
  77. struct perf_cpu_context *cpuctx,
  78. struct perf_counter_context *ctx, int cpu)
  79. {
  80. return 0;
  81. }
  82. void __weak perf_counter_print_debug(void) { }
  83. static DEFINE_PER_CPU(int, disable_count);
  84. void __perf_disable(void)
  85. {
  86. __get_cpu_var(disable_count)++;
  87. }
  88. bool __perf_enable(void)
  89. {
  90. return !--__get_cpu_var(disable_count);
  91. }
  92. void perf_disable(void)
  93. {
  94. __perf_disable();
  95. hw_perf_disable();
  96. }
  97. void perf_enable(void)
  98. {
  99. if (__perf_enable())
  100. hw_perf_enable();
  101. }
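/*
 * Minimal usage sketch (not part of this file): perf_disable()/perf_enable()
 * nest through the per-CPU disable_count, so the PMU is only re-enabled by
 * the outermost perf_enable().
 */
#if 0
static void nested_disable_sketch(void)
{
	perf_disable();		/* disable_count 0 -> 1, hw_perf_disable() */
	perf_disable();		/* disable_count 1 -> 2, hw_perf_disable() again */
	perf_enable();		/* disable_count 2 -> 1, PMU stays disabled */
	perf_enable();		/* disable_count 1 -> 0, hw_perf_enable() */
}
#endif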
  102. static void get_ctx(struct perf_counter_context *ctx)
  103. {
  104. WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
  105. }
  106. static void free_ctx(struct rcu_head *head)
  107. {
  108. struct perf_counter_context *ctx;
  109. ctx = container_of(head, struct perf_counter_context, rcu_head);
  110. kfree(ctx);
  111. }
  112. static void put_ctx(struct perf_counter_context *ctx)
  113. {
  114. if (atomic_dec_and_test(&ctx->refcount)) {
  115. if (ctx->parent_ctx)
  116. put_ctx(ctx->parent_ctx);
  117. if (ctx->task)
  118. put_task_struct(ctx->task);
  119. call_rcu(&ctx->rcu_head, free_ctx);
  120. }
  121. }
  122. /*
  123. * Get the perf_counter_context for a task and lock it.
  124. * This has to cope with the fact that until it is locked,
  125. * the context could get moved to another task.
  126. */
  127. static struct perf_counter_context *
  128. perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  129. {
  130. struct perf_counter_context *ctx;
  131. rcu_read_lock();
  132. retry:
  133. ctx = rcu_dereference(task->perf_counter_ctxp);
  134. if (ctx) {
  135. /*
  136. * If this context is a clone of another, it might
  137. * get swapped for another underneath us by
  138. * perf_counter_task_sched_out, though the
  139. * rcu_read_lock() protects us from any context
  140. * getting freed. Lock the context and check if it
  141. * got swapped before we could get the lock, and retry
  142. * if so. If we locked the right context, then it
  143. * can't get swapped on us any more.
  144. */
  145. spin_lock_irqsave(&ctx->lock, *flags);
  146. if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
  147. spin_unlock_irqrestore(&ctx->lock, *flags);
  148. goto retry;
  149. }
  150. if (!atomic_inc_not_zero(&ctx->refcount)) {
  151. spin_unlock_irqrestore(&ctx->lock, *flags);
  152. ctx = NULL;
  153. }
  154. }
  155. rcu_read_unlock();
  156. return ctx;
  157. }
  158. /*
  159. * Get the context for a task and increment its pin_count so it
  160. * can't get swapped to another task. This also increments its
  161. * reference count so that the context can't get freed.
  162. */
  163. static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
  164. {
  165. struct perf_counter_context *ctx;
  166. unsigned long flags;
  167. ctx = perf_lock_task_context(task, &flags);
  168. if (ctx) {
  169. ++ctx->pin_count;
  170. spin_unlock_irqrestore(&ctx->lock, flags);
  171. }
  172. return ctx;
  173. }
  174. static void perf_unpin_context(struct perf_counter_context *ctx)
  175. {
  176. unsigned long flags;
  177. spin_lock_irqsave(&ctx->lock, flags);
  178. --ctx->pin_count;
  179. spin_unlock_irqrestore(&ctx->lock, flags);
  180. put_ctx(ctx);
  181. }
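/*
 * Usage sketch (illustrative, not part of this file): pinning a task's
 * context across sleepable work. perf_pin_task_context() takes a reference
 * and raises pin_count so the context can neither be freed nor swapped to
 * another task until perf_unpin_context() drops both again.
 */
#if 0
static void pin_usage_sketch(struct task_struct *task)
{
	struct perf_counter_context *ctx;

	ctx = perf_pin_task_context(task);
	if (!ctx)
		return;
	/* ... work that may sleep but relies on ctx staying put ... */
	perf_unpin_context(ctx);
}
#endif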
  182. /*
  183. * Add a counter to the lists for its context.
  184. * Must be called with ctx->mutex and ctx->lock held.
  185. */
  186. static void
  187. list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
  188. {
  189. struct perf_counter *group_leader = counter->group_leader;
  190. /*
  191. * Depending on whether it is a standalone or sibling counter,
  192. * add it straight to the context's counter list, or to the group
  193. * leader's sibling list:
  194. */
  195. if (group_leader == counter)
  196. list_add_tail(&counter->list_entry, &ctx->counter_list);
  197. else {
  198. list_add_tail(&counter->list_entry, &group_leader->sibling_list);
  199. group_leader->nr_siblings++;
  200. }
  201. list_add_rcu(&counter->event_entry, &ctx->event_list);
  202. ctx->nr_counters++;
  203. }
  204. /*
  205. * Remove a counter from the lists for its context.
  206. * Must be called with ctx->mutex and ctx->lock held.
  207. */
  208. static void
  209. list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
  210. {
  211. struct perf_counter *sibling, *tmp;
  212. if (list_empty(&counter->list_entry))
  213. return;
  214. ctx->nr_counters--;
  215. list_del_init(&counter->list_entry);
  216. list_del_rcu(&counter->event_entry);
  217. if (counter->group_leader != counter)
  218. counter->group_leader->nr_siblings--;
  219. /*
  220. * If this was a group counter with sibling counters then
  221. * upgrade the siblings to singleton counters by adding them
  222. * to the context list directly:
  223. */
  224. list_for_each_entry_safe(sibling, tmp,
  225. &counter->sibling_list, list_entry) {
  226. list_move_tail(&sibling->list_entry, &ctx->counter_list);
  227. sibling->group_leader = sibling;
  228. }
  229. }
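/*
 * Illustrative sketch (not part of this file) of the list layout built by
 * list_add_counter(): a group leader lives on ctx->counter_list, its
 * siblings hang off the leader's sibling_list, and every counter is also
 * on ctx->event_list. Both calls assume ctx->mutex and ctx->lock are held,
 * as required above.
 */
#if 0
static void group_layout_sketch(struct perf_counter_context *ctx,
				struct perf_counter *leader,
				struct perf_counter *sibling)
{
	/* leader->group_leader == leader: goes on ctx->counter_list */
	list_add_counter(leader, ctx);

	/* sibling of that leader: goes on leader->sibling_list */
	sibling->group_leader = leader;
	list_add_counter(sibling, ctx);

	/*
	 * list_del_counter(leader, ctx) would later promote the sibling
	 * back to a singleton on ctx->counter_list.
	 */
}
#endif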
  230. static void
  231. counter_sched_out(struct perf_counter *counter,
  232. struct perf_cpu_context *cpuctx,
  233. struct perf_counter_context *ctx)
  234. {
  235. if (counter->state != PERF_COUNTER_STATE_ACTIVE)
  236. return;
  237. counter->state = PERF_COUNTER_STATE_INACTIVE;
  238. counter->tstamp_stopped = ctx->time;
  239. counter->pmu->disable(counter);
  240. counter->oncpu = -1;
  241. if (!is_software_counter(counter))
  242. cpuctx->active_oncpu--;
  243. ctx->nr_active--;
  244. if (counter->attr.exclusive || !cpuctx->active_oncpu)
  245. cpuctx->exclusive = 0;
  246. }
  247. static void
  248. group_sched_out(struct perf_counter *group_counter,
  249. struct perf_cpu_context *cpuctx,
  250. struct perf_counter_context *ctx)
  251. {
  252. struct perf_counter *counter;
  253. if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
  254. return;
  255. counter_sched_out(group_counter, cpuctx, ctx);
  256. /*
  257. * Schedule out siblings (if any):
  258. */
  259. list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
  260. counter_sched_out(counter, cpuctx, ctx);
  261. if (group_counter->attr.exclusive)
  262. cpuctx->exclusive = 0;
  263. }
  264. /*
  265. * Cross CPU call to remove a performance counter
  266. *
  267. * We disable the counter on the hardware level first. After that we
  268. * remove it from the context list.
  269. */
  270. static void __perf_counter_remove_from_context(void *info)
  271. {
  272. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  273. struct perf_counter *counter = info;
  274. struct perf_counter_context *ctx = counter->ctx;
  275. /*
  276. * If this is a task context, we need to check whether it is
  277. * the current task context of this cpu. If not it has been
  278. * scheduled out before the smp call arrived.
  279. */
  280. if (ctx->task && cpuctx->task_ctx != ctx)
  281. return;
  282. spin_lock(&ctx->lock);
  283. /*
  284. * Protect the list operation against NMI by disabling the
  285. * counters on a global level.
  286. */
  287. perf_disable();
  288. counter_sched_out(counter, cpuctx, ctx);
  289. list_del_counter(counter, ctx);
  290. if (!ctx->task) {
  291. /*
  292. * Allow more per task counters with respect to the
  293. * reservation:
  294. */
  295. cpuctx->max_pertask =
  296. min(perf_max_counters - ctx->nr_counters,
  297. perf_max_counters - perf_reserved_percpu);
  298. }
  299. perf_enable();
  300. spin_unlock(&ctx->lock);
  301. }
  302. /*
  303. * Remove the counter from a task's (or a CPU's) list of counters.
  304. *
  305. * Must be called with ctx->mutex held.
  306. *
  307. * CPU counters are removed with a smp call. For task counters we only
  308. * call when the task is on a CPU.
  309. *
  310. * If counter->ctx is a cloned context, callers must make sure that
  311. * every task struct that counter->ctx->task could possibly point to
  312. * remains valid. This is OK when called from perf_release since
  313. * that only calls us on the top-level context, which can't be a clone.
  314. * When called from perf_counter_exit_task, it's OK because the
  315. * context has been detached from its task.
  316. */
  317. static void perf_counter_remove_from_context(struct perf_counter *counter)
  318. {
  319. struct perf_counter_context *ctx = counter->ctx;
  320. struct task_struct *task = ctx->task;
  321. if (!task) {
  322. /*
  323. * Per cpu counters are removed via an smp call and
  324. * the removal is always successful.
  325. */
  326. smp_call_function_single(counter->cpu,
  327. __perf_counter_remove_from_context,
  328. counter, 1);
  329. return;
  330. }
  331. retry:
  332. task_oncpu_function_call(task, __perf_counter_remove_from_context,
  333. counter);
  334. spin_lock_irq(&ctx->lock);
  335. /*
  336. * If the context is active we need to retry the smp call.
  337. */
  338. if (ctx->nr_active && !list_empty(&counter->list_entry)) {
  339. spin_unlock_irq(&ctx->lock);
  340. goto retry;
  341. }
  342. /*
  343. * The lock prevents this context from being scheduled in, so we
  344. * can remove the counter safely if the call above did not
  345. * succeed.
  346. */
  347. if (!list_empty(&counter->list_entry)) {
  348. list_del_counter(counter, ctx);
  349. }
  350. spin_unlock_irq(&ctx->lock);
  351. }
  352. static inline u64 perf_clock(void)
  353. {
  354. return cpu_clock(smp_processor_id());
  355. }
  356. /*
  357. * Update the record of the current time in a context.
  358. */
  359. static void update_context_time(struct perf_counter_context *ctx)
  360. {
  361. u64 now = perf_clock();
  362. ctx->time += now - ctx->timestamp;
  363. ctx->timestamp = now;
  364. }
  365. /*
  366. * Update the total_time_enabled and total_time_running fields for a counter.
  367. */
  368. static void update_counter_times(struct perf_counter *counter)
  369. {
  370. struct perf_counter_context *ctx = counter->ctx;
  371. u64 run_end;
  372. if (counter->state < PERF_COUNTER_STATE_INACTIVE)
  373. return;
  374. counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
  375. if (counter->state == PERF_COUNTER_STATE_INACTIVE)
  376. run_end = counter->tstamp_stopped;
  377. else
  378. run_end = ctx->time;
  379. counter->total_time_running = run_end - counter->tstamp_running;
  380. }
  381. /*
  382. * Update total_time_enabled and total_time_running for all counters in a group.
  383. */
  384. static void update_group_times(struct perf_counter *leader)
  385. {
  386. struct perf_counter *counter;
  387. update_counter_times(leader);
  388. list_for_each_entry(counter, &leader->sibling_list, list_entry)
  389. update_counter_times(counter);
  390. }
  391. /*
  392. * Cross CPU call to disable a performance counter
  393. */
  394. static void __perf_counter_disable(void *info)
  395. {
  396. struct perf_counter *counter = info;
  397. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  398. struct perf_counter_context *ctx = counter->ctx;
  399. /*
  400. * If this is a per-task counter, need to check whether this
  401. * counter's task is the current task on this cpu.
  402. */
  403. if (ctx->task && cpuctx->task_ctx != ctx)
  404. return;
  405. spin_lock(&ctx->lock);
  406. /*
  407. * If the counter is on, turn it off.
  408. * If it is in error state, leave it in error state.
  409. */
  410. if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
  411. update_context_time(ctx);
  412. update_counter_times(counter);
  413. if (counter == counter->group_leader)
  414. group_sched_out(counter, cpuctx, ctx);
  415. else
  416. counter_sched_out(counter, cpuctx, ctx);
  417. counter->state = PERF_COUNTER_STATE_OFF;
  418. }
  419. spin_unlock(&ctx->lock);
  420. }
  421. /*
  422. * Disable a counter.
  423. *
  424. * If counter->ctx is a cloned context, callers must make sure that
  425. * every task struct that counter->ctx->task could possibly point to
  426. * remains valid. This condition is satisfied when called through
  427. * perf_counter_for_each_child or perf_counter_for_each because they
  428. * hold the top-level counter's child_mutex, so any descendant that
  429. * goes to exit will block in sync_child_counter.
  430. * When called from perf_pending_counter it's OK because counter->ctx
  431. * is the current context on this CPU and preemption is disabled,
  432. * hence we can't get into perf_counter_task_sched_out for this context.
  433. */
  434. static void perf_counter_disable(struct perf_counter *counter)
  435. {
  436. struct perf_counter_context *ctx = counter->ctx;
  437. struct task_struct *task = ctx->task;
  438. if (!task) {
  439. /*
  440. * Disable the counter on the cpu that it's on
  441. */
  442. smp_call_function_single(counter->cpu, __perf_counter_disable,
  443. counter, 1);
  444. return;
  445. }
  446. retry:
  447. task_oncpu_function_call(task, __perf_counter_disable, counter);
  448. spin_lock_irq(&ctx->lock);
  449. /*
  450. * If the counter is still active, we need to retry the cross-call.
  451. */
  452. if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
  453. spin_unlock_irq(&ctx->lock);
  454. goto retry;
  455. }
  456. /*
  457. * Since we have the lock this context can't be scheduled
  458. * in, so we can change the state safely.
  459. */
  460. if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  461. update_counter_times(counter);
  462. counter->state = PERF_COUNTER_STATE_OFF;
  463. }
  464. spin_unlock_irq(&ctx->lock);
  465. }
  466. static int
  467. counter_sched_in(struct perf_counter *counter,
  468. struct perf_cpu_context *cpuctx,
  469. struct perf_counter_context *ctx,
  470. int cpu)
  471. {
  472. if (counter->state <= PERF_COUNTER_STATE_OFF)
  473. return 0;
  474. counter->state = PERF_COUNTER_STATE_ACTIVE;
  475. counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
  476. /*
  477. * The new state must be visible before we turn it on in the hardware:
  478. */
  479. smp_wmb();
  480. if (counter->pmu->enable(counter)) {
  481. counter->state = PERF_COUNTER_STATE_INACTIVE;
  482. counter->oncpu = -1;
  483. return -EAGAIN;
  484. }
  485. counter->tstamp_running += ctx->time - counter->tstamp_stopped;
  486. if (!is_software_counter(counter))
  487. cpuctx->active_oncpu++;
  488. ctx->nr_active++;
  489. if (counter->attr.exclusive)
  490. cpuctx->exclusive = 1;
  491. return 0;
  492. }
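/*
 * Note on the smp_wmb() above (illustrative): the ACTIVE state and the
 * oncpu assignment have to be globally visible before pmu->enable() lets
 * the hardware raise its first interrupt, so that an interrupt or NMI
 * handler never observes a counter that is already counting but still
 * looks INACTIVE.
 */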
  493. static int
  494. group_sched_in(struct perf_counter *group_counter,
  495. struct perf_cpu_context *cpuctx,
  496. struct perf_counter_context *ctx,
  497. int cpu)
  498. {
  499. struct perf_counter *counter, *partial_group;
  500. int ret;
  501. if (group_counter->state == PERF_COUNTER_STATE_OFF)
  502. return 0;
  503. ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
  504. if (ret)
  505. return ret < 0 ? ret : 0;
  506. if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
  507. return -EAGAIN;
  508. /*
  509. * Schedule in siblings as one group (if any):
  510. */
  511. list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
  512. if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
  513. partial_group = counter;
  514. goto group_error;
  515. }
  516. }
  517. return 0;
  518. group_error:
  519. /*
  520. * Groups can be scheduled in as one unit only, so undo any
  521. * partial group before returning:
  522. */
  523. list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
  524. if (counter == partial_group)
  525. break;
  526. counter_sched_out(counter, cpuctx, ctx);
  527. }
  528. counter_sched_out(group_counter, cpuctx, ctx);
  529. return -EAGAIN;
  530. }
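/*
 * Illustrative note on the return convention above: hw_perf_group_sched_in()
 * may schedule the whole group in one architecture-specific transaction. A
 * positive return means it did so and the generic path is skipped, a
 * negative return is handed back as an error, and 0 falls through to the
 * counter-by-counter path with its all-or-nothing unwind at group_error.
 */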
  531. /*
  532. * Return 1 for a group consisting entirely of software counters,
  533. * 0 if the group contains any hardware counters.
  534. */
  535. static int is_software_only_group(struct perf_counter *leader)
  536. {
  537. struct perf_counter *counter;
  538. if (!is_software_counter(leader))
  539. return 0;
  540. list_for_each_entry(counter, &leader->sibling_list, list_entry)
  541. if (!is_software_counter(counter))
  542. return 0;
  543. return 1;
  544. }
  545. /*
  546. * Work out whether we can put this counter group on the CPU now.
  547. */
  548. static int group_can_go_on(struct perf_counter *counter,
  549. struct perf_cpu_context *cpuctx,
  550. int can_add_hw)
  551. {
  552. /*
  553. * Groups consisting entirely of software counters can always go on.
  554. */
  555. if (is_software_only_group(counter))
  556. return 1;
  557. /*
  558. * If an exclusive group is already on, no other hardware
  559. * counters can go on.
  560. */
  561. if (cpuctx->exclusive)
  562. return 0;
  563. /*
  564. * If this group is exclusive and there are already
  565. * counters on the CPU, it can't go on.
  566. */
  567. if (counter->attr.exclusive && cpuctx->active_oncpu)
  568. return 0;
  569. /*
  570. * Otherwise, try to add it if all previous groups were able
  571. * to go on.
  572. */
  573. return can_add_hw;
  574. }
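/*
 * Summary of the checks above (illustrative):
 *
 *   group is software-only                 -> always schedulable
 *   cpuctx->exclusive already set          -> no new hardware group goes on
 *   group is exclusive, PMU already busy   -> not schedulable
 *   otherwise                              -> schedulable iff every earlier
 *                                             group also fit (can_add_hw)
 */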
  575. static void add_counter_to_ctx(struct perf_counter *counter,
  576. struct perf_counter_context *ctx)
  577. {
  578. list_add_counter(counter, ctx);
  579. counter->tstamp_enabled = ctx->time;
  580. counter->tstamp_running = ctx->time;
  581. counter->tstamp_stopped = ctx->time;
  582. }
  583. /*
  584. * Cross CPU call to install and enable a performance counter
  585. *
  586. * Must be called with ctx->mutex held
  587. */
  588. static void __perf_install_in_context(void *info)
  589. {
  590. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  591. struct perf_counter *counter = info;
  592. struct perf_counter_context *ctx = counter->ctx;
  593. struct perf_counter *leader = counter->group_leader;
  594. int cpu = smp_processor_id();
  595. int err;
  596. /*
  597. * If this is a task context, we need to check whether it is
  598. * the current task context of this cpu. If not it has been
  599. * scheduled out before the smp call arrived.
  600. * Or possibly this is the right context but it isn't
  601. * on this cpu because it had no counters.
  602. */
  603. if (ctx->task && cpuctx->task_ctx != ctx) {
  604. if (cpuctx->task_ctx || ctx->task != current)
  605. return;
  606. cpuctx->task_ctx = ctx;
  607. }
  608. spin_lock(&ctx->lock);
  609. ctx->is_active = 1;
  610. update_context_time(ctx);
  611. /*
  612. * Protect the list operation against NMI by disabling the
  613. * counters on a global level. NOP for non NMI based counters.
  614. */
  615. perf_disable();
  616. add_counter_to_ctx(counter, ctx);
  617. /*
  618. * Don't put the counter on if it is disabled or if
  619. * it is in a group and the group isn't on.
  620. */
  621. if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
  622. (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
  623. goto unlock;
  624. /*
  625. * An exclusive counter can't go on if there are already active
  626. * hardware counters, and no hardware counter can go on if there
  627. * is already an exclusive counter on.
  628. */
  629. if (!group_can_go_on(counter, cpuctx, 1))
  630. err = -EEXIST;
  631. else
  632. err = counter_sched_in(counter, cpuctx, ctx, cpu);
  633. if (err) {
  634. /*
  635. * This counter couldn't go on. If it is in a group
  636. * then we have to pull the whole group off.
  637. * If the counter group is pinned then put it in error state.
  638. */
  639. if (leader != counter)
  640. group_sched_out(leader, cpuctx, ctx);
  641. if (leader->attr.pinned) {
  642. update_group_times(leader);
  643. leader->state = PERF_COUNTER_STATE_ERROR;
  644. }
  645. }
  646. if (!err && !ctx->task && cpuctx->max_pertask)
  647. cpuctx->max_pertask--;
  648. unlock:
  649. perf_enable();
  650. spin_unlock(&ctx->lock);
  651. }
  652. /*
  653. * Attach a performance counter to a context
  654. *
  655. * First we add the counter to the list with the hardware enable bit
  656. * in counter->hw_config cleared.
  657. *
  658. * If the counter is attached to a task which is on a CPU we use a smp
  659. * call to enable it in the task context. The task might have been
  660. * scheduled away, but we check this in the smp call again.
  661. *
  662. * Must be called with ctx->mutex held.
  663. */
  664. static void
  665. perf_install_in_context(struct perf_counter_context *ctx,
  666. struct perf_counter *counter,
  667. int cpu)
  668. {
  669. struct task_struct *task = ctx->task;
  670. if (!task) {
  671. /*
  672. * Per cpu counters are installed via an smp call and
  673. * the install is always successful.
  674. */
  675. smp_call_function_single(cpu, __perf_install_in_context,
  676. counter, 1);
  677. return;
  678. }
  679. retry:
  680. task_oncpu_function_call(task, __perf_install_in_context,
  681. counter);
  682. spin_lock_irq(&ctx->lock);
  683. /*
  684. * we need to retry the smp call.
  685. */
  686. if (ctx->is_active && list_empty(&counter->list_entry)) {
  687. spin_unlock_irq(&ctx->lock);
  688. goto retry;
  689. }
  690. /*
  691. * The lock prevents this context from being scheduled in, so we
  692. * can add the counter safely if the call above did not
  693. * succeed.
  694. */
  695. if (list_empty(&counter->list_entry))
  696. add_counter_to_ctx(counter, ctx);
  697. spin_unlock_irq(&ctx->lock);
  698. }
  699. /*
  700. * Cross CPU call to enable a performance counter
  701. */
  702. static void __perf_counter_enable(void *info)
  703. {
  704. struct perf_counter *counter = info;
  705. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  706. struct perf_counter_context *ctx = counter->ctx;
  707. struct perf_counter *leader = counter->group_leader;
  708. int err;
  709. /*
  710. * If this is a per-task counter, need to check whether this
  711. * counter's task is the current task on this cpu.
  712. */
  713. if (ctx->task && cpuctx->task_ctx != ctx) {
  714. if (cpuctx->task_ctx || ctx->task != current)
  715. return;
  716. cpuctx->task_ctx = ctx;
  717. }
  718. spin_lock(&ctx->lock);
  719. ctx->is_active = 1;
  720. update_context_time(ctx);
  721. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  722. goto unlock;
  723. counter->state = PERF_COUNTER_STATE_INACTIVE;
  724. counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
  725. /*
  726. * If the counter is in a group and isn't the group leader,
  727. * then don't put it on unless the group is on.
  728. */
  729. if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
  730. goto unlock;
  731. if (!group_can_go_on(counter, cpuctx, 1)) {
  732. err = -EEXIST;
  733. } else {
  734. perf_disable();
  735. if (counter == leader)
  736. err = group_sched_in(counter, cpuctx, ctx,
  737. smp_processor_id());
  738. else
  739. err = counter_sched_in(counter, cpuctx, ctx,
  740. smp_processor_id());
  741. perf_enable();
  742. }
  743. if (err) {
  744. /*
  745. * If this counter can't go on and it's part of a
  746. * group, then the whole group has to come off.
  747. */
  748. if (leader != counter)
  749. group_sched_out(leader, cpuctx, ctx);
  750. if (leader->attr.pinned) {
  751. update_group_times(leader);
  752. leader->state = PERF_COUNTER_STATE_ERROR;
  753. }
  754. }
  755. unlock:
  756. spin_unlock(&ctx->lock);
  757. }
  758. /*
  759. * Enable a counter.
  760. *
  761. * If counter->ctx is a cloned context, callers must make sure that
  762. * every task struct that counter->ctx->task could possibly point to
  763. * remains valid. This condition is satisfied when called through
  764. * perf_counter_for_each_child or perf_counter_for_each as described
  765. * for perf_counter_disable.
  766. */
  767. static void perf_counter_enable(struct perf_counter *counter)
  768. {
  769. struct perf_counter_context *ctx = counter->ctx;
  770. struct task_struct *task = ctx->task;
  771. if (!task) {
  772. /*
  773. * Enable the counter on the cpu that it's on
  774. */
  775. smp_call_function_single(counter->cpu, __perf_counter_enable,
  776. counter, 1);
  777. return;
  778. }
  779. spin_lock_irq(&ctx->lock);
  780. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  781. goto out;
  782. /*
  783. * If the counter is in error state, clear that first.
  784. * That way, if we see the counter in error state below, we
  785. * know that it has gone back into error state, as distinct
  786. * from the task having been scheduled away before the
  787. * cross-call arrived.
  788. */
  789. if (counter->state == PERF_COUNTER_STATE_ERROR)
  790. counter->state = PERF_COUNTER_STATE_OFF;
  791. retry:
  792. spin_unlock_irq(&ctx->lock);
  793. task_oncpu_function_call(task, __perf_counter_enable, counter);
  794. spin_lock_irq(&ctx->lock);
  795. /*
  796. * If the context is active and the counter is still off,
  797. * we need to retry the cross-call.
  798. */
  799. if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
  800. goto retry;
  801. /*
  802. * Since we have the lock this context can't be scheduled
  803. * in, so we can change the state safely.
  804. */
  805. if (counter->state == PERF_COUNTER_STATE_OFF) {
  806. counter->state = PERF_COUNTER_STATE_INACTIVE;
  807. counter->tstamp_enabled =
  808. ctx->time - counter->total_time_enabled;
  809. }
  810. out:
  811. spin_unlock_irq(&ctx->lock);
  812. }
  813. static int perf_counter_refresh(struct perf_counter *counter, int refresh)
  814. {
  815. /*
  816. * not supported on inherited counters
  817. */
  818. if (counter->attr.inherit)
  819. return -EINVAL;
  820. atomic_add(refresh, &counter->event_limit);
  821. perf_counter_enable(counter);
  822. return 0;
  823. }
  824. void __perf_counter_sched_out(struct perf_counter_context *ctx,
  825. struct perf_cpu_context *cpuctx)
  826. {
  827. struct perf_counter *counter;
  828. spin_lock(&ctx->lock);
  829. ctx->is_active = 0;
  830. if (likely(!ctx->nr_counters))
  831. goto out;
  832. update_context_time(ctx);
  833. perf_disable();
  834. if (ctx->nr_active) {
  835. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  836. if (counter != counter->group_leader)
  837. counter_sched_out(counter, cpuctx, ctx);
  838. else
  839. group_sched_out(counter, cpuctx, ctx);
  840. }
  841. }
  842. perf_enable();
  843. out:
  844. spin_unlock(&ctx->lock);
  845. }
  846. /*
  847. * Test whether two contexts are equivalent, i.e. whether they
  848. * have both been cloned from the same version of the same context
  849. * and they both have the same number of enabled counters.
  850. * If the number of enabled counters is the same, then the set
  851. * of enabled counters should be the same, because these are both
  852. * inherited contexts, therefore we can't access individual counters
  853. * in them directly with an fd; we can only enable/disable all
  854. * counters via prctl, or enable/disable all counters in a family
  855. * via ioctl, which will have the same effect on both contexts.
  856. */
  857. static int context_equiv(struct perf_counter_context *ctx1,
  858. struct perf_counter_context *ctx2)
  859. {
  860. return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
  861. && ctx1->parent_gen == ctx2->parent_gen
  862. && !ctx1->pin_count && !ctx2->pin_count;
  863. }
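/*
 * Illustrative note: two contexts compare as equivalent only when both are
 * clones of the same parent generation and neither is pinned. This is what
 * lets perf_counter_task_sched_out() below swap the whole contexts between
 * the outgoing and incoming task instead of scheduling every counter out
 * and back in.
 */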
  864. /*
  865. * Called from scheduler to remove the counters of the current task,
  866. * with interrupts disabled.
  867. *
  868. * We stop each counter and update the counter value in counter->count.
  869. *
  870. * This does not protect us against NMI, but disable()
  871. * sets the disabled bit in the control field of counter _before_
  872. * accessing the counter control register. If an NMI hits, then it will
  873. * not restart the counter.
  874. */
  875. void perf_counter_task_sched_out(struct task_struct *task,
  876. struct task_struct *next, int cpu)
  877. {
  878. struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
  879. struct perf_counter_context *ctx = task->perf_counter_ctxp;
  880. struct perf_counter_context *next_ctx;
  881. struct perf_counter_context *parent;
  882. struct pt_regs *regs;
  883. int do_switch = 1;
  884. regs = task_pt_regs(task);
  885. perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
  886. if (likely(!ctx || !cpuctx->task_ctx))
  887. return;
  888. update_context_time(ctx);
  889. rcu_read_lock();
  890. parent = rcu_dereference(ctx->parent_ctx);
  891. next_ctx = next->perf_counter_ctxp;
  892. if (parent && next_ctx &&
  893. rcu_dereference(next_ctx->parent_ctx) == parent) {
  894. /*
  895. * Looks like the two contexts are clones, so we might be
  896. * able to optimize the context switch. We lock both
  897. * contexts and check that they are clones under the
  898. * lock (including re-checking that neither has been
  899. * uncloned in the meantime). It doesn't matter which
  900. * order we take the locks because no other cpu could
  901. * be trying to lock both of these tasks.
  902. */
  903. spin_lock(&ctx->lock);
  904. spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
  905. if (context_equiv(ctx, next_ctx)) {
  906. /*
  907. * XXX do we need a memory barrier of sorts
  908. * wrt to rcu_dereference() of perf_counter_ctxp
  909. */
  910. task->perf_counter_ctxp = next_ctx;
  911. next->perf_counter_ctxp = ctx;
  912. ctx->task = next;
  913. next_ctx->task = task;
  914. do_switch = 0;
  915. }
  916. spin_unlock(&next_ctx->lock);
  917. spin_unlock(&ctx->lock);
  918. }
  919. rcu_read_unlock();
  920. if (do_switch) {
  921. __perf_counter_sched_out(ctx, cpuctx);
  922. cpuctx->task_ctx = NULL;
  923. }
  924. }
  925. /*
  926. * Called with IRQs disabled
  927. */
  928. static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
  929. {
  930. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  931. if (!cpuctx->task_ctx)
  932. return;
  933. if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  934. return;
  935. __perf_counter_sched_out(ctx, cpuctx);
  936. cpuctx->task_ctx = NULL;
  937. }
  938. /*
  939. * Called with IRQs disabled
  940. */
  941. static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
  942. {
  943. __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
  944. }
  945. static void
  946. __perf_counter_sched_in(struct perf_counter_context *ctx,
  947. struct perf_cpu_context *cpuctx, int cpu)
  948. {
  949. struct perf_counter *counter;
  950. int can_add_hw = 1;
  951. spin_lock(&ctx->lock);
  952. ctx->is_active = 1;
  953. if (likely(!ctx->nr_counters))
  954. goto out;
  955. ctx->timestamp = perf_clock();
  956. perf_disable();
  957. /*
  958. * First go through the list and put on any pinned groups
  959. * in order to give them the best chance of going on.
  960. */
  961. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  962. if (counter->state <= PERF_COUNTER_STATE_OFF ||
  963. !counter->attr.pinned)
  964. continue;
  965. if (counter->cpu != -1 && counter->cpu != cpu)
  966. continue;
  967. if (counter != counter->group_leader)
  968. counter_sched_in(counter, cpuctx, ctx, cpu);
  969. else {
  970. if (group_can_go_on(counter, cpuctx, 1))
  971. group_sched_in(counter, cpuctx, ctx, cpu);
  972. }
  973. /*
  974. * If this pinned group hasn't been scheduled,
  975. * put it in error state.
  976. */
  977. if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  978. update_group_times(counter);
  979. counter->state = PERF_COUNTER_STATE_ERROR;
  980. }
  981. }
  982. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  983. /*
  984. * Ignore counters in OFF or ERROR state, and
  985. * ignore pinned counters since we did them already.
  986. */
  987. if (counter->state <= PERF_COUNTER_STATE_OFF ||
  988. counter->attr.pinned)
  989. continue;
  990. /*
  991. * Listen to the 'cpu' scheduling filter constraint
  992. * of counters:
  993. */
  994. if (counter->cpu != -1 && counter->cpu != cpu)
  995. continue;
  996. if (counter != counter->group_leader) {
  997. if (counter_sched_in(counter, cpuctx, ctx, cpu))
  998. can_add_hw = 0;
  999. } else {
  1000. if (group_can_go_on(counter, cpuctx, can_add_hw)) {
  1001. if (group_sched_in(counter, cpuctx, ctx, cpu))
  1002. can_add_hw = 0;
  1003. }
  1004. }
  1005. }
  1006. perf_enable();
  1007. out:
  1008. spin_unlock(&ctx->lock);
  1009. }
  1010. /*
  1011. * Called from scheduler to add the counters of the current task
  1012. * with interrupts disabled.
  1013. *
  1014. * We restore the counter value and then enable it.
  1015. *
  1016. * This does not protect us against NMI, but enable()
  1017. * sets the enabled bit in the control field of counter _before_
  1018. * accessing the counter control register. If an NMI hits, then it will
  1019. * keep the counter running.
  1020. */
  1021. void perf_counter_task_sched_in(struct task_struct *task, int cpu)
  1022. {
  1023. struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
  1024. struct perf_counter_context *ctx = task->perf_counter_ctxp;
  1025. if (likely(!ctx))
  1026. return;
  1027. if (cpuctx->task_ctx == ctx)
  1028. return;
  1029. __perf_counter_sched_in(ctx, cpuctx, cpu);
  1030. cpuctx->task_ctx = ctx;
  1031. }
  1032. static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
  1033. {
  1034. struct perf_counter_context *ctx = &cpuctx->ctx;
  1035. __perf_counter_sched_in(ctx, cpuctx, cpu);
  1036. }
  1037. #define MAX_INTERRUPTS (~0ULL)
  1038. static void perf_log_throttle(struct perf_counter *counter, int enable);
  1039. static void perf_log_period(struct perf_counter *counter, u64 period);
  1040. static void perf_adjust_period(struct perf_counter *counter, u64 events)
  1041. {
  1042. struct hw_perf_counter *hwc = &counter->hw;
  1043. u64 period, sample_period;
  1044. s64 delta;
  1045. events *= hwc->sample_period;
  1046. period = div64_u64(events, counter->attr.sample_freq);
  1047. delta = (s64)(period - hwc->sample_period);
  1048. delta = (delta + 7) / 8; /* low pass filter */
  1049. sample_period = hwc->sample_period + delta;
  1050. if (!sample_period)
  1051. sample_period = 1;
  1052. perf_log_period(counter, sample_period);
  1053. hwc->sample_period = sample_period;
  1054. }
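/*
 * Worked example for the adjustment above (illustrative): with
 * attr.sample_freq = 1000, a current sample_period of 10000 and an observed
 * rate of events = 2000 samples over the last second,
 *
 *   period = 2000 * 10000 / 1000        = 20000
 *   delta  = (20000 - 10000 + 7) / 8    = 1250
 *
 * so sample_period only moves from 10000 to 11250 in this step; the
 * divide-by-8 low-pass filter smooths out transient bursts instead of
 * jumping straight to the new estimate.
 */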
  1055. static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
  1056. {
  1057. struct perf_counter *counter;
  1058. struct hw_perf_counter *hwc;
  1059. u64 interrupts, freq;
  1060. spin_lock(&ctx->lock);
  1061. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1062. if (counter->state != PERF_COUNTER_STATE_ACTIVE)
  1063. continue;
  1064. hwc = &counter->hw;
  1065. interrupts = hwc->interrupts;
  1066. hwc->interrupts = 0;
  1067. /*
  1068. * unthrottle counters on the tick
  1069. */
  1070. if (interrupts == MAX_INTERRUPTS) {
  1071. perf_log_throttle(counter, 1);
  1072. counter->pmu->unthrottle(counter);
  1073. interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
  1074. }
  1075. if (!counter->attr.freq || !counter->attr.sample_freq)
  1076. continue;
  1077. /*
  1078. * if the specified freq < HZ then we need to skip ticks
  1079. */
  1080. if (counter->attr.sample_freq < HZ) {
  1081. freq = counter->attr.sample_freq;
  1082. hwc->freq_count += freq;
  1083. hwc->freq_interrupts += interrupts;
  1084. if (hwc->freq_count < HZ)
  1085. continue;
  1086. interrupts = hwc->freq_interrupts;
  1087. hwc->freq_interrupts = 0;
  1088. hwc->freq_count -= HZ;
  1089. } else
  1090. freq = HZ;
  1091. perf_adjust_period(counter, freq * interrupts);
  1092. /*
  1093. * In order to avoid being stalled by an (accidental) huge
  1094. * sample period, force reset the sample period if we didn't
  1095. * get any events in this freq period.
  1096. */
  1097. if (!interrupts) {
  1098. perf_disable();
  1099. counter->pmu->disable(counter);
  1100. atomic64_set(&hwc->period_left, 0);
  1101. counter->pmu->enable(counter);
  1102. perf_enable();
  1103. }
  1104. }
  1105. spin_unlock(&ctx->lock);
  1106. }
  1107. /*
  1108. * Round-robin a context's counters:
  1109. */
  1110. static void rotate_ctx(struct perf_counter_context *ctx)
  1111. {
  1112. struct perf_counter *counter;
  1113. if (!ctx->nr_counters)
  1114. return;
  1115. spin_lock(&ctx->lock);
  1116. /*
  1117. * Rotate the first entry last (works just fine for group counters too):
  1118. */
  1119. perf_disable();
  1120. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1121. list_move_tail(&counter->list_entry, &ctx->counter_list);
  1122. break;
  1123. }
  1124. perf_enable();
  1125. spin_unlock(&ctx->lock);
  1126. }
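/*
 * Effect of the rotation above (illustrative): with counters A, B, C on
 * ctx->counter_list, one tick yields B, C, A and the next C, A, B.
 * Together with the sched-out/sched-in pair in perf_counter_task_tick()
 * below, this time-multiplexes groups that do not all fit on the PMU at
 * once.
 */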
  1127. void perf_counter_task_tick(struct task_struct *curr, int cpu)
  1128. {
  1129. struct perf_cpu_context *cpuctx;
  1130. struct perf_counter_context *ctx;
  1131. if (!atomic_read(&nr_counters))
  1132. return;
  1133. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1134. ctx = curr->perf_counter_ctxp;
  1135. perf_ctx_adjust_freq(&cpuctx->ctx);
  1136. if (ctx)
  1137. perf_ctx_adjust_freq(ctx);
  1138. perf_counter_cpu_sched_out(cpuctx);
  1139. if (ctx)
  1140. __perf_counter_task_sched_out(ctx);
  1141. rotate_ctx(&cpuctx->ctx);
  1142. if (ctx)
  1143. rotate_ctx(ctx);
  1144. perf_counter_cpu_sched_in(cpuctx, cpu);
  1145. if (ctx)
  1146. perf_counter_task_sched_in(curr, cpu);
  1147. }
  1148. /*
  1149. * Cross CPU call to read the hardware counter
  1150. */
  1151. static void __read(void *info)
  1152. {
  1153. struct perf_counter *counter = info;
  1154. struct perf_counter_context *ctx = counter->ctx;
  1155. unsigned long flags;
  1156. local_irq_save(flags);
  1157. if (ctx->is_active)
  1158. update_context_time(ctx);
  1159. counter->pmu->read(counter);
  1160. update_counter_times(counter);
  1161. local_irq_restore(flags);
  1162. }
  1163. static u64 perf_counter_read(struct perf_counter *counter)
  1164. {
  1165. /*
  1166. * If counter is enabled and currently active on a CPU, update the
  1167. * value in the counter structure:
  1168. */
  1169. if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
  1170. smp_call_function_single(counter->oncpu,
  1171. __read, counter, 1);
  1172. } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  1173. update_counter_times(counter);
  1174. }
  1175. return atomic64_read(&counter->count);
  1176. }
  1177. /*
  1178. * Initialize the perf_counter context in a task_struct:
  1179. */
  1180. static void
  1181. __perf_counter_init_context(struct perf_counter_context *ctx,
  1182. struct task_struct *task)
  1183. {
  1184. memset(ctx, 0, sizeof(*ctx));
  1185. spin_lock_init(&ctx->lock);
  1186. mutex_init(&ctx->mutex);
  1187. INIT_LIST_HEAD(&ctx->counter_list);
  1188. INIT_LIST_HEAD(&ctx->event_list);
  1189. atomic_set(&ctx->refcount, 1);
  1190. ctx->task = task;
  1191. }
  1192. static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  1193. {
  1194. struct perf_counter_context *parent_ctx;
  1195. struct perf_counter_context *ctx;
  1196. struct perf_cpu_context *cpuctx;
  1197. struct task_struct *task;
  1198. unsigned long flags;
  1199. int err;
  1200. /*
  1201. * If cpu is not a wildcard then this is a percpu counter:
  1202. */
  1203. if (cpu != -1) {
  1204. /* Must be root to operate on a CPU counter: */
  1205. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  1206. return ERR_PTR(-EACCES);
  1207. if (cpu < 0 || cpu > num_possible_cpus())
  1208. return ERR_PTR(-EINVAL);
  1209. /*
  1210. * We could be clever and allow to attach a counter to an
  1211. * offline CPU and activate it when the CPU comes up, but
  1212. * that's for later.
  1213. */
  1214. if (!cpu_isset(cpu, cpu_online_map))
  1215. return ERR_PTR(-ENODEV);
  1216. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1217. ctx = &cpuctx->ctx;
  1218. get_ctx(ctx);
  1219. return ctx;
  1220. }
  1221. rcu_read_lock();
  1222. if (!pid)
  1223. task = current;
  1224. else
  1225. task = find_task_by_vpid(pid);
  1226. if (task)
  1227. get_task_struct(task);
  1228. rcu_read_unlock();
  1229. if (!task)
  1230. return ERR_PTR(-ESRCH);
  1231. /*
  1232. * Can't attach counters to a dying task.
  1233. */
  1234. err = -ESRCH;
  1235. if (task->flags & PF_EXITING)
  1236. goto errout;
  1237. /* Reuse ptrace permission checks for now. */
  1238. err = -EACCES;
  1239. if (!ptrace_may_access(task, PTRACE_MODE_READ))
  1240. goto errout;
  1241. retry:
  1242. ctx = perf_lock_task_context(task, &flags);
  1243. if (ctx) {
  1244. parent_ctx = ctx->parent_ctx;
  1245. if (parent_ctx) {
  1246. put_ctx(parent_ctx);
  1247. ctx->parent_ctx = NULL; /* no longer a clone */
  1248. }
  1249. spin_unlock_irqrestore(&ctx->lock, flags);
  1250. }
  1251. if (!ctx) {
  1252. ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
  1253. err = -ENOMEM;
  1254. if (!ctx)
  1255. goto errout;
  1256. __perf_counter_init_context(ctx, task);
  1257. get_ctx(ctx);
  1258. if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
  1259. /*
  1260. * We raced with some other task; use
  1261. * the context they set.
  1262. */
  1263. kfree(ctx);
  1264. goto retry;
  1265. }
  1266. get_task_struct(task);
  1267. }
  1268. put_task_struct(task);
  1269. return ctx;
  1270. errout:
  1271. put_task_struct(task);
  1272. return ERR_PTR(err);
  1273. }
  1274. static void free_counter_rcu(struct rcu_head *head)
  1275. {
  1276. struct perf_counter *counter;
  1277. counter = container_of(head, struct perf_counter, rcu_head);
  1278. if (counter->ns)
  1279. put_pid_ns(counter->ns);
  1280. kfree(counter);
  1281. }
  1282. static void perf_pending_sync(struct perf_counter *counter);
  1283. static void free_counter(struct perf_counter *counter)
  1284. {
  1285. perf_pending_sync(counter);
  1286. if (!counter->parent) {
  1287. atomic_dec(&nr_counters);
  1288. if (counter->attr.mmap)
  1289. atomic_dec(&nr_mmap_counters);
  1290. if (counter->attr.comm)
  1291. atomic_dec(&nr_comm_counters);
  1292. }
  1293. if (counter->destroy)
  1294. counter->destroy(counter);
  1295. put_ctx(counter->ctx);
  1296. call_rcu(&counter->rcu_head, free_counter_rcu);
  1297. }
  1298. /*
  1299. * Called when the last reference to the file is gone.
  1300. */
  1301. static int perf_release(struct inode *inode, struct file *file)
  1302. {
  1303. struct perf_counter *counter = file->private_data;
  1304. struct perf_counter_context *ctx = counter->ctx;
  1305. file->private_data = NULL;
  1306. WARN_ON_ONCE(ctx->parent_ctx);
  1307. mutex_lock(&ctx->mutex);
  1308. perf_counter_remove_from_context(counter);
  1309. mutex_unlock(&ctx->mutex);
  1310. mutex_lock(&counter->owner->perf_counter_mutex);
  1311. list_del_init(&counter->owner_entry);
  1312. mutex_unlock(&counter->owner->perf_counter_mutex);
  1313. put_task_struct(counter->owner);
  1314. free_counter(counter);
  1315. return 0;
  1316. }
  1317. /*
  1318. * Read the performance counter - simple non blocking version for now
  1319. */
  1320. static ssize_t
  1321. perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
  1322. {
  1323. u64 values[4];
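	/*
	 * values[]: counter value, then the optional TOTAL_TIME_ENABLED,
	 * TOTAL_TIME_RUNNING and ID fields selected by attr.read_format.
	 */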
  1324. int n;
  1325. /*
  1326. * Return end-of-file for a read on a counter that is in
  1327. * error state (i.e. because it was pinned but it couldn't be
  1328. * scheduled on to the CPU at some point).
  1329. */
  1330. if (counter->state == PERF_COUNTER_STATE_ERROR)
  1331. return 0;
  1332. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1333. mutex_lock(&counter->child_mutex);
  1334. values[0] = perf_counter_read(counter);
  1335. n = 1;
  1336. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1337. values[n++] = counter->total_time_enabled +
  1338. atomic64_read(&counter->child_total_time_enabled);
  1339. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1340. values[n++] = counter->total_time_running +
  1341. atomic64_read(&counter->child_total_time_running);
  1342. if (counter->attr.read_format & PERF_FORMAT_ID)
  1343. values[n++] = counter->id;
  1344. mutex_unlock(&counter->child_mutex);
  1345. if (count < n * sizeof(u64))
  1346. return -EINVAL;
  1347. count = n * sizeof(u64);
  1348. if (copy_to_user(buf, values, count))
  1349. return -EFAULT;
  1350. return count;
  1351. }
  1352. static ssize_t
  1353. perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  1354. {
  1355. struct perf_counter *counter = file->private_data;
  1356. return perf_read_hw(counter, buf, count);
  1357. }
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	/*
	 * poll() masks use the POLL* bits from <asm-generic/poll.h>;
	 * POLL_HUP is a SIGPOLL si_code, not a poll bit.
	 */
	unsigned int events = POLLHUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}
  1371. static void perf_counter_reset(struct perf_counter *counter)
  1372. {
  1373. (void)perf_counter_read(counter);
  1374. atomic64_set(&counter->count, 0);
  1375. perf_counter_update_userpage(counter);
  1376. }
  1377. /*
  1378. * Holding the top-level counter's child_mutex means that any
  1379. * descendant process that has inherited this counter will block
  1380. * in sync_child_counter if it goes to exit, thus satisfying the
  1381. * task existence requirements of perf_counter_enable/disable.
  1382. */
  1383. static void perf_counter_for_each_child(struct perf_counter *counter,
  1384. void (*func)(struct perf_counter *))
  1385. {
  1386. struct perf_counter *child;
  1387. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1388. mutex_lock(&counter->child_mutex);
  1389. func(counter);
  1390. list_for_each_entry(child, &counter->child_list, child_list)
  1391. func(child);
  1392. mutex_unlock(&counter->child_mutex);
  1393. }
static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	/*
	 * Apply func to the group leader (and its inherited children),
	 * then to each sibling counter (and its children).
	 */
	perf_counter_for_each_child(counter, func);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		perf_counter_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
{
	struct perf_counter_context *ctx = counter->ctx;
	int ret = 0;
	u64 value;

	if (!counter->attr.sample_period)
		return -EINVAL;

	/*
	 * copy_from_user() returns the number of bytes it could *not*
	 * copy, i.e. 0 on success.
	 */
	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	spin_lock_irq(&ctx->lock);
	if (counter->attr.freq) {
		if (value > sysctl_perf_counter_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		counter->attr.sample_freq = value;
	} else {
		perf_log_period(counter, value);

		counter->attr.sample_period = value;
		counter->hw.sample_period = value;
	}
unlock:
	spin_unlock_irq(&ctx->lock);

	return ret;
}
  1437. static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1438. {
  1439. struct perf_counter *counter = file->private_data;
  1440. void (*func)(struct perf_counter *);
  1441. u32 flags = arg;
  1442. switch (cmd) {
  1443. case PERF_COUNTER_IOC_ENABLE:
  1444. func = perf_counter_enable;
  1445. break;
  1446. case PERF_COUNTER_IOC_DISABLE:
  1447. func = perf_counter_disable;
  1448. break;
  1449. case PERF_COUNTER_IOC_RESET:
  1450. func = perf_counter_reset;
  1451. break;
  1452. case PERF_COUNTER_IOC_REFRESH:
  1453. return perf_counter_refresh(counter, arg);
  1454. case PERF_COUNTER_IOC_PERIOD:
  1455. return perf_counter_period(counter, (u64 __user *)arg);
  1456. default:
  1457. return -ENOTTY;
  1458. }
  1459. if (flags & PERF_IOC_FLAG_GROUP)
  1460. perf_counter_for_each(counter, func);
  1461. else
  1462. perf_counter_for_each_child(counter, func);
  1463. return 0;
  1464. }
  1465. int perf_counter_task_enable(void)
  1466. {
  1467. struct perf_counter *counter;
  1468. mutex_lock(&current->perf_counter_mutex);
  1469. list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
  1470. perf_counter_for_each_child(counter, perf_counter_enable);
  1471. mutex_unlock(&current->perf_counter_mutex);
  1472. return 0;
  1473. }
  1474. int perf_counter_task_disable(void)
  1475. {
  1476. struct perf_counter *counter;
  1477. mutex_lock(&current->perf_counter_mutex);
  1478. list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
  1479. perf_counter_for_each_child(counter, perf_counter_disable);
  1480. mutex_unlock(&current->perf_counter_mutex);
  1481. return 0;
  1482. }
  1483. /*
  1484. * Callers need to ensure there can be no nesting of this function, otherwise
  1485. * the seqlock logic goes bad. We can not serialize this because the arch
  1486. * code calls this from NMI context.
  1487. */
  1488. void perf_counter_update_userpage(struct perf_counter *counter)
  1489. {
  1490. struct perf_counter_mmap_page *userpg;
  1491. struct perf_mmap_data *data;
  1492. rcu_read_lock();
  1493. data = rcu_dereference(counter->data);
  1494. if (!data)
  1495. goto unlock;
  1496. userpg = data->user_page;
  1497. /*
  1498. * Disable preemption so as to not let the corresponding user-space
  1499. * spin too long if we get preempted.
  1500. */
  1501. preempt_disable();
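	/*
	 * userpg->lock is used as a seqcount: it is incremented before and
	 * after the update so userspace can detect a torn read and retry.
	 */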
  1502. ++userpg->lock;
  1503. barrier();
  1504. userpg->index = counter->hw.idx;
  1505. userpg->offset = atomic64_read(&counter->count);
  1506. if (counter->state == PERF_COUNTER_STATE_ACTIVE)
  1507. userpg->offset -= atomic64_read(&counter->hw.prev_count);
  1508. userpg->time_enabled = counter->total_time_enabled +
  1509. atomic64_read(&counter->child_total_time_enabled);
  1510. userpg->time_running = counter->total_time_running +
  1511. atomic64_read(&counter->child_total_time_running);
  1512. barrier();
  1513. ++userpg->lock;
  1514. preempt_enable();
  1515. unlock:
  1516. rcu_read_unlock();
  1517. }
  1518. static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1519. {
  1520. struct perf_counter *counter = vma->vm_file->private_data;
  1521. struct perf_mmap_data *data;
  1522. int ret = VM_FAULT_SIGBUS;
  1523. if (vmf->flags & FAULT_FLAG_MKWRITE) {
  1524. if (vmf->pgoff == 0)
  1525. ret = 0;
  1526. return ret;
  1527. }
  1528. rcu_read_lock();
  1529. data = rcu_dereference(counter->data);
  1530. if (!data)
  1531. goto unlock;
  1532. if (vmf->pgoff == 0) {
  1533. vmf->page = virt_to_page(data->user_page);
  1534. } else {
  1535. int nr = vmf->pgoff - 1;
  1536. if ((unsigned)nr > data->nr_pages)
  1537. goto unlock;
  1538. if (vmf->flags & FAULT_FLAG_WRITE)
  1539. goto unlock;
  1540. vmf->page = virt_to_page(data->data_pages[nr]);
  1541. }
  1542. get_page(vmf->page);
  1543. vmf->page->mapping = vma->vm_file->f_mapping;
  1544. vmf->page->index = vmf->pgoff;
  1545. ret = 0;
  1546. unlock:
  1547. rcu_read_unlock();
  1548. return ret;
  1549. }
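/*
 * Allocate the mmap buffer: one zeroed control page plus nr_pages zeroed
 * data pages, then publish it to readers with rcu_assign_pointer().
 */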
  1550. static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
  1551. {
  1552. struct perf_mmap_data *data;
  1553. unsigned long size;
  1554. int i;
  1555. WARN_ON(atomic_read(&counter->mmap_count));
  1556. size = sizeof(struct perf_mmap_data);
  1557. size += nr_pages * sizeof(void *);
  1558. data = kzalloc(size, GFP_KERNEL);
  1559. if (!data)
  1560. goto fail;
  1561. data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
  1562. if (!data->user_page)
  1563. goto fail_user_page;
  1564. for (i = 0; i < nr_pages; i++) {
  1565. data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
  1566. if (!data->data_pages[i])
  1567. goto fail_data_pages;
  1568. }
  1569. data->nr_pages = nr_pages;
  1570. atomic_set(&data->lock, -1);
  1571. rcu_assign_pointer(counter->data, data);
  1572. return 0;
  1573. fail_data_pages:
  1574. for (i--; i >= 0; i--)
  1575. free_page((unsigned long)data->data_pages[i]);
  1576. free_page((unsigned long)data->user_page);
  1577. fail_user_page:
  1578. kfree(data);
  1579. fail:
  1580. return -ENOMEM;
  1581. }
  1582. static void perf_mmap_free_page(unsigned long addr)
  1583. {
  1584. struct page *page = virt_to_page(addr);
  1585. page->mapping = NULL;
  1586. __free_page(page);
  1587. }
  1588. static void __perf_mmap_data_free(struct rcu_head *rcu_head)
  1589. {
  1590. struct perf_mmap_data *data;
  1591. int i;
  1592. data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
  1593. perf_mmap_free_page((unsigned long)data->user_page);
  1594. for (i = 0; i < data->nr_pages; i++)
  1595. perf_mmap_free_page((unsigned long)data->data_pages[i]);
  1596. kfree(data);
  1597. }
  1598. static void perf_mmap_data_free(struct perf_counter *counter)
  1599. {
  1600. struct perf_mmap_data *data = counter->data;
  1601. WARN_ON(atomic_read(&counter->mmap_count));
  1602. rcu_assign_pointer(counter->data, NULL);
  1603. call_rcu(&data->rcu_head, __perf_mmap_data_free);
  1604. }
  1605. static void perf_mmap_open(struct vm_area_struct *vma)
  1606. {
  1607. struct perf_counter *counter = vma->vm_file->private_data;
  1608. atomic_inc(&counter->mmap_count);
  1609. }
  1610. static void perf_mmap_close(struct vm_area_struct *vma)
  1611. {
  1612. struct perf_counter *counter = vma->vm_file->private_data;
  1613. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1614. if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
  1615. struct user_struct *user = current_user();
  1616. atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
  1617. vma->vm_mm->locked_vm -= counter->data->nr_locked;
  1618. perf_mmap_data_free(counter);
  1619. mutex_unlock(&counter->mmap_mutex);
  1620. }
  1621. }
  1622. static struct vm_operations_struct perf_mmap_vmops = {
  1623. .open = perf_mmap_open,
  1624. .close = perf_mmap_close,
  1625. .fault = perf_mmap_fault,
  1626. .page_mkwrite = perf_mmap_fault,
  1627. };
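/*
 * mmap() the counter: page 0 is the control page (struct
 * perf_counter_mmap_page), pages 1..nr_pages hold event data and must be
 * a power-of-two in number.  The pinned memory is charged against both
 * the per-user mlock allowance and RLIMIT_MEMLOCK.
 */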
  1628. static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  1629. {
  1630. struct perf_counter *counter = file->private_data;
  1631. unsigned long user_locked, user_lock_limit;
  1632. struct user_struct *user = current_user();
  1633. unsigned long locked, lock_limit;
  1634. unsigned long vma_size;
  1635. unsigned long nr_pages;
  1636. long user_extra, extra;
  1637. int ret = 0;
  1638. if (!(vma->vm_flags & VM_SHARED))
  1639. return -EINVAL;
  1640. vma_size = vma->vm_end - vma->vm_start;
  1641. nr_pages = (vma_size / PAGE_SIZE) - 1;
  1642. /*
  1643. * If we have data pages ensure they're a power-of-two number, so we
  1644. * can do bitmasks instead of modulo.
  1645. */
  1646. if (nr_pages != 0 && !is_power_of_2(nr_pages))
  1647. return -EINVAL;
  1648. if (vma_size != PAGE_SIZE * (1 + nr_pages))
  1649. return -EINVAL;
  1650. if (vma->vm_pgoff != 0)
  1651. return -EINVAL;
  1652. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1653. mutex_lock(&counter->mmap_mutex);
  1654. if (atomic_inc_not_zero(&counter->mmap_count)) {
  1655. if (nr_pages != counter->data->nr_pages)
  1656. ret = -EINVAL;
  1657. goto unlock;
  1658. }
  1659. user_extra = nr_pages + 1;
  1660. user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
  1661. /*
  1662. * Increase the limit linearly with more CPUs:
  1663. */
  1664. user_lock_limit *= num_online_cpus();
  1665. user_locked = atomic_long_read(&user->locked_vm) + user_extra;
  1666. extra = 0;
  1667. if (user_locked > user_lock_limit)
  1668. extra = user_locked - user_lock_limit;
  1669. lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
  1670. lock_limit >>= PAGE_SHIFT;
  1671. locked = vma->vm_mm->locked_vm + extra;
  1672. if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
  1673. ret = -EPERM;
  1674. goto unlock;
  1675. }
  1676. WARN_ON(counter->data);
  1677. ret = perf_mmap_data_alloc(counter, nr_pages);
  1678. if (ret)
  1679. goto unlock;
  1680. atomic_set(&counter->mmap_count, 1);
  1681. atomic_long_add(user_extra, &user->locked_vm);
  1682. vma->vm_mm->locked_vm += extra;
  1683. counter->data->nr_locked = extra;
  1684. if (vma->vm_flags & VM_WRITE)
  1685. counter->data->writable = 1;
  1686. unlock:
  1687. mutex_unlock(&counter->mmap_mutex);
  1688. vma->vm_flags |= VM_RESERVED;
  1689. vma->vm_ops = &perf_mmap_vmops;
  1690. return ret;
  1691. }
  1692. static int perf_fasync(int fd, struct file *filp, int on)
  1693. {
  1694. struct inode *inode = filp->f_path.dentry->d_inode;
  1695. struct perf_counter *counter = filp->private_data;
  1696. int retval;
  1697. mutex_lock(&inode->i_mutex);
  1698. retval = fasync_helper(fd, filp, on, &counter->fasync);
  1699. mutex_unlock(&inode->i_mutex);
  1700. if (retval < 0)
  1701. return retval;
  1702. return 0;
  1703. }
  1704. static const struct file_operations perf_fops = {
  1705. .release = perf_release,
  1706. .read = perf_read,
  1707. .poll = perf_poll,
  1708. .unlocked_ioctl = perf_ioctl,
  1709. .compat_ioctl = perf_ioctl,
  1710. .mmap = perf_mmap,
  1711. .fasync = perf_fasync,
  1712. };
  1713. /*
  1714. * Perf counter wakeup
  1715. *
  1716. * If there's data, ensure we set the poll() state and publish everything
  1717. * to user-space before waking everybody up.
  1718. */
  1719. void perf_counter_wakeup(struct perf_counter *counter)
  1720. {
  1721. wake_up_all(&counter->waitq);
  1722. if (counter->pending_kill) {
  1723. kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
  1724. counter->pending_kill = 0;
  1725. }
  1726. }
  1727. /*
  1728. * Pending wakeups
  1729. *
  1730. * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
  1731. *
  1732. * The NMI bit means we cannot possibly take locks. Therefore, maintain a
  1733. * single linked list and use cmpxchg() to add entries lockless.
  1734. */
  1735. static void perf_pending_counter(struct perf_pending_entry *entry)
  1736. {
  1737. struct perf_counter *counter = container_of(entry,
  1738. struct perf_counter, pending);
  1739. if (counter->pending_disable) {
  1740. counter->pending_disable = 0;
  1741. perf_counter_disable(counter);
  1742. }
  1743. if (counter->pending_wakeup) {
  1744. counter->pending_wakeup = 0;
  1745. perf_counter_wakeup(counter);
  1746. }
  1747. }
  1748. #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
  1749. static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
  1750. PENDING_TAIL,
  1751. };
  1752. static void perf_pending_queue(struct perf_pending_entry *entry,
  1753. void (*func)(struct perf_pending_entry *))
  1754. {
  1755. struct perf_pending_entry **head;
  1756. if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
  1757. return;
  1758. entry->func = func;
  1759. head = &get_cpu_var(perf_pending_head);
  1760. do {
  1761. entry->next = *head;
  1762. } while (cmpxchg(head, entry->next, entry) != entry->next);
  1763. set_perf_counter_pending();
  1764. put_cpu_var(perf_pending_head);
  1765. }
  1766. static int __perf_pending_run(void)
  1767. {
  1768. struct perf_pending_entry *list;
  1769. int nr = 0;
  1770. list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
  1771. while (list != PENDING_TAIL) {
  1772. void (*func)(struct perf_pending_entry *);
  1773. struct perf_pending_entry *entry = list;
  1774. list = list->next;
  1775. func = entry->func;
  1776. entry->next = NULL;
  1777. /*
  1778. * Ensure we observe the unqueue before we issue the wakeup,
  1779. * so that we won't be waiting forever.
  1780. * -- see perf_not_pending().
  1781. */
  1782. smp_wmb();
  1783. func(entry);
  1784. nr++;
  1785. }
  1786. return nr;
  1787. }
  1788. static inline int perf_not_pending(struct perf_counter *counter)
  1789. {
  1790. /*
  1791. * If we flush on whatever cpu we run, there is a chance we don't
  1792. * need to wait.
  1793. */
  1794. get_cpu();
  1795. __perf_pending_run();
  1796. put_cpu();
  1797. /*
  1798. * Ensure we see the proper queue state before going to sleep
  1799. * so that we do not miss the wakeup. -- see perf_pending_handle()
  1800. */
  1801. smp_rmb();
  1802. return counter->pending.next == NULL;
  1803. }
  1804. static void perf_pending_sync(struct perf_counter *counter)
  1805. {
  1806. wait_event(counter->waitq, perf_not_pending(counter));
  1807. }
  1808. void perf_counter_do_pending(void)
  1809. {
  1810. __perf_pending_run();
  1811. }
  1812. /*
  1813. * Callchain support -- arch specific
  1814. */
  1815. __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
  1816. {
  1817. return NULL;
  1818. }
  1819. /*
  1820. * Output
  1821. */
  1822. struct perf_output_handle {
  1823. struct perf_counter *counter;
  1824. struct perf_mmap_data *data;
  1825. unsigned long head;
  1826. unsigned long offset;
  1827. int nmi;
  1828. int sample;
  1829. int locked;
  1830. unsigned long flags;
  1831. };
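/*
 * With a writable (non-overwrite) buffer, only allow the write if it
 * does not overtake the user-visible data_tail.
 */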
  1832. static bool perf_output_space(struct perf_mmap_data *data,
  1833. unsigned int offset, unsigned int head)
  1834. {
  1835. unsigned long tail;
  1836. unsigned long mask;
  1837. if (!data->writable)
  1838. return true;
  1839. mask = (data->nr_pages << PAGE_SHIFT) - 1;
  1840. /*
  1841. * Userspace could choose to issue a mb() before updating the tail
  1842. * pointer. So that all reads will be completed before the write is
  1843. * issued.
  1844. */
  1845. tail = ACCESS_ONCE(data->user_page->data_tail);
  1846. smp_rmb();
  1847. offset = (offset - tail) & mask;
  1848. head = (head - tail) & mask;
  1849. if ((int)(head - offset) < 0)
  1850. return false;
  1851. return true;
  1852. }
  1853. static void perf_output_wakeup(struct perf_output_handle *handle)
  1854. {
  1855. atomic_set(&handle->data->poll, POLL_IN);
  1856. if (handle->nmi) {
  1857. handle->counter->pending_wakeup = 1;
  1858. perf_pending_queue(&handle->counter->pending,
  1859. perf_pending_counter);
  1860. } else
  1861. perf_counter_wakeup(handle->counter);
  1862. }
  1863. /*
  1864. * Curious locking construct.
  1865. *
  1866. * We need to ensure a later event doesn't publish a head when a former
  1867. * event isn't done writing. However since we need to deal with NMIs we
  1868. * cannot fully serialize things.
  1869. *
  1870. * What we do is serialize between CPUs so we only have to deal with NMI
  1871. * nesting on a single CPU.
  1872. *
  1873. * We only publish the head (and generate a wakeup) when the outer-most
  1874. * event completes.
  1875. */
  1876. static void perf_output_lock(struct perf_output_handle *handle)
  1877. {
  1878. struct perf_mmap_data *data = handle->data;
  1879. int cpu;
  1880. handle->locked = 0;
  1881. local_irq_save(handle->flags);
  1882. cpu = smp_processor_id();
  1883. if (in_nmi() && atomic_read(&data->lock) == cpu)
  1884. return;
  1885. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  1886. cpu_relax();
  1887. handle->locked = 1;
  1888. }
  1889. static void perf_output_unlock(struct perf_output_handle *handle)
  1890. {
  1891. struct perf_mmap_data *data = handle->data;
  1892. unsigned long head;
  1893. int cpu;
  1894. data->done_head = data->head;
  1895. if (!handle->locked)
  1896. goto out;
  1897. again:
  1898. /*
  1899. * The xchg implies a full barrier that ensures all writes are done
  1900. * before we publish the new head, matched by a rmb() in userspace when
  1901. * reading this position.
  1902. */
  1903. while ((head = atomic_long_xchg(&data->done_head, 0)))
  1904. data->user_page->data_head = head;
  1905. /*
  1906. * NMI can happen here, which means we can miss a done_head update.
  1907. */
  1908. cpu = atomic_xchg(&data->lock, -1);
  1909. WARN_ON_ONCE(cpu != smp_processor_id());
  1910. /*
  1911. * Therefore we have to validate we did not indeed do so.
  1912. */
  1913. if (unlikely(atomic_long_read(&data->done_head))) {
  1914. /*
  1915. * Since we had it locked, we can lock it again.
  1916. */
  1917. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  1918. cpu_relax();
  1919. goto again;
  1920. }
  1921. if (atomic_xchg(&data->wakeup, 0))
  1922. perf_output_wakeup(handle);
  1923. out:
  1924. local_irq_restore(handle->flags);
  1925. }
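/*
 * Copy 'len' bytes into the buffer at handle->offset, crossing page
 * boundaries in the power-of-two page array; the space was already
 * reserved by perf_output_begin().
 */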
  1926. static void perf_output_copy(struct perf_output_handle *handle,
  1927. const void *buf, unsigned int len)
  1928. {
  1929. unsigned int pages_mask;
  1930. unsigned int offset;
  1931. unsigned int size;
  1932. void **pages;
  1933. offset = handle->offset;
  1934. pages_mask = handle->data->nr_pages - 1;
  1935. pages = handle->data->data_pages;
  1936. do {
  1937. unsigned int page_offset;
  1938. int nr;
  1939. nr = (offset >> PAGE_SHIFT) & pages_mask;
  1940. page_offset = offset & (PAGE_SIZE - 1);
  1941. size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
  1942. memcpy(pages[nr] + page_offset, buf, size);
  1943. len -= size;
  1944. buf += size;
  1945. offset += size;
  1946. } while (len);
  1947. handle->offset = offset;
  1948. /*
  1949. * Check we didn't copy past our reservation window, taking the
  1950. * possible unsigned int wrap into account.
  1951. */
  1952. WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
  1953. }
  1954. #define perf_output_put(handle, x) \
  1955. perf_output_copy((handle), &(x), sizeof(x))
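/*
 * Reserve 'size' bytes in the counter's buffer.  data->head is advanced
 * with a cmpxchg() loop so writers (including NMIs) can nest; when there
 * is no room the event is dropped and accounted in data->lost, to be
 * reported by a later PERF_EVENT_LOST record.
 */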
  1956. static int perf_output_begin(struct perf_output_handle *handle,
  1957. struct perf_counter *counter, unsigned int size,
  1958. int nmi, int sample)
  1959. {
  1960. struct perf_mmap_data *data;
  1961. unsigned int offset, head;
  1962. int have_lost;
  1963. struct {
  1964. struct perf_event_header header;
  1965. u64 id;
  1966. u64 lost;
  1967. } lost_event;
  1968. /*
  1969. * For inherited counters we send all the output towards the parent.
  1970. */
  1971. if (counter->parent)
  1972. counter = counter->parent;
  1973. rcu_read_lock();
  1974. data = rcu_dereference(counter->data);
  1975. if (!data)
  1976. goto out;
  1977. handle->data = data;
  1978. handle->counter = counter;
  1979. handle->nmi = nmi;
  1980. handle->sample = sample;
  1981. if (!data->nr_pages)
  1982. goto fail;
  1983. have_lost = atomic_read(&data->lost);
  1984. if (have_lost)
  1985. size += sizeof(lost_event);
  1986. perf_output_lock(handle);
  1987. do {
  1988. offset = head = atomic_long_read(&data->head);
  1989. head += size;
  1990. if (unlikely(!perf_output_space(data, offset, head)))
  1991. goto fail;
  1992. } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
  1993. handle->offset = offset;
  1994. handle->head = head;
  1995. if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
  1996. atomic_set(&data->wakeup, 1);
  1997. if (have_lost) {
  1998. lost_event.header.type = PERF_EVENT_LOST;
  1999. lost_event.header.misc = 0;
  2000. lost_event.header.size = sizeof(lost_event);
  2001. lost_event.id = counter->id;
  2002. lost_event.lost = atomic_xchg(&data->lost, 0);
  2003. perf_output_put(handle, lost_event);
  2004. }
  2005. return 0;
  2006. fail:
  2007. atomic_inc(&data->lost);
  2008. perf_output_unlock(handle);
  2009. out:
  2010. rcu_read_unlock();
  2011. return -ENOSPC;
  2012. }
  2013. static void perf_output_end(struct perf_output_handle *handle)
  2014. {
  2015. struct perf_counter *counter = handle->counter;
  2016. struct perf_mmap_data *data = handle->data;
  2017. int wakeup_events = counter->attr.wakeup_events;
  2018. if (handle->sample && wakeup_events) {
  2019. int events = atomic_inc_return(&data->events);
  2020. if (events >= wakeup_events) {
  2021. atomic_sub(wakeup_events, &data->events);
  2022. atomic_set(&data->wakeup, 1);
  2023. }
  2024. }
  2025. perf_output_unlock(handle);
  2026. rcu_read_unlock();
  2027. }
  2028. static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
  2029. {
  2030. /*
  2031. * only top level counters have the pid namespace they were created in
  2032. */
  2033. if (counter->parent)
  2034. counter = counter->parent;
  2035. return task_tgid_nr_ns(p, counter->ns);
  2036. }
  2037. static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
  2038. {
  2039. /*
  2040. * only top level counters have the pid namespace they were created in
  2041. */
  2042. if (counter->parent)
  2043. counter = counter->parent;
  2044. return task_pid_nr_ns(p, counter->ns);
  2045. }
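/*
 * Emit one sample/overflow record: the header size is computed from
 * attr.sample_type first, then the selected fields are written out in
 * that same order.
 */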
  2046. static void perf_counter_output(struct perf_counter *counter, int nmi,
  2047. struct perf_sample_data *data)
  2048. {
  2049. int ret;
  2050. u64 sample_type = counter->attr.sample_type;
  2051. struct perf_output_handle handle;
  2052. struct perf_event_header header;
  2053. u64 ip;
  2054. struct {
  2055. u32 pid, tid;
  2056. } tid_entry;
  2057. struct {
  2058. u64 id;
  2059. u64 counter;
  2060. } group_entry;
  2061. struct perf_callchain_entry *callchain = NULL;
  2062. int callchain_size = 0;
  2063. u64 time;
  2064. struct {
  2065. u32 cpu, reserved;
  2066. } cpu_entry;
  2067. header.type = 0;
  2068. header.size = sizeof(header);
  2069. header.misc = PERF_EVENT_MISC_OVERFLOW;
  2070. header.misc |= perf_misc_flags(data->regs);
  2071. if (sample_type & PERF_SAMPLE_IP) {
  2072. ip = perf_instruction_pointer(data->regs);
  2073. header.type |= PERF_SAMPLE_IP;
  2074. header.size += sizeof(ip);
  2075. }
  2076. if (sample_type & PERF_SAMPLE_TID) {
  2077. /* namespace issues */
  2078. tid_entry.pid = perf_counter_pid(counter, current);
  2079. tid_entry.tid = perf_counter_tid(counter, current);
  2080. header.type |= PERF_SAMPLE_TID;
  2081. header.size += sizeof(tid_entry);
  2082. }
  2083. if (sample_type & PERF_SAMPLE_TIME) {
  2084. /*
  2085. * Maybe do better on x86 and provide cpu_clock_nmi()
  2086. */
  2087. time = sched_clock();
  2088. header.type |= PERF_SAMPLE_TIME;
  2089. header.size += sizeof(u64);
  2090. }
  2091. if (sample_type & PERF_SAMPLE_ADDR) {
  2092. header.type |= PERF_SAMPLE_ADDR;
  2093. header.size += sizeof(u64);
  2094. }
  2095. if (sample_type & PERF_SAMPLE_ID) {
  2096. header.type |= PERF_SAMPLE_ID;
  2097. header.size += sizeof(u64);
  2098. }
  2099. if (sample_type & PERF_SAMPLE_CPU) {
  2100. header.type |= PERF_SAMPLE_CPU;
  2101. header.size += sizeof(cpu_entry);
  2102. cpu_entry.cpu = raw_smp_processor_id();
  2103. }
  2104. if (sample_type & PERF_SAMPLE_PERIOD) {
  2105. header.type |= PERF_SAMPLE_PERIOD;
  2106. header.size += sizeof(u64);
  2107. }
  2108. if (sample_type & PERF_SAMPLE_GROUP) {
  2109. header.type |= PERF_SAMPLE_GROUP;
  2110. header.size += sizeof(u64) +
  2111. counter->nr_siblings * sizeof(group_entry);
  2112. }
  2113. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2114. callchain = perf_callchain(data->regs);
  2115. if (callchain) {
  2116. callchain_size = (1 + callchain->nr) * sizeof(u64);
  2117. header.type |= PERF_SAMPLE_CALLCHAIN;
  2118. header.size += callchain_size;
  2119. }
  2120. }
  2121. ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
  2122. if (ret)
  2123. return;
  2124. perf_output_put(&handle, header);
  2125. if (sample_type & PERF_SAMPLE_IP)
  2126. perf_output_put(&handle, ip);
  2127. if (sample_type & PERF_SAMPLE_TID)
  2128. perf_output_put(&handle, tid_entry);
  2129. if (sample_type & PERF_SAMPLE_TIME)
  2130. perf_output_put(&handle, time);
  2131. if (sample_type & PERF_SAMPLE_ADDR)
  2132. perf_output_put(&handle, data->addr);
  2133. if (sample_type & PERF_SAMPLE_ID)
  2134. perf_output_put(&handle, counter->id);
  2135. if (sample_type & PERF_SAMPLE_CPU)
  2136. perf_output_put(&handle, cpu_entry);
  2137. if (sample_type & PERF_SAMPLE_PERIOD)
  2138. perf_output_put(&handle, data->period);
  2139. /*
  2140. * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
  2141. */
  2142. if (sample_type & PERF_SAMPLE_GROUP) {
  2143. struct perf_counter *leader, *sub;
  2144. u64 nr = counter->nr_siblings;
  2145. perf_output_put(&handle, nr);
  2146. leader = counter->group_leader;
  2147. list_for_each_entry(sub, &leader->sibling_list, list_entry) {
  2148. if (sub != counter)
  2149. sub->pmu->read(sub);
  2150. group_entry.id = sub->id;
  2151. group_entry.counter = atomic64_read(&sub->count);
  2152. perf_output_put(&handle, group_entry);
  2153. }
  2154. }
  2155. if (callchain)
  2156. perf_output_copy(&handle, callchain, callchain_size);
  2157. perf_output_end(&handle);
  2158. }
  2159. /*
  2160. * fork tracking
  2161. */
  2162. struct perf_fork_event {
  2163. struct task_struct *task;
  2164. struct {
  2165. struct perf_event_header header;
  2166. u32 pid;
  2167. u32 ppid;
  2168. } event;
  2169. };
  2170. static void perf_counter_fork_output(struct perf_counter *counter,
  2171. struct perf_fork_event *fork_event)
  2172. {
  2173. struct perf_output_handle handle;
  2174. int size = fork_event->event.header.size;
  2175. struct task_struct *task = fork_event->task;
  2176. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2177. if (ret)
  2178. return;
  2179. fork_event->event.pid = perf_counter_pid(counter, task);
  2180. fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
  2181. perf_output_put(&handle, fork_event->event);
  2182. perf_output_end(&handle);
  2183. }
  2184. static int perf_counter_fork_match(struct perf_counter *counter)
  2185. {
  2186. if (counter->attr.comm || counter->attr.mmap)
  2187. return 1;
  2188. return 0;
  2189. }
  2190. static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
  2191. struct perf_fork_event *fork_event)
  2192. {
  2193. struct perf_counter *counter;
  2194. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2195. return;
  2196. rcu_read_lock();
  2197. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2198. if (perf_counter_fork_match(counter))
  2199. perf_counter_fork_output(counter, fork_event);
  2200. }
  2201. rcu_read_unlock();
  2202. }
  2203. static void perf_counter_fork_event(struct perf_fork_event *fork_event)
  2204. {
  2205. struct perf_cpu_context *cpuctx;
  2206. struct perf_counter_context *ctx;
  2207. cpuctx = &get_cpu_var(perf_cpu_context);
  2208. perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
  2209. put_cpu_var(perf_cpu_context);
  2210. rcu_read_lock();
  2211. /*
  2212. * doesn't really matter which of the child contexts the
  2213. * events ends up in.
  2214. */
  2215. ctx = rcu_dereference(current->perf_counter_ctxp);
  2216. if (ctx)
  2217. perf_counter_fork_ctx(ctx, fork_event);
  2218. rcu_read_unlock();
  2219. }
  2220. void perf_counter_fork(struct task_struct *task)
  2221. {
  2222. struct perf_fork_event fork_event;
  2223. if (!atomic_read(&nr_comm_counters) &&
  2224. !atomic_read(&nr_mmap_counters))
  2225. return;
  2226. fork_event = (struct perf_fork_event){
  2227. .task = task,
  2228. .event = {
  2229. .header = {
  2230. .type = PERF_EVENT_FORK,
  2231. .size = sizeof(fork_event.event),
  2232. },
  2233. },
  2234. };
  2235. perf_counter_fork_event(&fork_event);
  2236. }
  2237. /*
  2238. * comm tracking
  2239. */
  2240. struct perf_comm_event {
  2241. struct task_struct *task;
  2242. char *comm;
  2243. int comm_size;
  2244. struct {
  2245. struct perf_event_header header;
  2246. u32 pid;
  2247. u32 tid;
  2248. } event;
  2249. };
  2250. static void perf_counter_comm_output(struct perf_counter *counter,
  2251. struct perf_comm_event *comm_event)
  2252. {
  2253. struct perf_output_handle handle;
  2254. int size = comm_event->event.header.size;
  2255. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2256. if (ret)
  2257. return;
  2258. comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
  2259. comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
  2260. perf_output_put(&handle, comm_event->event);
  2261. perf_output_copy(&handle, comm_event->comm,
  2262. comm_event->comm_size);
  2263. perf_output_end(&handle);
  2264. }
  2265. static int perf_counter_comm_match(struct perf_counter *counter)
  2266. {
  2267. if (counter->attr.comm)
  2268. return 1;
  2269. return 0;
  2270. }
  2271. static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
  2272. struct perf_comm_event *comm_event)
  2273. {
  2274. struct perf_counter *counter;
  2275. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2276. return;
  2277. rcu_read_lock();
  2278. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2279. if (perf_counter_comm_match(counter))
  2280. perf_counter_comm_output(counter, comm_event);
  2281. }
  2282. rcu_read_unlock();
  2283. }
  2284. static void perf_counter_comm_event(struct perf_comm_event *comm_event)
  2285. {
  2286. struct perf_cpu_context *cpuctx;
  2287. struct perf_counter_context *ctx;
  2288. unsigned int size;
  2289. char *comm = comm_event->task->comm;
  2290. size = ALIGN(strlen(comm)+1, sizeof(u64));
  2291. comm_event->comm = comm;
  2292. comm_event->comm_size = size;
  2293. comm_event->event.header.size = sizeof(comm_event->event) + size;
  2294. cpuctx = &get_cpu_var(perf_cpu_context);
  2295. perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
  2296. put_cpu_var(perf_cpu_context);
  2297. rcu_read_lock();
  2298. /*
  2299. * doesn't really matter which of the child contexts the
  2300. * events ends up in.
  2301. */
  2302. ctx = rcu_dereference(current->perf_counter_ctxp);
  2303. if (ctx)
  2304. perf_counter_comm_ctx(ctx, comm_event);
  2305. rcu_read_unlock();
  2306. }
  2307. void perf_counter_comm(struct task_struct *task)
  2308. {
  2309. struct perf_comm_event comm_event;
  2310. if (!atomic_read(&nr_comm_counters))
  2311. return;
  2312. comm_event = (struct perf_comm_event){
  2313. .task = task,
  2314. .event = {
  2315. .header = { .type = PERF_EVENT_COMM, },
  2316. },
  2317. };
  2318. perf_counter_comm_event(&comm_event);
  2319. }
  2320. /*
  2321. * mmap tracking
  2322. */
  2323. struct perf_mmap_event {
  2324. struct vm_area_struct *vma;
  2325. const char *file_name;
  2326. int file_size;
  2327. struct {
  2328. struct perf_event_header header;
  2329. u32 pid;
  2330. u32 tid;
  2331. u64 start;
  2332. u64 len;
  2333. u64 pgoff;
  2334. } event;
  2335. };
  2336. static void perf_counter_mmap_output(struct perf_counter *counter,
  2337. struct perf_mmap_event *mmap_event)
  2338. {
  2339. struct perf_output_handle handle;
  2340. int size = mmap_event->event.header.size;
  2341. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2342. if (ret)
  2343. return;
  2344. mmap_event->event.pid = perf_counter_pid(counter, current);
  2345. mmap_event->event.tid = perf_counter_tid(counter, current);
  2346. perf_output_put(&handle, mmap_event->event);
  2347. perf_output_copy(&handle, mmap_event->file_name,
  2348. mmap_event->file_size);
  2349. perf_output_end(&handle);
  2350. }
  2351. static int perf_counter_mmap_match(struct perf_counter *counter,
  2352. struct perf_mmap_event *mmap_event)
  2353. {
  2354. if (counter->attr.mmap)
  2355. return 1;
  2356. return 0;
  2357. }
  2358. static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
  2359. struct perf_mmap_event *mmap_event)
  2360. {
  2361. struct perf_counter *counter;
  2362. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2363. return;
  2364. rcu_read_lock();
  2365. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2366. if (perf_counter_mmap_match(counter, mmap_event))
  2367. perf_counter_mmap_output(counter, mmap_event);
  2368. }
  2369. rcu_read_unlock();
  2370. }
  2371. static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
  2372. {
  2373. struct perf_cpu_context *cpuctx;
  2374. struct perf_counter_context *ctx;
  2375. struct vm_area_struct *vma = mmap_event->vma;
  2376. struct file *file = vma->vm_file;
  2377. unsigned int size;
  2378. char tmp[16];
  2379. char *buf = NULL;
  2380. const char *name;
  2381. if (file) {
  2382. buf = kzalloc(PATH_MAX, GFP_KERNEL);
  2383. if (!buf) {
  2384. name = strncpy(tmp, "//enomem", sizeof(tmp));
  2385. goto got_name;
  2386. }
  2387. name = d_path(&file->f_path, buf, PATH_MAX);
  2388. if (IS_ERR(name)) {
  2389. name = strncpy(tmp, "//toolong", sizeof(tmp));
  2390. goto got_name;
  2391. }
  2392. } else {
  2393. name = arch_vma_name(mmap_event->vma);
  2394. if (name)
  2395. goto got_name;
  2396. if (!vma->vm_mm) {
  2397. name = strncpy(tmp, "[vdso]", sizeof(tmp));
  2398. goto got_name;
  2399. }
  2400. name = strncpy(tmp, "//anon", sizeof(tmp));
  2401. goto got_name;
  2402. }
  2403. got_name:
  2404. size = ALIGN(strlen(name)+1, sizeof(u64));
  2405. mmap_event->file_name = name;
  2406. mmap_event->file_size = size;
  2407. mmap_event->event.header.size = sizeof(mmap_event->event) + size;
  2408. cpuctx = &get_cpu_var(perf_cpu_context);
  2409. perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
  2410. put_cpu_var(perf_cpu_context);
  2411. rcu_read_lock();
  2412. /*
  2413. * doesn't really matter which of the child contexts the
  2414. * events ends up in.
  2415. */
  2416. ctx = rcu_dereference(current->perf_counter_ctxp);
  2417. if (ctx)
  2418. perf_counter_mmap_ctx(ctx, mmap_event);
  2419. rcu_read_unlock();
  2420. kfree(buf);
  2421. }
  2422. void __perf_counter_mmap(struct vm_area_struct *vma)
  2423. {
  2424. struct perf_mmap_event mmap_event;
  2425. if (!atomic_read(&nr_mmap_counters))
  2426. return;
  2427. mmap_event = (struct perf_mmap_event){
  2428. .vma = vma,
  2429. .event = {
  2430. .header = { .type = PERF_EVENT_MMAP, },
  2431. .start = vma->vm_start,
  2432. .len = vma->vm_end - vma->vm_start,
  2433. .pgoff = vma->vm_pgoff,
  2434. },
  2435. };
  2436. perf_counter_mmap_event(&mmap_event);
  2437. }
  2438. /*
  2439. * Log sample_period changes so that analyzing tools can re-normalize the
  2440. * event flow.
  2441. */
  2442. struct freq_event {
  2443. struct perf_event_header header;
  2444. u64 time;
  2445. u64 id;
  2446. u64 period;
  2447. };
  2448. static void perf_log_period(struct perf_counter *counter, u64 period)
  2449. {
  2450. struct perf_output_handle handle;
  2451. struct freq_event event;
  2452. int ret;
  2453. if (counter->hw.sample_period == period)
  2454. return;
  2455. if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
  2456. return;
  2457. event = (struct freq_event) {
  2458. .header = {
  2459. .type = PERF_EVENT_PERIOD,
  2460. .misc = 0,
  2461. .size = sizeof(event),
  2462. },
  2463. .time = sched_clock(),
  2464. .id = counter->id,
  2465. .period = period,
  2466. };
  2467. ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
  2468. if (ret)
  2469. return;
  2470. perf_output_put(&handle, event);
  2471. perf_output_end(&handle);
  2472. }
  2473. /*
  2474. * IRQ throttle logging
  2475. */
  2476. static void perf_log_throttle(struct perf_counter *counter, int enable)
  2477. {
  2478. struct perf_output_handle handle;
  2479. int ret;
  2480. struct {
  2481. struct perf_event_header header;
  2482. u64 time;
  2483. u64 id;
  2484. } throttle_event = {
  2485. .header = {
  2486. .type = PERF_EVENT_THROTTLE + 1,
  2487. .misc = 0,
  2488. .size = sizeof(throttle_event),
  2489. },
  2490. .time = sched_clock(),
  2491. .id = counter->id,
  2492. };
  2493. ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
  2494. if (ret)
  2495. return;
  2496. perf_output_put(&handle, throttle_event);
  2497. perf_output_end(&handle);
  2498. }
  2499. /*
  2500. * Generic counter overflow handling, sampling.
  2501. */
  2502. int perf_counter_overflow(struct perf_counter *counter, int nmi,
  2503. struct perf_sample_data *data)
  2504. {
  2505. int events = atomic_read(&counter->event_limit);
  2506. int throttle = counter->pmu->unthrottle != NULL;
  2507. struct hw_perf_counter *hwc = &counter->hw;
  2508. int ret = 0;
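	/*
	 * A non-zero return tells the caller to stop (throttle or disable)
	 * the counter.
	 */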
  2509. if (!throttle) {
  2510. hwc->interrupts++;
  2511. } else {
  2512. if (hwc->interrupts != MAX_INTERRUPTS) {
  2513. hwc->interrupts++;
  2514. if (HZ * hwc->interrupts >
  2515. (u64)sysctl_perf_counter_sample_rate) {
  2516. hwc->interrupts = MAX_INTERRUPTS;
  2517. perf_log_throttle(counter, 0);
  2518. ret = 1;
  2519. }
  2520. } else {
  2521. /*
  2522. * Keep re-disabling counters even though on the previous
  2523. * pass we disabled it - just in case we raced with a
  2524. * sched-in and the counter got enabled again:
  2525. */
  2526. ret = 1;
  2527. }
  2528. }
  2529. if (counter->attr.freq) {
  2530. u64 now = sched_clock();
  2531. s64 delta = now - hwc->freq_stamp;
  2532. hwc->freq_stamp = now;
  2533. if (delta > 0 && delta < TICK_NSEC)
  2534. perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
  2535. }
  2536. /*
  2537. * XXX event_limit might not quite work as expected on inherited
  2538. * counters
  2539. */
  2540. counter->pending_kill = POLL_IN;
  2541. if (events && atomic_dec_and_test(&counter->event_limit)) {
  2542. ret = 1;
  2543. counter->pending_kill = POLL_HUP;
  2544. if (nmi) {
  2545. counter->pending_disable = 1;
  2546. perf_pending_queue(&counter->pending,
  2547. perf_pending_counter);
  2548. } else
  2549. perf_counter_disable(counter);
  2550. }
  2551. perf_counter_output(counter, nmi, data);
  2552. return ret;
  2553. }
  2554. /*
  2555. * Generic software counter infrastructure
  2556. */
  2557. static void perf_swcounter_update(struct perf_counter *counter)
  2558. {
  2559. struct hw_perf_counter *hwc = &counter->hw;
  2560. u64 prev, now;
  2561. s64 delta;
  2562. again:
  2563. prev = atomic64_read(&hwc->prev_count);
  2564. now = atomic64_read(&hwc->count);
  2565. if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
  2566. goto again;
  2567. delta = now - prev;
  2568. atomic64_add(delta, &counter->count);
  2569. atomic64_sub(delta, &hwc->period_left);
  2570. }
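/*
 * Re-arm the software counter: start hwc->count and hwc->prev_count at
 * -left so that the next 'left' events bring the count back up to zero,
 * the overflow point.
 */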
  2571. static void perf_swcounter_set_period(struct perf_counter *counter)
  2572. {
  2573. struct hw_perf_counter *hwc = &counter->hw;
  2574. s64 left = atomic64_read(&hwc->period_left);
  2575. s64 period = hwc->sample_period;
  2576. if (unlikely(left <= -period)) {
  2577. left = period;
  2578. atomic64_set(&hwc->period_left, left);
  2579. hwc->last_period = period;
  2580. }
  2581. if (unlikely(left <= 0)) {
  2582. left += period;
  2583. atomic64_add(period, &hwc->period_left);
  2584. hwc->last_period = period;
  2585. }
  2586. atomic64_set(&hwc->prev_count, -left);
  2587. atomic64_set(&hwc->count, -left);
  2588. }
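/*
 * hrtimer handler driving the clock-based software counters: read the
 * counter, deliver an overflow sample, and re-arm the timer (clamped to
 * at least 10us).
 */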
  2589. static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
  2590. {
  2591. enum hrtimer_restart ret = HRTIMER_RESTART;
  2592. struct perf_sample_data data;
  2593. struct perf_counter *counter;
  2594. u64 period;
  2595. counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
  2596. counter->pmu->read(counter);
  2597. data.addr = 0;
  2598. data.regs = get_irq_regs();
  2599. /*
  2600. * In case we exclude kernel IPs or are somehow not in interrupt
  2601. * context, provide the next best thing, the user IP.
  2602. */
  2603. if ((counter->attr.exclude_kernel || !data.regs) &&
  2604. !counter->attr.exclude_user)
  2605. data.regs = task_pt_regs(current);
  2606. if (data.regs) {
  2607. if (perf_counter_overflow(counter, 0, &data))
  2608. ret = HRTIMER_NORESTART;
  2609. }
  2610. period = max_t(u64, 10000, counter->hw.sample_period);
  2611. hrtimer_forward_now(hrtimer, ns_to_ktime(period));
  2612. return ret;
  2613. }
  2614. static void perf_swcounter_overflow(struct perf_counter *counter,
  2615. int nmi, struct perf_sample_data *data)
  2616. {
  2617. data->period = counter->hw.last_period;
  2618. perf_swcounter_update(counter);
  2619. perf_swcounter_set_period(counter);
  2620. if (perf_counter_overflow(counter, nmi, data))
  2621. /* soft-disable the counter */
  2622. ;
  2623. }
  2624. static int perf_swcounter_is_counting(struct perf_counter *counter)
  2625. {
  2626. struct perf_counter_context *ctx;
  2627. unsigned long flags;
  2628. int count;
  2629. if (counter->state == PERF_COUNTER_STATE_ACTIVE)
  2630. return 1;
  2631. if (counter->state != PERF_COUNTER_STATE_INACTIVE)
  2632. return 0;
  2633. /*
  2634. * If the counter is inactive, it could be just because
  2635. * its task is scheduled out, or because it's in a group
  2636. * which could not go on the PMU. We want to count in
  2637. * the first case but not the second. If the context is
  2638. * currently active then an inactive software counter must
  2639. * be the second case. If it's not currently active then
  2640. * we need to know whether the counter was active when the
  2641. * context was last active, which we can determine by
  2642. * comparing counter->tstamp_stopped with ctx->time.
  2643. *
  2644. * We are within an RCU read-side critical section,
  2645. * which protects the existence of *ctx.
  2646. */
  2647. ctx = counter->ctx;
  2648. spin_lock_irqsave(&ctx->lock, flags);
  2649. count = 1;
  2650. /* Re-check state now we have the lock */
  2651. if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
  2652. counter->ctx->is_active ||
  2653. counter->tstamp_stopped < ctx->time)
  2654. count = 0;
  2655. spin_unlock_irqrestore(&ctx->lock, flags);
  2656. return count;
  2657. }
  2658. static int perf_swcounter_match(struct perf_counter *counter,
  2659. enum perf_type_id type,
  2660. u32 event, struct pt_regs *regs)
  2661. {
  2662. if (!perf_swcounter_is_counting(counter))
  2663. return 0;
  2664. if (counter->attr.type != type)
  2665. return 0;
  2666. if (counter->attr.config != event)
  2667. return 0;
  2668. if (regs) {
  2669. if (counter->attr.exclude_user && user_mode(regs))
  2670. return 0;
  2671. if (counter->attr.exclude_kernel && !user_mode(regs))
  2672. return 0;
  2673. }
  2674. return 1;
  2675. }
  2676. static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
  2677. int nmi, struct perf_sample_data *data)
  2678. {
  2679. int neg = atomic64_add_negative(nr, &counter->hw.count);
  2680. if (counter->hw.sample_period && !neg && data->regs)
  2681. perf_swcounter_overflow(counter, nmi, data);
  2682. }
  2683. static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
  2684. enum perf_type_id type,
  2685. u32 event, u64 nr, int nmi,
  2686. struct perf_sample_data *data)
  2687. {
  2688. struct perf_counter *counter;
  2689. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2690. return;
  2691. rcu_read_lock();
  2692. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2693. if (perf_swcounter_match(counter, type, event, data->regs))
  2694. perf_swcounter_add(counter, nr, nmi, data);
  2695. }
  2696. rcu_read_unlock();
  2697. }
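/*
 * One recursion counter per context level (task, softirq, hardirq, NMI):
 * a software event raised while another one is already being processed
 * at the same level is dropped instead of recursing.
 */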
  2698. static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
  2699. {
  2700. if (in_nmi())
  2701. return &cpuctx->recursion[3];
  2702. if (in_irq())
  2703. return &cpuctx->recursion[2];
  2704. if (in_softirq())
  2705. return &cpuctx->recursion[1];
  2706. return &cpuctx->recursion[0];
  2707. }
  2708. static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
  2709. u64 nr, int nmi,
  2710. struct perf_sample_data *data)
  2711. {
  2712. struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
  2713. int *recursion = perf_swcounter_recursion_context(cpuctx);
  2714. struct perf_counter_context *ctx;
  2715. if (*recursion)
  2716. goto out;
  2717. (*recursion)++;
  2718. barrier();
  2719. perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
  2720. nr, nmi, data);
  2721. rcu_read_lock();
  2722. /*
  2723. * doesn't really matter which of the child contexts the
  2724. * events ends up in.
  2725. */
  2726. ctx = rcu_dereference(current->perf_counter_ctxp);
  2727. if (ctx)
  2728. perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
  2729. rcu_read_unlock();
  2730. barrier();
  2731. (*recursion)--;
  2732. out:
  2733. put_cpu_var(perf_cpu_context);
  2734. }
  2735. void __perf_swcounter_event(u32 event, u64 nr, int nmi,
  2736. struct pt_regs *regs, u64 addr)
  2737. {
  2738. struct perf_sample_data data = {
  2739. .regs = regs,
  2740. .addr = addr,
  2741. };
  2742. do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
  2743. }
  2744. static void perf_swcounter_read(struct perf_counter *counter)
  2745. {
  2746. perf_swcounter_update(counter);
  2747. }
  2748. static int perf_swcounter_enable(struct perf_counter *counter)
  2749. {
  2750. perf_swcounter_set_period(counter);
  2751. return 0;
  2752. }
  2753. static void perf_swcounter_disable(struct perf_counter *counter)
  2754. {
  2755. perf_swcounter_update(counter);
  2756. }
  2757. static const struct pmu perf_ops_generic = {
  2758. .enable = perf_swcounter_enable,
  2759. .disable = perf_swcounter_disable,
  2760. .read = perf_swcounter_read,
  2761. };
/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
        int cpu = raw_smp_processor_id();
        s64 prev;
        u64 now;

        now = cpu_clock(cpu);
        prev = atomic64_read(&counter->hw.prev_count);
        atomic64_set(&counter->hw.prev_count, now);
        atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int cpu = raw_smp_processor_id();

        atomic64_set(&hwc->prev_count, cpu_clock(cpu));
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swcounter_hrtimer;
        if (hwc->sample_period) {
                u64 period = max_t(u64, 10000, hwc->sample_period);
                __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL, 0);
        }

        return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
        if (counter->hw.sample_period)
                hrtimer_cancel(&counter->hw.hrtimer);
        cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
        cpu_clock_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_clock = {
        .enable         = cpu_clock_perf_counter_enable,
        .disable        = cpu_clock_perf_counter_disable,
        .read           = cpu_clock_perf_counter_read,
};
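
/*
 * The cpu clock counter above measures wall time on the local cpu
 * (cpu_clock()), whereas the task clock counter below accumulates the
 * context's time field, which only advances while the monitored task is
 * actually scheduled in.
 */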
/*
 * Software counter: task time clock
 */

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
        u64 prev;
        s64 delta;

        prev = atomic64_xchg(&counter->hw.prev_count, now);
        delta = now - prev;
        atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        u64 now;

        now = counter->ctx->time;

        atomic64_set(&hwc->prev_count, now);
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swcounter_hrtimer;
        if (hwc->sample_period) {
                u64 period = max_t(u64, 10000, hwc->sample_period);
                __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL, 0);
        }

        return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
        if (counter->hw.sample_period)
                hrtimer_cancel(&counter->hw.hrtimer);
        task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
        u64 time;

        if (!in_nmi()) {
                update_context_time(counter->ctx);
                time = counter->ctx->time;
        } else {
                u64 now = perf_clock();
                u64 delta = now - counter->ctx->timestamp;
                time = counter->ctx->time + delta;
        }

        task_clock_perf_counter_update(counter, time);
}

static const struct pmu perf_ops_task_clock = {
        .enable         = task_clock_perf_counter_enable,
        .disable        = task_clock_perf_counter_disable,
        .read           = task_clock_perf_counter_read,
};
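
/*
 * Tracepoint support: with CONFIG_EVENT_PROFILE the ftrace profiling
 * hooks call perf_tpcounter_event() whenever an enabled tracepoint
 * fires, feeding it into the generic software-counter path above.
 * Without CONFIG_EVENT_PROFILE, tracepoint counters simply cannot be
 * created (tp_perf_counter_init() returns NULL).
 */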
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
        struct perf_sample_data data = {
                .regs = get_irq_regs(),
                .addr = 0,
        };

        if (!data.regs)
                data.regs = task_pt_regs(current);

        do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
        ftrace_profile_disable(perf_event_id(&counter->attr));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
        int event_id = perf_event_id(&counter->attr);
        int ret;

        ret = ftrace_profile_enable(event_id);
        if (ret)
                return NULL;

        counter->destroy = tp_perf_counter_destroy;

        return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
        return NULL;
}
#endif
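
/*
 * Per-event enable counts for the plain software counters.  The counts
 * let the (assumed) perf_swcounter_event() fast-path wrapper skip the
 * slow path entirely while no counter of that type exists anywhere in
 * the system; sw_perf_counter_init() bumps the count and
 * sw_perf_counter_destroy() drops it again.
 */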
atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_counter_destroy(struct perf_counter *counter)
{
        u64 event = counter->attr.config;

        WARN_ON(counter->parent);

        atomic_dec(&perf_swcounter_enabled[event]);
}

static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
        const struct pmu *pmu = NULL;
        u64 event = counter->attr.config;

        /*
         * Software counters (currently) can't in general distinguish
         * between user, kernel and hypervisor events.
         * However, context switches and cpu migrations are considered
         * to be kernel events, and page faults are never hypervisor
         * events.
         */
        switch (event) {
        case PERF_COUNT_SW_CPU_CLOCK:
                pmu = &perf_ops_cpu_clock;
                break;
        case PERF_COUNT_SW_TASK_CLOCK:
                /*
                 * If the user instantiates this as a per-cpu counter,
                 * use the cpu_clock counter instead.
                 */
                if (counter->ctx->task)
                        pmu = &perf_ops_task_clock;
                else
                        pmu = &perf_ops_cpu_clock;
                break;
        case PERF_COUNT_SW_PAGE_FAULTS:
        case PERF_COUNT_SW_PAGE_FAULTS_MIN:
        case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
        case PERF_COUNT_SW_CONTEXT_SWITCHES:
        case PERF_COUNT_SW_CPU_MIGRATIONS:
                if (!counter->parent) {
                        atomic_inc(&perf_swcounter_enabled[event]);
                        counter->destroy = sw_perf_counter_destroy;
                }
                pmu = &perf_ops_generic;
                break;
        }

        return pmu;
}

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_attr *attr,
                   int cpu,
                   struct perf_counter_context *ctx,
                   struct perf_counter *group_leader,
                   struct perf_counter *parent_counter,
                   gfp_t gfpflags)
{
        const struct pmu *pmu;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
        long err;

        counter = kzalloc(sizeof(*counter), gfpflags);
        if (!counter)
                return ERR_PTR(-ENOMEM);

        /*
         * Single counters are their own group leaders, with an
         * empty sibling list:
         */
        if (!group_leader)
                group_leader = counter;

        mutex_init(&counter->child_mutex);
        INIT_LIST_HEAD(&counter->child_list);

        INIT_LIST_HEAD(&counter->list_entry);
        INIT_LIST_HEAD(&counter->event_entry);
        INIT_LIST_HEAD(&counter->sibling_list);
        init_waitqueue_head(&counter->waitq);

        mutex_init(&counter->mmap_mutex);

        counter->cpu            = cpu;
        counter->attr           = *attr;
        counter->group_leader   = group_leader;
        counter->pmu            = NULL;
        counter->ctx            = ctx;
        counter->oncpu          = -1;

        counter->parent         = parent_counter;

        counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
        counter->id             = atomic64_inc_return(&perf_counter_id);

        counter->state          = PERF_COUNTER_STATE_INACTIVE;

        if (attr->disabled)
                counter->state = PERF_COUNTER_STATE_OFF;

        pmu = NULL;

        hwc = &counter->hw;
        hwc->sample_period = attr->sample_period;
        if (attr->freq && attr->sample_freq)
                hwc->sample_period = 1;

        atomic64_set(&hwc->period_left, hwc->sample_period);

        /*
         * we currently do not support PERF_SAMPLE_GROUP on inherited counters
         */
        if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
                goto done;

        switch (attr->type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                pmu = hw_perf_counter_init(counter);
                break;

        case PERF_TYPE_SOFTWARE:
                pmu = sw_perf_counter_init(counter);
                break;

        case PERF_TYPE_TRACEPOINT:
                pmu = tp_perf_counter_init(counter);
                break;

        default:
                break;
        }
done:
        err = 0;
        if (!pmu)
                err = -EINVAL;
        else if (IS_ERR(pmu))
                err = PTR_ERR(pmu);

        if (err) {
                if (counter->ns)
                        put_pid_ns(counter->ns);
                kfree(counter);
                return ERR_PTR(err);
        }

        counter->pmu = pmu;

        if (!counter->parent) {
                atomic_inc(&nr_counters);
                if (counter->attr.mmap)
                        atomic_inc(&nr_mmap_counters);
                if (counter->attr.comm)
                        atomic_inc(&nr_comm_counters);
        }

        return counter;
}
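
/*
 * Copy the attr structure from user space, coping with ABI growth: the
 * structure is versioned by its size.  A size of 0 means the original
 * PERF_ATTR_SIZE_VER0 layout, a known-but-smaller size is zero extended
 * (the memset below), and a larger-than-known size is only accepted if
 * every byte past what this kernel understands is zero.
 */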
static int perf_copy_attr(struct perf_counter_attr __user *uattr,
                          struct perf_counter_attr *attr)
{
        int ret;
        u32 size;

        if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
                return -EFAULT;

        /*
         * zero the full structure, so that a short copy will be nice.
         */
        memset(attr, 0, sizeof(*attr));

        ret = get_user(size, &uattr->size);
        if (ret)
                return ret;

        if (size > PAGE_SIZE)   /* silly large */
                goto err_size;

        if (!size)              /* abi compat */
                size = PERF_ATTR_SIZE_VER0;

        if (size < PERF_ATTR_SIZE_VER0)
                goto err_size;

        /*
         * If we're handed a bigger struct than we know of,
         * ensure all the unknown bits are 0.
         */
        if (size > sizeof(*attr)) {
                unsigned char __user *addr;
                unsigned char __user *end;
                unsigned char val;

                addr = (void __user *)uattr + sizeof(*attr);
                end  = (void __user *)uattr + size;

                /*
                 * Check the trailing bytes one by one; anything non-zero
                 * means the structure uses features this kernel does not
                 * know about.  (Stepping byte-wise also avoids the pointer
                 * arithmetic bug of advancing an unsigned long pointer by
                 * sizeof(unsigned long) elements per iteration.)
                 */
                for (; addr < end; addr++) {
                        ret = get_user(val, addr);
                        if (ret)
                                return ret;
                        if (val)
                                goto err_size;
                }
                ret = 0;
        }

        ret = copy_from_user(attr, uattr, size);
        if (ret)
                return -EFAULT;

        /*
         * If the type exists, the corresponding creation will verify
         * the attr->config.
         */
        if (attr->type >= PERF_TYPE_MAX)
                return -EINVAL;

        if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
                return -EINVAL;

        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
                return -EINVAL;

        if (attr->read_format & ~(PERF_FORMAT_MAX-1))
                return -EINVAL;

out:
        return ret;

err_size:
        put_user(sizeof(*attr), &uattr->size);
        ret = -E2BIG;
        goto out;
}

/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @attr_uptr:  event type attributes for monitoring/sampling
 * @pid:        target pid
 * @cpu:        target cpu
 * @group_fd:   group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
                struct perf_counter_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
        struct perf_counter *counter, *group_leader;
        struct perf_counter_attr attr;
        struct perf_counter_context *ctx;
        struct file *counter_file = NULL;
        struct file *group_file = NULL;
        int fput_needed = 0;
        int fput_needed2 = 0;
        int ret;

        /* for future expandability... */
        if (flags)
                return -EINVAL;

        ret = perf_copy_attr(attr_uptr, &attr);
        if (ret)
                return ret;

        if (!attr.exclude_kernel) {
                if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
        }

        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_counter_sample_rate)
                        return -EINVAL;
        }

        /*
         * Get the target context (task or percpu):
         */
        ctx = find_get_context(pid, cpu);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /*
         * Look up the group leader (we will attach this counter to it):
         */
        group_leader = NULL;
        if (group_fd != -1) {
                ret = -EINVAL;
                group_file = fget_light(group_fd, &fput_needed);
                if (!group_file)
                        goto err_put_context;
                if (group_file->f_op != &perf_fops)
                        goto err_put_context;

                group_leader = group_file->private_data;
                /*
                 * Do not allow a recursive hierarchy (this new sibling
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
                        goto err_put_context;
                /*
                 * Do not allow to attach to a group in a different
                 * task or CPU context:
                 */
                if (group_leader->ctx != ctx)
                        goto err_put_context;
                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
                        goto err_put_context;
        }

        counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
                                     NULL, GFP_KERNEL);
        ret = PTR_ERR(counter);
        if (IS_ERR(counter))
                goto err_put_context;

        ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
        if (ret < 0)
                goto err_free_put_context;

        counter_file = fget_light(ret, &fput_needed2);
        if (!counter_file)
                goto err_free_put_context;

        counter->filp = counter_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, counter, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);

        counter->owner = current;
        get_task_struct(current);
        mutex_lock(&current->perf_counter_mutex);
        list_add_tail(&counter->owner_entry, &current->perf_counter_list);
        mutex_unlock(&current->perf_counter_mutex);

        fput_light(counter_file, fput_needed2);

out_fput:
        fput_light(group_file, fput_needed);

        return ret;

err_free_put_context:
        kfree(counter);

err_put_context:
        put_ctx(ctx);

        goto out_fput;
}
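
/*
 * A rough sketch of how user space is expected to drive the syscall
 * above (illustrative, not a verbatim tool excerpt): fill in a
 * perf_counter_attr, open a counter on a task or cpu, then read() the
 * value or mmap() the fd to get at the sampling buffer, e.g.:
 *
 *      struct perf_counter_attr attr = {
 *              .type   = PERF_TYPE_SOFTWARE,
 *              .config = PERF_COUNT_SW_TASK_CLOCK,
 *              .size   = sizeof(attr),
 *      };
 *      uint64_t value;
 *      int fd = syscall(__NR_perf_counter_open, &attr, getpid(), -1, -1, 0);
 *      ...
 *      read(fd, &value, sizeof(value));
 *
 * pid == -1 with cpu >= 0 selects a per-cpu counter; group_fd == -1
 * makes the new counter its own group leader.
 */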

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
                struct task_struct *parent,
                struct perf_counter_context *parent_ctx,
                struct task_struct *child,
                struct perf_counter *group_leader,
                struct perf_counter_context *child_ctx)
{
        struct perf_counter *child_counter;

        /*
         * Instead of creating recursive hierarchies of counters,
         * we link inherited counters back to the original parent,
         * which has a filp for sure, which we use as the reference
         * count:
         */
        if (parent_counter->parent)
                parent_counter = parent_counter->parent;

        child_counter = perf_counter_alloc(&parent_counter->attr,
                                           parent_counter->cpu, child_ctx,
                                           group_leader, parent_counter,
                                           GFP_KERNEL);
        if (IS_ERR(child_counter))
                return child_counter;
        get_ctx(child_ctx);

        /*
         * Make the child state follow the state of the parent counter,
         * not its attr.disabled bit.  We hold the parent's mutex,
         * so we won't race with perf_counter_{en, dis}able_family.
         */
        if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
                child_counter->state = PERF_COUNTER_STATE_INACTIVE;
        else
                child_counter->state = PERF_COUNTER_STATE_OFF;

        if (parent_counter->attr.freq)
                child_counter->hw.sample_period = parent_counter->hw.sample_period;

        /*
         * Link it up in the child's context:
         */
        add_counter_to_ctx(child_counter, child_ctx);

        /*
         * Get a reference to the parent filp - we will fput it
         * when the child counter exits. This is safe to do because
         * we are in the parent and we know that the filp still
         * exists and has a nonzero count:
         */
        atomic_long_inc(&parent_counter->filp->f_count);

        /*
         * Link this into the parent counter's child list
         */
        WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
        mutex_lock(&parent_counter->child_mutex);
        list_add_tail(&child_counter->child_list, &parent_counter->child_list);
        mutex_unlock(&parent_counter->child_mutex);

        return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
                         struct task_struct *parent,
                         struct perf_counter_context *parent_ctx,
                         struct task_struct *child,
                         struct perf_counter_context *child_ctx)
{
        struct perf_counter *leader;
        struct perf_counter *sub;
        struct perf_counter *child_ctr;

        leader = inherit_counter(parent_counter, parent, parent_ctx,
                                 child, NULL, child_ctx);
        if (IS_ERR(leader))
                return PTR_ERR(leader);
        list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
                child_ctr = inherit_counter(sub, parent, parent_ctx,
                                            child, leader, child_ctx);
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);
        }
        return 0;
}

static void sync_child_counter(struct perf_counter *child_counter,
                               struct perf_counter *parent_counter)
{
        u64 child_val;

        child_val = atomic64_read(&child_counter->count);

        /*
         * Add back the child's count to the parent's count:
         */
        atomic64_add(child_val, &parent_counter->count);
        atomic64_add(child_counter->total_time_enabled,
                     &parent_counter->child_total_time_enabled);
        atomic64_add(child_counter->total_time_running,
                     &parent_counter->child_total_time_running);

        /*
         * Remove this counter from the parent's list
         */
        WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
        mutex_lock(&parent_counter->child_mutex);
        list_del_init(&child_counter->child_list);
        mutex_unlock(&parent_counter->child_mutex);

        /*
         * Release the parent counter, if this was the last
         * reference to it.
         */
        fput(parent_counter->filp);
}

static void
__perf_counter_exit_task(struct perf_counter *child_counter,
                         struct perf_counter_context *child_ctx)
{
        struct perf_counter *parent_counter;

        update_counter_times(child_counter);
        perf_counter_remove_from_context(child_counter);

        parent_counter = child_counter->parent;
        /*
         * It can happen that parent exits first, and has counters
         * that are still around due to the child reference. These
         * counters need to be zapped - but otherwise linger.
         */
        if (parent_counter) {
                sync_child_counter(child_counter, parent_counter);
                free_counter(child_counter);
        }
}

/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
        struct perf_counter *child_counter, *tmp;
        struct perf_counter_context *child_ctx;
        unsigned long flags;

        if (likely(!child->perf_counter_ctxp))
                return;

        local_irq_save(flags);
        /*
         * We can't reschedule here because interrupts are disabled,
         * and either child is current or it is a task that can't be
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
        child_ctx = child->perf_counter_ctxp;
        __perf_counter_task_sched_out(child_ctx);

        /*
         * Take the context lock here so that if find_get_context is
         * reading child->perf_counter_ctxp, we wait until it has
         * incremented the context's refcount before we do put_ctx below.
         */
        spin_lock(&child_ctx->lock);
        child->perf_counter_ctxp = NULL;
        if (child_ctx->parent_ctx) {
                /*
                 * This context is a clone; unclone it so it can't get
                 * swapped to another process while we're removing all
                 * the counters from it.
                 */
                put_ctx(child_ctx->parent_ctx);
                child_ctx->parent_ctx = NULL;
        }
        spin_unlock(&child_ctx->lock);
        local_irq_restore(flags);

        /*
         * We can recurse on the same lock type through:
         *
         *   __perf_counter_exit_task()
         *     sync_child_counter()
         *       fput(parent_counter->filp)
         *         perf_release()
         *           mutex_lock(&ctx->mutex)
         *
         * But since it's the parent context it won't be the same instance.
         */
        mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
        list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
                                 list_entry)
                __perf_counter_exit_task(child_counter, child_ctx);

        /*
         * If the last counter was a group counter, it will have appended all
         * its siblings to the list, but we obtained 'tmp' before that which
         * will still point to the list head terminating the iteration.
         */
        if (!list_empty(&child_ctx->counter_list))
                goto again;

        mutex_unlock(&child_ctx->mutex);

        put_ctx(child_ctx);
}

/*
 * Free an unexposed, unused context, as created by the inheritance code
 * in perf_counter_init_task() below; used by fork() in case of failure.
 */
void perf_counter_free_task(struct task_struct *task)
{
        struct perf_counter_context *ctx = task->perf_counter_ctxp;
        struct perf_counter *counter, *tmp;

        if (!ctx)
                return;

        mutex_lock(&ctx->mutex);
again:
        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
                struct perf_counter *parent = counter->parent;

                if (WARN_ON_ONCE(!parent))
                        continue;

                mutex_lock(&parent->child_mutex);
                list_del_init(&counter->child_list);
                mutex_unlock(&parent->child_mutex);

                fput(parent->filp);

                list_del_counter(counter, ctx);
                free_counter(counter);
        }

        if (!list_empty(&ctx->counter_list))
                goto again;

        mutex_unlock(&ctx->mutex);

        put_ctx(ctx);
}

/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
        struct perf_counter_context *child_ctx, *parent_ctx;
        struct perf_counter_context *cloned_ctx;
        struct perf_counter *counter;
        struct task_struct *parent = current;
        int inherited_all = 1;
        int ret = 0;

        child->perf_counter_ctxp = NULL;

        mutex_init(&child->perf_counter_mutex);
        INIT_LIST_HEAD(&child->perf_counter_list);

        if (likely(!parent->perf_counter_ctxp))
                return 0;

        /*
         * This is executed from the parent task context, so inherit
         * counters that have been marked for cloning.
         * First allocate and initialize a context for the child.
         */

        child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
        if (!child_ctx)
                return -ENOMEM;

        __perf_counter_init_context(child_ctx, child);
        child->perf_counter_ctxp = child_ctx;
        get_task_struct(child);

        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent);

        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * it non-NULL earlier, the only reason for it to become NULL
         * is if we exit, and since we're currently in the middle of
         * a fork we can't be exiting at the same time.
         */

        /*
         * Lock the parent list. No need to lock the child - not PID
         * hashed yet and not running, so nobody can access it.
         */
        mutex_lock(&parent_ctx->mutex);

        /*
         * We don't have to disable NMIs - we are only looking at
         * the list, not manipulating it:
         */
        list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
                if (counter != counter->group_leader)
                        continue;

                if (!counter->attr.inherit) {
                        inherited_all = 0;
                        continue;
                }

                ret = inherit_group(counter, parent, parent_ctx,
                                    child, child_ctx);
                if (ret) {
                        inherited_all = 0;
                        break;
                }
        }

        if (inherited_all) {
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
                 * Note that if the parent is a clone, it could get
                 * uncloned at any point, but that doesn't matter
                 * because the list of counters and the generation
                 * count can't have changed since we took the mutex.
                 */
                cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
                } else {
                        child_ctx->parent_ctx = parent_ctx;
                        child_ctx->parent_gen = parent_ctx->generation;
                }
                get_ctx(child_ctx->parent_ctx);
        }

        mutex_unlock(&parent_ctx->mutex);

        perf_unpin_context(parent_ctx);

        return ret;
}
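
/*
 * Note: perf_counter_init_task() above is assumed to be called from the
 * fork path (copy_process()), with perf_counter_free_task() undoing it
 * if the fork subsequently fails and perf_counter_exit_task() doing the
 * real teardown at task exit; the call sites live outside this file.
 */
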
static void __cpuinit perf_counter_init_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx;

        cpuctx = &per_cpu(perf_cpu_context, cpu);
        __perf_counter_init_context(&cpuctx->ctx, NULL);

        spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
        spin_unlock(&perf_resource_lock);

        hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = &cpuctx->ctx;
        struct perf_counter *counter, *tmp;

        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
                __perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &cpuctx->ctx;

        mutex_lock(&ctx->mutex);
        smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
        mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                perf_counter_init_cpu(cpu);
                break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                perf_counter_exit_cpu(cpu);
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
        .notifier_call          = perf_cpu_notify,
        .priority               = 20,
};

void __init perf_counter_init(void)
{
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        register_cpu_notifier(&perf_cpu_nb);
}
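
/*
 * The sysfs knobs below hang off the cpu sysdev class, i.e. (assuming
 * the usual sysfs mount point) they show up as
 * /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit}.
 * For example, to let the scheduler over-commit counters:
 *
 *      echo 1 > /sys/devices/system/cpu/perf_counters/overcommit
 *
 * reserve_percpu keeps that many hardware counters per cpu away from
 * per-task counters; overcommit only accepts 0 or 1.
 */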
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
        return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
                        const char *buf,
                        size_t count)
{
        struct perf_cpu_context *cpuctx;
        unsigned long val;
        int err, cpu, mpt;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > perf_max_counters)
                return -EINVAL;

        spin_lock(&perf_resource_lock);
        perf_reserved_percpu = val;
        for_each_online_cpu(cpu) {
                cpuctx = &per_cpu(perf_cpu_context, cpu);
                spin_lock_irq(&cpuctx->ctx.lock);
                mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
                          perf_max_counters - perf_reserved_percpu);
                cpuctx->max_pertask = mpt;
                spin_unlock_irq(&cpuctx->ctx.lock);
        }
        spin_unlock(&perf_resource_lock);

        return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
        return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
        unsigned long val;
        int err;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > 1)
                return -EINVAL;

        spin_lock(&perf_resource_lock);
        perf_overcommit = val;
        spin_unlock(&perf_resource_lock);

        return count;
}

static SYSDEV_CLASS_ATTR(
                                reserve_percpu,
                                0644,
                                perf_show_reserve_percpu,
                                perf_set_reserve_percpu
                        );

static SYSDEV_CLASS_ATTR(
                                overcommit,
                                0644,
                                perf_show_overcommit,
                                perf_set_overcommit
                        );

static struct attribute *perfclass_attrs[] = {
        &attr_reserve_percpu.attr,
        &attr_overcommit.attr,
        NULL
};

static struct attribute_group perfclass_attr_group = {
        .attrs                  = perfclass_attrs,
        .name                   = "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
        return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
                                  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);