/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
		       struct perf_cpu_context *cpuctx,
		       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

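/*
 * Note: perf_disable()/perf_enable() nest by way of the per-CPU
 * disable_count above. hw_perf_disable() is (re)issued on every
 * perf_disable(), but hw_perf_enable() only runs once the count
 * drops back to zero:
 *
 *	perf_disable();		-- count 0 -> 1
 *	perf_disable();		-- count 1 -> 2
 *	perf_enable();		-- count 2 -> 1, PMU stays disabled
 *	perf_enable();		-- count 1 -> 0, hw_perf_enable() runs
 */
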
static void get_ctx(struct perf_counter_context *ctx)
{
	atomic_inc(&ctx->refcount);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

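/*
 * Lifetime rules, as implemented above: get_ctx()/put_ctx() keep a
 * reference count, the final put_ctx() also drops the parent
 * context's and the owning task's references, and the actual kfree()
 * is deferred through RCU so that lock-free readers under
 * rcu_read_lock() (see perf_lock_task_context() below) can never see
 * a context freed out from under them.
 */
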
/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

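/*
 * The "can't get swapped" guarantee above comes from context_equiv()
 * further down: the lazy context-switch optimization only swaps two
 * contexts when both have pin_count == 0, so holding a pin keeps the
 * ctx <-> task association stable.
 */
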
/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {
		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so
	 * we can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, we need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

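/*
 * Return convention relied on above: hw_perf_group_sched_in() yields
 * a negative errno on failure, 0 when the generic code should
 * schedule the group counter by counter, and a positive value when
 * the architecture code has already scheduled the whole group as a
 * unit itself -- in which case group_sched_in() reports success
 * immediately.
 */
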
/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so
	 * we can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, we need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

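/*
 * Presumed semantics (the overflow path that consumes event_limit
 * lives elsewhere in this file, not shown here): event_limit is a
 * countdown of overflow events after which the counter turns itself
 * off, so a refresh re-arms that budget by 'refresh' events and
 * re-enables the counter. Inherited counters are rejected above,
 * since a refresh could not be propagated to their children.
 */
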
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->attr.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
static void perf_log_period(struct perf_counter *counter, u64 period);

static void perf_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 interrupts, sample_period;
	u64 events, period;
	s64 delta;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		interrupts = counter->hw.interrupts;
		counter->hw.interrupts = 0;

		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_limit/HZ;
		}

		if (!counter->attr.freq || !counter->attr.sample_freq)
			continue;

		events = HZ * interrupts * counter->hw.sample_period;
		period = div64_u64(events, counter->attr.sample_freq);

		delta = (s64)(1 + period - counter->hw.sample_period);
		delta >>= 1;

		sample_period = counter->hw.sample_period + delta;

		if (!sample_period)
			sample_period = 1;

		perf_log_period(counter, sample_period);

		counter->hw.sample_period = sample_period;
	}
	spin_unlock(&ctx->lock);
}

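/*
 * The arithmetic above is a simple feedback loop, roughly:
 *
 *	events/sec    ~= HZ * interrupts * sample_period
 *	target period  = events / attr.sample_freq
 *
 * i.e. estimate the event rate from the interrupts taken during the
 * last tick, compute the period that would have produced
 * attr.sample_freq interrupts per second at that rate, and move
 * sample_period halfway toward it (delta >>= 1) to damp oscillation.
 */
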
/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}

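/*
 * The scheduler tick below strings the pieces above together:
 * re-tune sampling periods, unschedule both the CPU context and the
 * task context, rotate each counter list so that counters which did
 * not fit on the PMU last time get their turn, then schedule
 * everything back in.
 */
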
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_counter_ctxp;

	perf_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_adjust_freq(ctx);

	perf_counter_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_counter_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

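/*
 * The IPI above is what keeps the value fresh: only the CPU the
 * counter is currently running on can read the hardware counter, so
 * reading counter->count remotely without the __read() call would
 * miss whatever has accumulated in hardware since the last update.
 */
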
/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_counter_context *parent_ctx;
	struct perf_counter_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach counters to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		parent_ctx = ctx->parent_ctx;
		if (parent_ctx) {
			put_ctx(parent_ctx);
			ctx->parent_ctx = NULL;		/* no longer a clone */
		}
		/*
		 * Get an extra reference before dropping the lock so that
		 * this context won't get freed if the task exits.
		 */
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_counter_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

errout:
	put_task_struct(task);
	return ERR_PTR(err);
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	if (counter->ns)
		put_pid_ns(counter->ns);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	atomic_dec(&nr_counters);
	if (counter->attr.mmap)
		atomic_dec(&nr_mmap_counters);
	if (counter->attr.comm)
		atomic_dec(&nr_comm_counters);

	if (counter->destroy)
		counter->destroy(counter);

	put_ctx(counter->ctx);
	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_counter_remove_from_context(counter);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&counter->owner->perf_counter_mutex);
	list_del_init(&counter->owner_entry);
	mutex_unlock(&counter->owner->perf_counter_mutex);
	put_task_struct(counter->owner);

	free_counter(counter);

	return 0;
}
/*
 * Read the performance counter - simple non-blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[4];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled onto the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	if (counter->attr.read_format & PERF_FORMAT_ID)
		values[n++] = counter->id;
	mutex_unlock(&counter->child_mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}
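/*
 * Output layout of perf_read_hw() above, assuming all three optional
 * read_format bits are set (values[] is filled in exactly this order):
 *
 *	u64 buf[4];
 *	read(fd, buf, sizeof(buf));
 *	// buf[0] = counter value
 *	// buf[1] = time enabled	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	// buf[2] = time running	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	// buf[3] = counter id		(PERF_FORMAT_ID)
 */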
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}
static void perf_counter_for_each_sibling(struct perf_counter *counter,
					  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		func(sibling);
	mutex_unlock(&ctx->mutex);
}

/*
 * Holding the top-level counter's child_mutex means that any
 * descendant process that has inherited this counter will block
 * in sync_child_counter if it goes to exit, thus satisfying the
 * task existence requirements of perf_counter_enable/disable.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	perf_counter_for_each_sibling(counter, func);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_for_each_sibling(child, func);
	mutex_unlock(&counter->child_mutex);
}
static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
{
	struct perf_counter_context *ctx = counter->ctx;
	int ret = 0;
	u64 value;

	if (!counter->attr.sample_period)
		return -EINVAL;

	/* copy_from_user() returns the number of bytes *not* copied */
	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	spin_lock_irq(&ctx->lock);
	if (counter->attr.freq) {
		if (value > sysctl_perf_counter_limit) {
			ret = -EINVAL;
			goto unlock;
		}

		counter->attr.sample_freq = value;
	} else {
		counter->attr.sample_period = value;
		counter->hw.sample_period = value;

		perf_log_period(counter, value);
	}
unlock:
	spin_unlock_irq(&ctx->lock);

	return ret;
}
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	void (*func)(struct perf_counter *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		func = perf_counter_enable;
		break;
	case PERF_COUNTER_IOC_DISABLE:
		func = perf_counter_disable;
		break;
	case PERF_COUNTER_IOC_RESET:
		func = perf_counter_reset;
		break;

	case PERF_COUNTER_IOC_REFRESH:
		return perf_counter_refresh(counter, arg);

	case PERF_COUNTER_IOC_PERIOD:
		return perf_counter_period(counter, (u64 __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_counter_for_each(counter, func);
	else
		perf_counter_for_each_child(counter, func);

	return 0;
}
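/*
 * Usage sketch (user space, illustrative): PERF_IOC_FLAG_GROUP makes the
 * operation apply to the whole group via perf_counter_for_each() above;
 * without it, only this counter and its inherited children are affected:
 *
 *	ioctl(group_fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
 */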
int perf_counter_task_enable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_enable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

int perf_counter_task_disable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_disable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_counter_mmap_page *userpg;
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
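/*
 * Matching user-space read side (sketch): ->lock is incremented before
 * and after the update above, so it is even iff the page is stable and
 * a reader must retry until it sees the same even value on both sides:
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		idx = pg->index;
 *		off = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 */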
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr >= data->nr_pages)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}
static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}
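/*
 * Resulting buffer layout, as served by perf_mmap_fault() above: one
 * control page followed by nr_pages data pages, in mmap offset order:
 *
 *	pgoff 0:		data->user_page (struct perf_counter_mmap_page)
 *	pgoff 1..nr_pages:	data->data_pages[pgoff - 1]
 */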
static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;
	int i;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
unlock:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags &= ~VM_MAYWRITE;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
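/*
 * Usage sketch (user space, illustrative): the mapping must be shared,
 * read-only, start at offset 0 and cover 1 + nr_pages pages, where
 * nr_pages is 0 or a power of two -- e.g. with 8 data pages:
 *
 *	size_t len = (1 + 8) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 */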
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_counter *counter = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */
void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}
/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */
static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}
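/*
 * The pending queue is a per-cpu LIFO: perf_pending_queue() pushes with
 * cmpxchg() (lock-free, hence NMI-safe) and __perf_pending_run() grabs
 * the whole list with a single xchg(). The list is terminated by
 * PENDING_TAIL rather than NULL so that entry->next == NULL can mean
 * "not queued" -- which is exactly what the initial
 * cmpxchg(&entry->next, NULL, PENDING_TAIL) test relies on.
 */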
static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see __perf_pending_run().
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}
/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned long		head;
	unsigned long		offset;
	int			nmi;
	int			overflow;
	int			locked;
	unsigned long		flags;
};

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}
/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	unsigned long head;
	int cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_long_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
	if (unlikely(atomic_long_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}
static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size,
			     int nmi, int overflow)
{
	struct perf_mmap_data *data;
	unsigned int offset, head;

	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	handle->data	 = data;
	handle->counter	 = counter;
	handle->nmi	 = nmi;
	handle->overflow = overflow;

	if (!data->nr_pages)
		goto fail;

	perf_output_lock(handle);

	do {
		offset = head = atomic_long_read(&data->head);
		head += size;
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);

	return 0;

fail:
	perf_output_wakeup(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
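/*
 * Reservation sketch: the cmpxchg loop above claims [offset, offset+size)
 * in the buffer without taking a lock; the writer then fills its slice
 * and only perf_output_unlock() publishes data_head. A typical record
 * therefore goes out as:
 *
 *	if (!perf_output_begin(&handle, counter, sizeof(rec), nmi, 1)) {
 *		perf_output_put(&handle, rec);
 *		perf_output_end(&handle);
 *	}
 */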
static void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

static void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = counter->attr.wakeup_events;

	if (handle->overflow && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}
static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_tgid_nr_ns(p, counter->ns);
}

static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_pid_nr_ns(p, counter->ns);
}
static void perf_counter_output(struct perf_counter *counter,
				int nmi, struct pt_regs *regs, u64 addr)
{
	int ret;
	u64 sample_type = counter->attr.sample_type;
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
		u32 pid, tid;
	} tid_entry;
	struct {
		u64 id;
		u64 counter;
	} group_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;
	struct {
		u32 cpu, reserved;
	} cpu_entry;

	header.type = 0;
	header.size = sizeof(header);

	header.misc = PERF_EVENT_MISC_OVERFLOW;
	header.misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		ip = perf_instruction_pointer(regs);
		header.type |= PERF_SAMPLE_IP;
		header.size += sizeof(ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		tid_entry.pid = perf_counter_pid(counter, current);
		tid_entry.tid = perf_counter_tid(counter, current);

		header.type |= PERF_SAMPLE_TID;
		header.size += sizeof(tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.type |= PERF_SAMPLE_TIME;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_ADDR) {
		header.type |= PERF_SAMPLE_ADDR;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_ID) {
		header.type |= PERF_SAMPLE_ID;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		header.type |= PERF_SAMPLE_CPU;
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		header.type |= PERF_SAMPLE_PERIOD;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_GROUP) {
		header.type |= PERF_SAMPLE_GROUP;
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		callchain = perf_callchain(regs);

		if (callchain) {
			callchain_size = (1 + callchain->nr) * sizeof(u64);

			header.type |= PERF_SAMPLE_CALLCHAIN;
			header.size += callchain_size;
		}
	}

	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;

	perf_output_put(&handle, header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(&handle, ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(&handle, tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(&handle, time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(&handle, addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(&handle, counter->id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(&handle, cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(&handle, counter->hw.sample_period);

	/*
	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
	 */
	if (sample_type & PERF_SAMPLE_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;

		perf_output_put(&handle, nr);

		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
				sub->pmu->read(sub);

			group_entry.id = sub->id;
			group_entry.counter = atomic64_read(&sub->count);

			perf_output_put(&handle, group_entry);
		}
	}

	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);

	perf_output_end(&handle);
}
/*
 * fork tracking
 */

struct perf_fork_event {
	struct task_struct	*task;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
	} event;
};

static void perf_counter_fork_output(struct perf_counter *counter,
				     struct perf_fork_event *fork_event)
{
	struct perf_output_handle handle;
	int size = fork_event->event.header.size;
	struct task_struct *task = fork_event->task;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	fork_event->event.pid = perf_counter_pid(counter, task);
	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);

	perf_output_put(&handle, fork_event->event);
	perf_output_end(&handle);
}

static int perf_counter_fork_match(struct perf_counter *counter)
{
	if (counter->attr.comm || counter->attr.mmap)
		return 1;

	return 0;
}

static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
				  struct perf_fork_event *fork_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_fork_match(counter))
			perf_counter_fork_output(counter, fork_event);
	}
	rcu_read_unlock();
}

static void perf_counter_fork_event(struct perf_fork_event *fork_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_fork_ctx(ctx, fork_event);
	rcu_read_unlock();
}

void perf_counter_fork(struct task_struct *task)
{
	struct perf_fork_event fork_event;

	if (!atomic_read(&nr_comm_counters) &&
	    !atomic_read(&nr_mmap_counters))
		return;

	fork_event = (struct perf_fork_event){
		.task	= task,
		.event	= {
			.header = {
				.type = PERF_EVENT_FORK,
				.size = sizeof(fork_event.event),
			},
		},
	};

	perf_counter_fork_event(&fork_event);
}
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
	comm_event->event.tid = perf_counter_tid(counter, comm_event->task);

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter)
{
	if (counter->attr.comm)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	unsigned int size;
	char *comm = comm_event->task->comm;

	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_counters))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		.event	= {
			.header = { .type = PERF_EVENT_COMM, },
		},
	};

	perf_counter_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	mmap_event->event.pid = perf_counter_pid(counter, current);
	mmap_event->event.tid = perf_counter_tid(counter, current);

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->attr.mmap)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = arch_vma_name(mmap_event->vma);
		if (name)
			goto got_name;

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

	kfree(buf);
}

void __perf_counter_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_counters))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		.event	= {
			.header = { .type = PERF_EVENT_MMAP, },
			.start	= vma->vm_start,
			.len	= vma->vm_end - vma->vm_start,
			.pgoff	= vma->vm_pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}
/*
 * Log sample_period changes so that analyzing tools can re-normalize the
 * event flow.
 */
static void perf_log_period(struct perf_counter *counter, u64 period)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				period;
	} freq_event = {
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
			.size = sizeof(freq_event),
		},
		.time	= sched_clock(),
		.id	= counter->id,
		.period	= period,
	};

	if (counter->hw.sample_period == period)
		return;

	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, freq_event);
	perf_output_end(&handle);
}
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
	} throttle_event = {
		.header = {
			/*
			 * PERF_EVENT_UNTHROTTLE == PERF_EVENT_THROTTLE + 1,
			 * so enable selects the record type:
			 */
			.type = PERF_EVENT_THROTTLE + enable,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time	= sched_clock(),
	};

	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}
/*
 * Generic counter overflow handling.
 */

int perf_counter_overflow(struct perf_counter *counter,
			  int nmi, struct pt_regs *regs, u64 addr)
{
	int events = atomic_read(&counter->event_limit);
	int throttle = counter->pmu->unthrottle != NULL;
	int ret = 0;

	if (!throttle) {
		counter->hw.interrupts++;
	} else {
		if (counter->hw.interrupts != MAX_INTERRUPTS) {
			counter->hw.interrupts++;
			if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
				counter->hw.interrupts = MAX_INTERRUPTS;
				perf_log_throttle(counter, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling counters even though on the previous
			 * pass we disabled it - just in case we raced with a
			 * sched-in and the counter got enabled again:
			 */
			ret = 1;
		}
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

	counter->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
		counter->pending_kill = POLL_HUP;
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

	perf_counter_output(counter, nmi, regs, addr);
	return ret;
}
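/*
 * Throttling arithmetic, spelled out: hw.interrupts counts overflow
 * interrupts since the last tick, so HZ * hw.interrupts extrapolates to
 * interrupts per second; once that exceeds sysctl_perf_counter_limit the
 * counter is parked at MAX_INTERRUPTS until unthrottled. E.g. with
 * HZ=1000 and a limit of 100000, the 101st overflow within one tick
 * trips the throttle (1000 * 101 > 100000).
 */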
/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}
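/*
 * Worked example of the negative-count trick above: with a period of 100
 * and period_left of 100, both hw.prev_count and hw.count are set to
 * -100. perf_swcounter_add() below then accumulates events into
 * hw.count; the add that takes it from negative to >= 0 (the 100th
 * event) makes atomic64_add_negative() return false, which is what
 * triggers perf_swcounter_overflow().
 */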
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_counter *counter;
	struct pt_regs *regs;
	u64 period;

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->pmu->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->attr.exclude_kernel || !regs) &&
			!counter->attr.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (perf_counter_overflow(counter, 0, regs, 0))
			ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, counter->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs, u64 addr)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	if (perf_counter_overflow(counter, nmi, regs, addr))
		/* soft-disable the counter */
		;
}
static int perf_swcounter_is_counting(struct perf_counter *counter)
{
	struct perf_counter_context *ctx;
	unsigned long flags;
	int count;

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		return 1;

	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
		return 0;

	/*
	 * If the counter is inactive, it could be just because
	 * its task is scheduled out, or because it's in a group
	 * which could not go on the PMU. We want to count in
	 * the first case but not the second. If the context is
	 * currently active then an inactive software counter must
	 * be the second case. If it's not currently active then
	 * we need to know whether the counter was active when the
	 * context was last active, which we can determine by
	 * comparing counter->tstamp_stopped with ctx->time.
	 *
	 * We are within an RCU read-side critical section,
	 * which protects the existence of *ctx.
	 */
	ctx = counter->ctx;
	spin_lock_irqsave(&ctx->lock, flags);
	count = 1;
	/* Re-check state now we have the lock */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->ctx->is_active ||
	    counter->tstamp_stopped < ctx->time)
		count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}
static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	u64 event_config;

	event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;

	if (!perf_swcounter_is_counting(counter))
		return 0;

	if (counter->attr.config != event_config)
		return 0;

	if (regs) {
		if (counter->attr.exclude_user && user_mode(regs))
			return 0;

		if (counter->attr.exclude_kernel && !user_mode(regs))
			return 0;
	}

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);

	if (counter->hw.sample_period && !neg && regs)
		perf_swcounter_overflow(counter, nmi, regs, addr);
}
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs,
				     u64 addr)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs, addr);
	}
	rcu_read_unlock();
}

static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}
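/*
 * Example: a software event raised from hardirq context while another is
 * mid-flight in process context uses recursion[2] instead of
 * recursion[0], so the two never exclude each other; only genuine
 * same-context recursion is dropped by the check in
 * __perf_swcounter_event() below.
 */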
static void __perf_swcounter_event(enum perf_event_types type, u32 event,
				   u64 nr, int nmi, struct pt_regs *regs,
				   u64 addr)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);
	struct perf_counter_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, regs, addr);
	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
	rcu_read_unlock();

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}

void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
}
static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};

/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};
/*
 * Software counter: task time clock
 */

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&counter->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 now;

	now = counter->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
/*
 * Software counter: cpu migrations
 */
void perf_counter_task_migration(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx;

	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
				 PERF_COUNT_CPU_MIGRATIONS,
				 1, 1, NULL, 0);

	ctx = perf_pin_task_context(task);
	if (ctx) {
		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
					 PERF_COUNT_CPU_MIGRATIONS,
					 1, 1, NULL, 0);
		perf_unpin_context(ctx);
	}
}
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->attr));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->attr);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.sample_period = counter->attr.sample_period;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
        const struct pmu *pmu = NULL;

        /*
         * Software counters (currently) can't in general distinguish
         * between user, kernel and hypervisor events.
         * However, context switches and cpu migrations are considered
         * to be kernel events, and page faults are never hypervisor
         * events.
         */
        switch (perf_event_id(&counter->attr)) {
        case PERF_COUNT_CPU_CLOCK:
                pmu = &perf_ops_cpu_clock;
                break;
        case PERF_COUNT_TASK_CLOCK:
                /*
                 * If the user instantiates this as a per-cpu counter,
                 * use the cpu_clock counter instead.
                 */
                if (counter->ctx->task)
                        pmu = &perf_ops_task_clock;
                else
                        pmu = &perf_ops_cpu_clock;
                break;
        case PERF_COUNT_PAGE_FAULTS:
        case PERF_COUNT_PAGE_FAULTS_MIN:
        case PERF_COUNT_PAGE_FAULTS_MAJ:
        case PERF_COUNT_CONTEXT_SWITCHES:
        case PERF_COUNT_CPU_MIGRATIONS:
                pmu = &perf_ops_generic;
                break;
        }

        return pmu;
}
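
/*
 * Example of what reaches this function (sketch): in this era the
 * event type and id share the single attr.config word, decoded by the
 * perf_event_type()/perf_event_id() helpers; the top bits carry a raw
 * flag and the type, the low bits the event id (exact widths are in
 * perf_counter.h).  A software task-clock counter therefore packs
 * PERF_TYPE_SOFTWARE and PERF_COUNT_TASK_CLOCK into attr.config.
 * With a task context (counter->ctx->task != NULL) it is backed by
 * perf_ops_task_clock; opened per-cpu, the same config falls back to
 * perf_ops_cpu_clock.
 */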
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_attr *attr,
                   int cpu,
                   struct perf_counter_context *ctx,
                   struct perf_counter *group_leader,
                   gfp_t gfpflags)
{
        const struct pmu *pmu;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
        long err;

        counter = kzalloc(sizeof(*counter), gfpflags);
        if (!counter)
                return ERR_PTR(-ENOMEM);

        /*
         * Single counters are their own group leaders, with an
         * empty sibling list:
         */
        if (!group_leader)
                group_leader = counter;

        mutex_init(&counter->child_mutex);
        INIT_LIST_HEAD(&counter->child_list);

        INIT_LIST_HEAD(&counter->list_entry);
        INIT_LIST_HEAD(&counter->event_entry);
        INIT_LIST_HEAD(&counter->sibling_list);
        init_waitqueue_head(&counter->waitq);

        mutex_init(&counter->mmap_mutex);

        counter->cpu            = cpu;
        counter->attr           = *attr;
        counter->group_leader   = group_leader;
        counter->pmu            = NULL;
        counter->ctx            = ctx;
        counter->oncpu          = -1;

        counter->ns = get_pid_ns(current->nsproxy->pid_ns);
        counter->id = atomic64_inc_return(&perf_counter_id);

        counter->state = PERF_COUNTER_STATE_INACTIVE;

        if (attr->disabled)
                counter->state = PERF_COUNTER_STATE_OFF;

        pmu = NULL;

        hwc = &counter->hw;
        if (attr->freq && attr->sample_freq)
                hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
        else
                hwc->sample_period = attr->sample_period;
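
        /*
         * Worked example of the initial period estimate above (sketch;
         * actual values depend on HZ): with HZ=1000, TICK_NSEC is
         * roughly 1,000,000 ns, so attr->sample_freq = 4000 yields an
         * initial sample_period of 250 ns.  This is only a starting
         * point; the frequency-driven code elsewhere in this file
         * adjusts the period toward the requested rate as samples
         * arrive.
         */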

        /*
         * we currently do not support PERF_SAMPLE_GROUP on inherited counters
         */
        if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
                goto done;

        if (perf_event_raw(attr)) {
                pmu = hw_perf_counter_init(counter);
                goto done;
        }

        switch (perf_event_type(attr)) {
        case PERF_TYPE_HARDWARE:
                pmu = hw_perf_counter_init(counter);
                break;

        case PERF_TYPE_SOFTWARE:
                pmu = sw_perf_counter_init(counter);
                break;

        case PERF_TYPE_TRACEPOINT:
                pmu = tp_perf_counter_init(counter);
                break;
        }
done:
        err = 0;
        if (!pmu)
                err = -EINVAL;
        else if (IS_ERR(pmu))
                err = PTR_ERR(pmu);

        if (err) {
                if (counter->ns)
                        put_pid_ns(counter->ns);
                kfree(counter);
                return ERR_PTR(err);
        }

        counter->pmu = pmu;

        atomic_inc(&nr_counters);
        if (counter->attr.mmap)
                atomic_inc(&nr_mmap_counters);
        if (counter->attr.comm)
                atomic_inc(&nr_comm_counters);

        return counter;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @attr_uptr:  event type attributes for monitoring/sampling
 * @pid:        target pid
 * @cpu:        target cpu
 * @group_fd:   group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
                const struct perf_counter_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
        struct perf_counter *counter, *group_leader;
        struct perf_counter_attr attr;
        struct perf_counter_context *ctx;
        struct file *counter_file = NULL;
        struct file *group_file = NULL;
        int fput_needed = 0;
        int fput_needed2 = 0;
        int ret;

        /* for future expandability... */
        if (flags)
                return -EINVAL;

        if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
                return -EFAULT;

        /*
         * Get the target context (task or percpu):
         */
        ctx = find_get_context(pid, cpu);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /*
         * Look up the group leader (we will attach this counter to it):
         */
        group_leader = NULL;
        if (group_fd != -1) {
                ret = -EINVAL;
                group_file = fget_light(group_fd, &fput_needed);
                if (!group_file)
                        goto err_put_context;
                if (group_file->f_op != &perf_fops)
                        goto err_put_context;

                group_leader = group_file->private_data;
                /*
                 * Do not allow a recursive hierarchy (this new sibling
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
                        goto err_put_context;
                /*
                 * Do not allow to attach to a group in a different
                 * task or CPU context:
                 */
                if (group_leader->ctx != ctx)
                        goto err_put_context;
                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
                        goto err_put_context;
        }

        counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
                                     GFP_KERNEL);
        ret = PTR_ERR(counter);
        if (IS_ERR(counter))
                goto err_put_context;

        ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
        if (ret < 0)
                goto err_free_put_context;

        counter_file = fget_light(ret, &fput_needed2);
        if (!counter_file)
                goto err_free_put_context;

        counter->filp = counter_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, counter, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);

        counter->owner = current;
        get_task_struct(current);
        mutex_lock(&current->perf_counter_mutex);
        list_add_tail(&counter->owner_entry, &current->perf_counter_list);
        mutex_unlock(&current->perf_counter_mutex);

        fput_light(counter_file, fput_needed2);

out_fput:
        fput_light(group_file, fput_needed);

        return ret;

err_free_put_context:
        kfree(counter);

err_put_context:
        put_ctx(ctx);

        goto out_fput;
}
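
/*
 * Illustrative userspace usage (sketch only).  EID() stands for a
 * hypothetical helper packing the event type and id into attr.config
 * as decoded above by perf_event_type()/perf_event_id(), and the
 * sketch assumes the architecture defines __NR_perf_counter_open:
 *
 *      struct perf_counter_attr attr = {
 *              .config = EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
 *      };
 *      unsigned long long count;
 *      int fd;
 *
 *      fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *      ... run the workload ...
 *      read(fd, &count, sizeof(count));
 *      close(fd);
 *
 * pid == 0 targets the calling task, cpu == -1 means "any cpu", and
 * group_fd == -1 makes the new counter its own group leader.
 */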
/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
              struct task_struct *parent,
              struct perf_counter_context *parent_ctx,
              struct task_struct *child,
              struct perf_counter *group_leader,
              struct perf_counter_context *child_ctx)
{
        struct perf_counter *child_counter;

        /*
         * Instead of creating recursive hierarchies of counters,
         * we link inherited counters back to the original parent,
         * which has a filp for sure, which we use as the reference
         * count:
         */
        if (parent_counter->parent)
                parent_counter = parent_counter->parent;

        child_counter = perf_counter_alloc(&parent_counter->attr,
                                           parent_counter->cpu, child_ctx,
                                           group_leader, GFP_KERNEL);
        if (IS_ERR(child_counter))
                return child_counter;
        get_ctx(child_ctx);

        /*
         * Make the child state follow the state of the parent counter,
         * not its attr.disabled bit.  We hold the parent's mutex,
         * so we won't race with perf_counter_{en, dis}able_family.
         */
        if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
                child_counter->state = PERF_COUNTER_STATE_INACTIVE;
        else
                child_counter->state = PERF_COUNTER_STATE_OFF;

        /*
         * Link it up in the child's context:
         */
        add_counter_to_ctx(child_counter, child_ctx);

        child_counter->parent = parent_counter;
        /*
         * inherit into child's child as well:
         */
        child_counter->attr.inherit = 1;

        /*
         * Get a reference to the parent filp - we will fput it
         * when the child counter exits.  This is safe to do because
         * we are in the parent and we know that the filp still
         * exists and has a nonzero count:
         */
        atomic_long_inc(&parent_counter->filp->f_count);

        /*
         * Link this into the parent counter's child list
         */
        WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
        mutex_lock(&parent_counter->child_mutex);
        list_add_tail(&child_counter->child_list, &parent_counter->child_list);
        mutex_unlock(&parent_counter->child_mutex);

        return child_counter;
}
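
/*
 * Resulting linkage (sketch): inherited counters always point back at
 * the original, user-visible counter, never at another inherited one,
 * so the hierarchy stays one level deep:
 *
 *      original counter (has a filp)
 *          child_list -> inherited counter in child A
 *                        inherited counter in child B
 *                        ...
 */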
static int inherit_group(struct perf_counter *parent_counter,
              struct task_struct *parent,
              struct perf_counter_context *parent_ctx,
              struct task_struct *child,
              struct perf_counter_context *child_ctx)
{
        struct perf_counter *leader;
        struct perf_counter *sub;
        struct perf_counter *child_ctr;

        leader = inherit_counter(parent_counter, parent, parent_ctx,
                                 child, NULL, child_ctx);
        if (IS_ERR(leader))
                return PTR_ERR(leader);
        list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
                child_ctr = inherit_counter(sub, parent, parent_ctx,
                                            child, leader, child_ctx);
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);
        }
        return 0;
}
static void sync_child_counter(struct perf_counter *child_counter,
                               struct perf_counter *parent_counter)
{
        u64 child_val;

        child_val = atomic64_read(&child_counter->count);

        /*
         * Add back the child's count to the parent's count:
         */
        atomic64_add(child_val, &parent_counter->count);
        atomic64_add(child_counter->total_time_enabled,
                     &parent_counter->child_total_time_enabled);
        atomic64_add(child_counter->total_time_running,
                     &parent_counter->child_total_time_running);

        /*
         * Remove this counter from the parent's list
         */
        WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
        mutex_lock(&parent_counter->child_mutex);
        list_del_init(&child_counter->child_list);
        mutex_unlock(&parent_counter->child_mutex);

        /*
         * Release the parent counter, if this was the last
         * reference to it.
         */
        fput(parent_counter->filp);
}
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
                         struct perf_counter_context *child_ctx)
{
        struct perf_counter *parent_counter;

        update_counter_times(child_counter);
        perf_counter_remove_from_context(child_counter);

        parent_counter = child_counter->parent;
        /*
         * It can happen that the parent exits first, and has counters
         * that are still around due to the child reference.  These
         * counters need to be zapped - but otherwise linger.
         */
        if (parent_counter) {
                sync_child_counter(child_counter, parent_counter);
                free_counter(child_counter);
        }
}
/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
        struct perf_counter *child_counter, *tmp;
        struct perf_counter_context *child_ctx;
        unsigned long flags;

        if (likely(!child->perf_counter_ctxp))
                return;

        local_irq_save(flags);
        /*
         * We can't reschedule here because interrupts are disabled,
         * and either child is current or it is a task that can't be
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
        child_ctx = child->perf_counter_ctxp;
        __perf_counter_task_sched_out(child_ctx);

        /*
         * Take the context lock here so that if find_get_context is
         * reading child->perf_counter_ctxp, we wait until it has
         * incremented the context's refcount before we do put_ctx below.
         */
        spin_lock(&child_ctx->lock);
        child->perf_counter_ctxp = NULL;
        if (child_ctx->parent_ctx) {
                /*
                 * This context is a clone; unclone it so it can't get
                 * swapped to another process while we're removing all
                 * the counters from it.
                 */
                put_ctx(child_ctx->parent_ctx);
                child_ctx->parent_ctx = NULL;
        }
        spin_unlock(&child_ctx->lock);
        local_irq_restore(flags);

        mutex_lock(&child_ctx->mutex);

again:
        list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
                                 list_entry)
                __perf_counter_exit_task(child_counter, child_ctx);

        /*
         * If the last counter was a group counter, it will have appended all
         * its siblings to the list, but we obtained 'tmp' before that which
         * will still point to the list head terminating the iteration.
         */
        if (!list_empty(&child_ctx->counter_list))
                goto again;

        mutex_unlock(&child_ctx->mutex);

        put_ctx(child_ctx);
}
/*
 * Free an unexposed, unused context, as created by the inheritance
 * code in perf_counter_init_task() below; used by fork() when that
 * function fails.
 */
void perf_counter_free_task(struct task_struct *task)
{
        struct perf_counter_context *ctx = task->perf_counter_ctxp;
        struct perf_counter *counter, *tmp;

        if (!ctx)
                return;

        mutex_lock(&ctx->mutex);
again:
        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
                struct perf_counter *parent = counter->parent;

                if (WARN_ON_ONCE(!parent))
                        continue;

                mutex_lock(&parent->child_mutex);
                list_del_init(&counter->child_list);
                mutex_unlock(&parent->child_mutex);

                fput(parent->filp);

                list_del_counter(counter, ctx);
                free_counter(counter);
        }

        if (!list_empty(&ctx->counter_list))
                goto again;

        mutex_unlock(&ctx->mutex);

        put_ctx(ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
        struct perf_counter_context *child_ctx, *parent_ctx;
        struct perf_counter_context *cloned_ctx;
        struct perf_counter *counter;
        struct task_struct *parent = current;
        int inherited_all = 1;
        int ret = 0;

        child->perf_counter_ctxp = NULL;

        mutex_init(&child->perf_counter_mutex);
        INIT_LIST_HEAD(&child->perf_counter_list);

        if (likely(!parent->perf_counter_ctxp))
                return 0;

        /*
         * This is executed from the parent task context, so inherit
         * counters that have been marked for cloning.
         * First allocate and initialize a context for the child.
         */
        child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
        if (!child_ctx)
                return -ENOMEM;

        __perf_counter_init_context(child_ctx, child);
        child->perf_counter_ctxp = child_ctx;
        get_task_struct(child);

        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent);

        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * it non-NULL earlier, the only reason for it to become NULL
         * is if we exit, and since we're currently in the middle of
         * a fork we can't be exiting at the same time.
         */

        /*
         * Lock the parent list.  No need to lock the child - not PID
         * hashed yet and not running, so nobody can access it.
         */
        mutex_lock(&parent_ctx->mutex);

        /*
         * We don't have to disable NMIs - we are only looking at
         * the list, not manipulating it:
         */
        list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
                if (counter != counter->group_leader)
                        continue;

                if (!counter->attr.inherit) {
                        inherited_all = 0;
                        continue;
                }

                ret = inherit_group(counter, parent, parent_ctx,
                                    child, child_ctx);
                if (ret) {
                        inherited_all = 0;
                        break;
                }
        }

        if (inherited_all) {
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
                 * Note that if the parent is a clone, it could get
                 * uncloned at any point, but that doesn't matter
                 * because the list of counters and the generation
                 * count can't have changed since we took the mutex.
                 */
                cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
                } else {
                        child_ctx->parent_ctx = parent_ctx;
                        child_ctx->parent_gen = parent_ctx->generation;
                }
                get_ctx(child_ctx->parent_ctx);
        }

        mutex_unlock(&parent_ctx->mutex);

        perf_unpin_context(parent_ctx);

        return ret;
}
static void __cpuinit perf_counter_init_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx;

        cpuctx = &per_cpu(perf_cpu_context, cpu);
        __perf_counter_init_context(&cpuctx->ctx, NULL);

        spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
        spin_unlock(&perf_resource_lock);

        hw_perf_counter_setup(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = &cpuctx->ctx;
        struct perf_counter *counter, *tmp;

        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
                __perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &cpuctx->ctx;

        mutex_lock(&ctx->mutex);
        smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
        mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                perf_counter_init_cpu(cpu);
                break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                perf_counter_exit_cpu(cpu);
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
        .notifier_call          = perf_cpu_notify,
        .priority               = 20,
};

void __init perf_counter_init(void)
{
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        register_cpu_notifier(&perf_cpu_nb);
}
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
        return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
                        const char *buf,
                        size_t count)
{
        struct perf_cpu_context *cpuctx;
        unsigned long val;
        int err, cpu, mpt;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > perf_max_counters)
                return -EINVAL;

        spin_lock(&perf_resource_lock);
        perf_reserved_percpu = val;
        for_each_online_cpu(cpu) {
                cpuctx = &per_cpu(perf_cpu_context, cpu);
                spin_lock_irq(&cpuctx->ctx.lock);
                mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
                          perf_max_counters - perf_reserved_percpu);
                cpuctx->max_pertask = mpt;
                spin_unlock_irq(&cpuctx->ctx.lock);
        }
        spin_unlock(&perf_resource_lock);

        return count;
}
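
/*
 * Worked example for the update above (numbers are illustrative):
 * with perf_max_counters = 8, writing "2" reserves two counters per
 * cpu for per-cpu use, so a cpu already holding 3 counters gets
 * max_pertask = min(8 - 3, 8 - 2) = 5, while an idle cpu gets
 * min(8 - 0, 8 - 2) = 6.
 */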
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
        return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
        unsigned long val;
        int err;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > 1)
                return -EINVAL;

        spin_lock(&perf_resource_lock);
        perf_overcommit = val;
        spin_unlock(&perf_resource_lock);

        return count;
}
static SYSDEV_CLASS_ATTR(
                                reserve_percpu,
                                0644,
                                perf_show_reserve_percpu,
                                perf_set_reserve_percpu
                        );

static SYSDEV_CLASS_ATTR(
                                overcommit,
                                0644,
                                perf_show_overcommit,
                                perf_set_overcommit
                        );

static struct attribute *perfclass_attrs[] = {
        &attr_reserve_percpu.attr,
        &attr_overcommit.attr,
        NULL
};

static struct attribute_group perfclass_attr_group = {
        .attrs                  = perfclass_attrs,
        .name                   = "perf_counters",
};
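
/*
 * These attributes appear under the cpu sysdev class, i.e. roughly
 * (the exact paths are a sketch and may vary by configuration):
 *
 *      /sys/devices/system/cpu/perf_counters/reserve_percpu
 *      /sys/devices/system/cpu/perf_counters/overcommit
 *
 * For example, "echo 2 > .../reserve_percpu" sets aside two hardware
 * counters on each cpu, shrinking max_pertask as computed above;
 * overcommit accepts only 0 or 1, as enforced by perf_set_overcommit().
 */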
static int __init perf_counter_sysfs_init(void)
{
        return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
                                  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);