perf_counter.c

/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
		       struct perf_cpu_context *cpuctx,
		       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}
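
/*
 * Illustrative sketch of the nesting behaviour above
 * (example_nested_disable() is hypothetical, not a caller in this
 * file): the per-cpu disable count nests, so only the outermost
 * perf_disable()/perf_enable() pair actually touches the hardware.
 */
#if 0
static void example_nested_disable(void)
{
	perf_disable();		/* count 0 -> 1: hw_perf_disable() runs */
	perf_disable();		/* count 1 -> 2: hardware already off */

	/* ... safely manipulate counter state here ... */

	perf_enable();		/* count 2 -> 1: hardware stays off */
	perf_enable();		/* count 1 -> 0: hw_perf_enable() runs */
}
#endif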
static void get_ctx(struct perf_counter_context *ctx)
{
	atomic_inc(&ctx->refcount);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;
}
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
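
/*
 * Illustrative sketch of a pin/unpin pairing
 * (example_pinned_access() is hypothetical, not a caller in this
 * file): while pinned, the context can neither be swapped to
 * another task nor freed out from under us.
 */
#if 0
static void example_pinned_access(struct task_struct *task)
{
	struct perf_counter_context *ctx;

	ctx = perf_pin_task_context(task);
	if (ctx) {
		/* ... inspect or modify ctx while it can't move ... */
		perf_unpin_context(ctx);	/* drops pin_count and the reference */
	}
}
#endif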
/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}
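
/*
 * Illustrative sketch of the list topology the two helpers above
 * maintain (names are made up):
 *
 *	ctx->counter_list:       leader_A --> leader_B --> singleton_C
 *	leader_A->sibling_list:  sib_A1 --> sib_A2
 *
 * list_del_counter(leader_A, ctx) promotes sib_A1 and sib_A2 onto
 * ctx->counter_list as singleton counters, each becoming its own
 * group_leader.
 */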
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}
/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so
	 * we can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}
static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}
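
/*
 * Worked example for the accounting above (made-up numbers): a
 * counter added at ctx->time 100 has all three tstamps set to 100.
 * If it is scheduled in at 120 (tstamp_running becomes 120) and out
 * at 170 (tstamp_stopped = 170), a read at ctx->time 200 while
 * INACTIVE yields:
 *
 *	total_time_enabled = 200 - 100 = 100
 *	total_time_running = 170 - 120 =  50
 *
 * i.e. enabled for 100 time units but on the PMU for only 50.
 */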
/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}
/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
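
/*
 * Summary of the decision above, case by case:
 *
 *	group is software-only                 -> always goes on
 *	an exclusive group is already on       -> nothing else goes on
 *	group is exclusive and PMU is busy     -> doesn't go on
 *	otherwise                              -> inherits can_add_hw
 */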
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.  NOP for non-NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}
/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so
	 * we can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->hw_event.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}
/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}
/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
static void perf_log_period(struct perf_counter *counter, u64 period);

static void perf_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 interrupts, sample_period;
	u64 events, period;
	s64 delta;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		interrupts = counter->hw.interrupts;
		counter->hw.interrupts = 0;

		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_limit/HZ;
		}

		if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
			continue;

		events = HZ * interrupts * counter->hw.sample_period;
		period = div64_u64(events, counter->hw_event.sample_freq);

		delta = (s64)(1 + period - counter->hw.sample_period);
		delta >>= 1;

		sample_period = counter->hw.sample_period + delta;

		if (!sample_period)
			sample_period = 1;

		perf_log_period(counter, sample_period);

		counter->hw.sample_period = sample_period;
	}
	spin_unlock(&ctx->lock);
}
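
/*
 * Worked example for the adjustment above (made-up numbers, HZ
 * assumed to be 1000): a counter with sample_freq 4000 and a current
 * hw.sample_period of 10000 that took 2 interrupts in the last tick:
 *
 *	events = 1000 * 2 * 10000 = 20000000
 *	period = 20000000 / 4000  = 5000
 *	delta  = (1 + 5000 - 10000) >> 1 = -2500
 *
 * so sample_period becomes 7500: it moves roughly half-way toward
 * the target each tick, damping oscillation instead of jumping
 * straight to 5000.
 */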
/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}
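
/*
 * Illustrative effect of the rotation above: a counter_list of
 * [A, B, C] becomes [B, C, A], so groups starved of PMU space get
 * moved toward the front over successive ticks.
 */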
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_counter_ctxp;

	perf_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_adjust_freq(ctx);

	perf_counter_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_counter_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}
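
/*
 * Note on the read path above: reading an ACTIVE counter costs an
 * IPI to the CPU the counter is running on, while an INACTIVE
 * counter is served from the last saved value with only its time
 * fields updated.
 */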
/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_counter_context *parent_ctx;
	struct perf_counter_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu >= num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach counters to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		parent_ctx = ctx->parent_ctx;
		if (parent_ctx) {
			put_ctx(parent_ctx);
			ctx->parent_ctx = NULL;		/* no longer a clone */
		}
		/*
		 * Get an extra reference before dropping the lock so that
		 * this context won't get freed if the task exits.
		 */
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_counter_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
  1223. static void free_counter_rcu(struct rcu_head *head)
  1224. {
  1225. struct perf_counter *counter;
  1226. counter = container_of(head, struct perf_counter, rcu_head);
  1227. if (counter->ns)
  1228. put_pid_ns(counter->ns);
  1229. kfree(counter);
  1230. }
  1231. static void perf_pending_sync(struct perf_counter *counter);
  1232. static void free_counter(struct perf_counter *counter)
  1233. {
  1234. perf_pending_sync(counter);
  1235. atomic_dec(&nr_counters);
  1236. if (counter->hw_event.mmap)
  1237. atomic_dec(&nr_mmap_tracking);
  1238. if (counter->hw_event.munmap)
  1239. atomic_dec(&nr_munmap_tracking);
  1240. if (counter->hw_event.comm)
  1241. atomic_dec(&nr_comm_tracking);
  1242. if (counter->destroy)
  1243. counter->destroy(counter);
  1244. put_ctx(counter->ctx);
  1245. call_rcu(&counter->rcu_head, free_counter_rcu);
  1246. }
  1247. /*
  1248. * Called when the last reference to the file is gone.
  1249. */
  1250. static int perf_release(struct inode *inode, struct file *file)
  1251. {
  1252. struct perf_counter *counter = file->private_data;
  1253. struct perf_counter_context *ctx = counter->ctx;
  1254. file->private_data = NULL;
  1255. WARN_ON_ONCE(ctx->parent_ctx);
  1256. mutex_lock(&ctx->mutex);
  1257. perf_counter_remove_from_context(counter);
  1258. mutex_unlock(&ctx->mutex);
  1259. mutex_lock(&counter->owner->perf_counter_mutex);
  1260. list_del_init(&counter->owner_entry);
  1261. mutex_unlock(&counter->owner->perf_counter_mutex);
  1262. put_task_struct(counter->owner);
  1263. free_counter(counter);
  1264. return 0;
  1265. }
  1266. /*
  1267. * Read the performance counter - simple non blocking version for now
  1268. */
  1269. static ssize_t
  1270. perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
  1271. {
	u64 values[4];	/* value, time_enabled, time_running, id */
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	if (counter->hw_event.read_format & PERF_FORMAT_ID)
		values[n++] = counter->id;
	mutex_unlock(&counter->child_mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}
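
/*
 * Illustrative sketch (not part of this file): how user-space might
 * parse the u64 vector written above, assuming a counter opened with
 * both PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING
 * (and no PERF_FORMAT_ID).  The scaling step is the conventional
 * compensation for time the counter spent scheduled out.
 */
#if 0	/* user-space example, not kernel code */
#include <stdint.h>
#include <unistd.h>

struct counter_reading {
	uint64_t value;		/* values[0] above */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

static int read_scaled(int fd, uint64_t *result)
{
	struct counter_reading r;

	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return -1;
	/* scale up for the fraction of time the counter was not running */
	if (r.time_running && r.time_running < r.time_enabled)
		r.value = r.value * r.time_enabled / r.time_running;
	*result = r.value;
	return 0;
}
#endif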

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}

static void perf_counter_for_each_sibling(struct perf_counter *counter,
					  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		func(sibling);
	mutex_unlock(&ctx->mutex);
}

/*
 * Holding the top-level counter's child_mutex means that any
 * descendant process that has inherited this counter will block
 * in sync_child_counter if it goes to exit, thus satisfying the
 * task existence requirements of perf_counter_enable/disable.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	perf_counter_for_each_sibling(counter, func);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_for_each_sibling(child, func);
	mutex_unlock(&counter->child_mutex);
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	void (*func)(struct perf_counter *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		func = perf_counter_enable;
		break;
	case PERF_COUNTER_IOC_DISABLE:
		func = perf_counter_disable;
		break;
	case PERF_COUNTER_IOC_RESET:
		func = perf_counter_reset;
		break;

	case PERF_COUNTER_IOC_REFRESH:
		return perf_counter_refresh(counter, arg);
	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_counter_for_each(counter, func);
	else
		perf_counter_for_each_child(counter, func);

	return 0;
}
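
/*
 * Illustrative sketch (not part of this file): driving the ioctls above
 * from user-space around a measured region.  Passing PERF_IOC_FLAG_GROUP
 * as the argument selects the perf_counter_for_each() walk over the
 * whole group rather than just this counter and its children.
 */
#if 0	/* user-space example, not kernel code */
#include <sys/ioctl.h>

static void measure(int group_fd)
{
	ioctl(group_fd, PERF_COUNTER_IOC_RESET, PERF_IOC_FLAG_GROUP);
	ioctl(group_fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

	/* ... region under measurement ... */

	ioctl(group_fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}
#endif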

int perf_counter_task_enable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_enable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

int perf_counter_task_disable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_disable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_counter_mmap_page *userpg;
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
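
/*
 * Illustrative sketch (not part of this file): the matching user-space
 * side of the seqlock-style protocol above.  ->lock is bumped before and
 * after an update, so a snapshot is consistent when the value is even
 * and unchanged across the reads.  read_pmc() is a placeholder for an
 * arch-specific user-space counter read (e.g. RDPMC); both it and the
 * exact ->index convention are assumptions here.
 */
#if 0	/* user-space example, not kernel code */
#include <stdint.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

extern uint64_t read_pmc(uint32_t index);	/* hypothetical */

static uint64_t read_self(volatile struct perf_counter_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		barrier();
		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += read_pmc(idx);
		barrier();
	} while (pc->lock != seq || (seq & 1));

	return count;
}
#endif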

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;
		/* data_pages[] holds nr_pages entries, 0..nr_pages-1 */
		if ((unsigned)nr >= data->nr_pages)
			goto unlock;
		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;
	int i;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
				      &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
unlock:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags &= ~VM_MAYWRITE;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
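
/*
 * Illustrative sketch (not part of this file): a user-space mapping that
 * satisfies the geometry checks above -- read-only, shared, offset 0,
 * and one control page plus a power-of-two number of data pages.
 */
#if 0	/* user-space example, not kernel code */
#include <sys/mman.h>
#include <unistd.h>

static void *map_counter(int fd, unsigned int n)	/* 2^n data pages */
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = (size_t)page * (1 + (1UL << n));

	return mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
}
#endif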

static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_counter *counter = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */
void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}

/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */
static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}

static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}
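
/*
 * Illustrative sketch (not part of this file): the same lock-free list
 * idiom in portable C11, to make the PENDING_TAIL trick explicit.  The
 * sentinel doubles as the "already queued" marker, so an entry can be
 * queued at most once, and draining is a single exchange.  (The kernel
 * version above additionally relies on the smp_wmb()/smp_rmb() pairing,
 * which this sketch omits.)
 */
#if 0	/* user-space example, not kernel code */
#include <stdatomic.h>
#include <stddef.h>

struct entry {
	_Atomic(struct entry *) next;
};

#define TAIL ((struct entry *)-1UL)

static _Atomic(struct entry *) list_head = TAIL;

static void push(struct entry *e)
{
	struct entry *expected = NULL;
	struct entry *old;

	/* a non-NULL ->next means the entry is already queued */
	if (!atomic_compare_exchange_strong(&e->next, &expected, TAIL))
		return;

	old = atomic_load(&list_head);
	do {
		atomic_store(&e->next, old);
	} while (!atomic_compare_exchange_weak(&list_head, &old, e));
}

static void drain(void (*fn)(struct entry *))
{
	struct entry *list = atomic_exchange(&list_head, TAIL);

	while (list != TAIL) {
		struct entry *e = list;

		list = atomic_load(&e->next);
		atomic_store(&e->next, NULL);	/* allow re-queueing */
		fn(e);
	}
}
#endif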

/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned long		head;
	unsigned long		offset;
	int			nmi;
	int			overflow;
	int			locked;
	unsigned long		flags;
};

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}

/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	unsigned long head;
	int cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_long_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());
	/*
	 * Therefore we have to check whether we did in fact miss one,
	 * and if so, re-take the lock and flush again.
	 */
	if (unlikely(atomic_long_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}
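
/*
 * Illustrative sketch (not part of this file): the consumer side that
 * the comment above alludes to -- read data_head, then execute the rmb()
 * (an acquire fence here) before touching the newly published bytes.
 * The process() callback and the caller-maintained tail are assumptions;
 * handling of events that wrap at the buffer edge is omitted.
 */
#if 0	/* user-space example, not kernel code */
#include <stdatomic.h>
#include <stdint.h>

static uint64_t consume(volatile struct perf_counter_mmap_page *pg,
			char *data, uint64_t mask, uint64_t tail,
			void (*process)(char *buf, uint64_t len))
{
	uint64_t head = pg->data_head;

	atomic_thread_fence(memory_order_acquire);	/* the userspace rmb() */

	if (tail < head)
		process(data + (tail & mask), head - tail);

	return head;	/* new tail */
}
#endif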

static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size,
			     int nmi, int overflow)
{
	struct perf_mmap_data *data;
	unsigned long offset, head;

	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	handle->data	 = data;
	handle->counter	 = counter;
	handle->nmi	 = nmi;
	handle->overflow = overflow;

	if (!data->nr_pages)
		goto fail;

	perf_output_lock(handle);
	do {
		/* data->head is an atomic_long_t; use the matching accessor */
		offset = head = atomic_long_read(&data->head);
		head += size;
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);

	return 0;

fail:
	perf_output_wakeup(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

static void perf_output_copy(struct perf_output_handle *handle,
			     void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

static void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = counter->hw_event.wakeup_events;

	if (handle->overflow && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}

static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_tgid_nr_ns(p, counter->ns);
}

static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_pid_nr_ns(p, counter->ns);
}

static void perf_counter_output(struct perf_counter *counter,
				int nmi, struct pt_regs *regs, u64 addr)
{
	int ret;
	u64 sample_type = counter->hw_event.sample_type;
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
		u32 pid, tid;
	} tid_entry;
	struct {
		u64 id;
		u64 counter;
	} group_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;
	struct {
		u32 cpu, reserved;
	} cpu_entry;

	header.type = 0;
	header.size = sizeof(header);

	header.misc = PERF_EVENT_MISC_OVERFLOW;
	header.misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		ip = perf_instruction_pointer(regs);
		header.type |= PERF_SAMPLE_IP;
		header.size += sizeof(ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		tid_entry.pid = perf_counter_pid(counter, current);
		tid_entry.tid = perf_counter_tid(counter, current);

		header.type |= PERF_SAMPLE_TID;
		header.size += sizeof(tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.type |= PERF_SAMPLE_TIME;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_ADDR) {
		header.type |= PERF_SAMPLE_ADDR;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CONFIG) {
		header.type |= PERF_SAMPLE_CONFIG;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		header.type |= PERF_SAMPLE_CPU;
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
	}

	if (sample_type & PERF_SAMPLE_GROUP) {
		header.type |= PERF_SAMPLE_GROUP;
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		callchain = perf_callchain(regs);

		if (callchain) {
			callchain_size = (1 + callchain->nr) * sizeof(u64);

			header.type |= PERF_SAMPLE_CALLCHAIN;
			header.size += callchain_size;
		}
	}

	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;

	perf_output_put(&handle, header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(&handle, ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(&handle, tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(&handle, time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(&handle, addr);

	if (sample_type & PERF_SAMPLE_CONFIG)
		perf_output_put(&handle, counter->hw_event.config);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(&handle, cpu_entry);

	/*
	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
	 */
	if (sample_type & PERF_SAMPLE_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;

		perf_output_put(&handle, nr);

		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
				sub->pmu->read(sub);

			group_entry.id = sub->id;
			group_entry.counter = atomic64_read(&sub->count);

			perf_output_put(&handle, group_entry);
		}
	}

	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);

	perf_output_end(&handle);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
	comm_event->event.tid = perf_counter_tid(counter, comm_event->task);

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter,
				   struct perf_comm_event *comm_event)
{
	if (counter->hw_event.comm &&
	    comm_event->event.header.type == PERF_EVENT_COMM)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter, comm_event))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	unsigned int size;
	char *comm = comm_event->task->comm;

	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_tracking))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		.event	= {
			.header = { .type = PERF_EVENT_COMM, },
		},
	};

	perf_counter_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct file	*file;
	char		*file_name;
	int		file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	mmap_event->event.pid = perf_counter_pid(counter, current);
	mmap_event->event.tid = perf_counter_tid(counter, current);

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->hw_event.mmap &&
	    mmap_event->event.header.type == PERF_EVENT_MMAP)
		return 1;

	if (counter->hw_event.munmap &&
	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct file *file = mmap_event->file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

	kfree(buf);
}

void perf_counter_mmap(unsigned long addr, unsigned long len,
		       unsigned long pgoff, struct file *file)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MMAP, },
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

void perf_counter_munmap(unsigned long addr, unsigned long len,
			 unsigned long pgoff, struct file *file)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_munmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MUNMAP, },
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

/*
 * Log sample_period changes so that analyzing tools can re-normalize the
 * event flow.
 */
static void perf_log_period(struct perf_counter *counter, u64 period)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				period;
	} freq_event = {
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
			.size = sizeof(freq_event),
		},
		.time	= sched_clock(),
		.period	= period,
	};

	if (counter->hw.sample_period == period)
		return;

	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, freq_event);
	perf_output_end(&handle);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
	} throttle_event = {
		.header = {
			/* distinguish throttle/unthrottle via the enable flag */
			.type = PERF_EVENT_THROTTLE + enable,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time	= sched_clock(),
	};

	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

/*
 * Generic counter overflow handling.
 */

int perf_counter_overflow(struct perf_counter *counter,
			  int nmi, struct pt_regs *regs, u64 addr)
{
	int events = atomic_read(&counter->event_limit);
	int throttle = counter->pmu->unthrottle != NULL;
	int ret = 0;

	if (!throttle) {
		counter->hw.interrupts++;
	} else if (counter->hw.interrupts != MAX_INTERRUPTS) {
		counter->hw.interrupts++;
		if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
			counter->hw.interrupts = MAX_INTERRUPTS;
			perf_log_throttle(counter, 0);
			ret = 1;
		}
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

	counter->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
		counter->pending_kill = POLL_HUP;
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

	perf_counter_output(counter, nmi, regs, addr);
	return ret;
}
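
/*
 * Worked example for the throttle test above (assuming
 * sysctl_perf_counter_limit is interpreted as interrupts per second):
 * with HZ=1000 and a limit of 100000, the 101st interrupt within a
 * single tick makes HZ * interrupts exceed the limit; the counter is
 * marked MAX_INTERRUPTS, a throttle event is logged, and it stays
 * throttled until the periodic unthrottle resets hw.interrupts.
 */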

/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}
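
/*
 * Worked example of the negative bias set up above: with sample_period
 * = 1000 and period_left = 1000, ->prev_count and ->count both start at
 * -1000.  perf_swcounter_add() below accumulates events with
 * atomic64_add_negative(); the result stays negative for the first 999
 * events, and the 1000th makes it non-negative, which is the signal
 * that a full period has elapsed and the overflow path should run.
 */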

static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_counter *counter;
	struct pt_regs *regs;
	u64 period;

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->pmu->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (perf_counter_overflow(counter, 0, regs, 0))
			ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, counter->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs, u64 addr)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	if (perf_counter_overflow(counter, nmi, regs, addr))
		/* soft-disable the counter */
		;
}

static int perf_swcounter_is_counting(struct perf_counter *counter)
{
	struct perf_counter_context *ctx;
	unsigned long flags;
	int count;

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		return 1;

	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
		return 0;

	/*
	 * If the counter is inactive, it could be just because
	 * its task is scheduled out, or because it's in a group
	 * which could not go on the PMU. We want to count in
	 * the first case but not the second. If the context is
	 * currently active then an inactive software counter must
	 * be the second case. If it's not currently active then
	 * we need to know whether the counter was active when the
	 * context was last active, which we can determine by
	 * comparing counter->tstamp_stopped with ctx->time.
	 *
	 * We are within an RCU read-side critical section,
	 * which protects the existence of *ctx.
	 */
	ctx = counter->ctx;
	spin_lock_irqsave(&ctx->lock, flags);
	count = 1;
	/* Re-check state now we have the lock */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->ctx->is_active ||
	    counter->tstamp_stopped < ctx->time)
		count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	u64 event_config;

	event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;

	if (!perf_swcounter_is_counting(counter))
		return 0;

	if (counter->hw_event.config != event_config)
		return 0;

	if (regs) {
		if (counter->hw_event.exclude_user && user_mode(regs))
			return 0;

		if (counter->hw_event.exclude_kernel && !user_mode(regs))
			return 0;
	}

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);

	if (counter->hw.sample_period && !neg && regs)
		perf_swcounter_overflow(counter, nmi, regs, addr);
}

static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs,
				     u64 addr)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs, addr);
	}
	rcu_read_unlock();
}

static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}

static void __perf_swcounter_event(enum perf_event_types type, u32 event,
				   u64 nr, int nmi, struct pt_regs *regs,
				   u64 addr)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);
	struct perf_counter_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, regs, addr);
	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
	rcu_read_unlock();

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}

void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
}
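
/*
 * Illustrative sketch (not part of this file): a hypothetical call site
 * of the kind the scheduler would use to feed a software counter,
 * mirroring the CPU-migration hook below.  nmi=1 requests the deferred
 * wakeup path because such hooks can run under rq->lock; regs is NULL
 * since there is no meaningful sample IP.
 */
#if 0
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, NULL, 0);
#endif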

static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};

/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};

/*
 * Software counter: task time clock
 */

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&counter->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 now;

	now = counter->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
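
/*
 * Illustrative sketch (not part of this file): requesting the task-clock
 * software counter above.  The config packing mirrors what
 * perf_swcounter_match() unpacks; field names follow this file, but the
 * exact hw_event layout should be treated as an assumption.
 */
#if 0	/* user-space example, not kernel code */
	struct perf_counter_hw_event ev = {
		.config = ((u64)PERF_TYPE_SOFTWARE << PERF_COUNTER_TYPE_SHIFT) |
			  PERF_COUNT_TASK_CLOCK,
		.sample_period = 1000000,	/* 1ms of task time per sample */
	};
#endif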

/*
 * Software counter: cpu migrations
 */
void perf_counter_task_migration(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx;

	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
				 PERF_COUNT_CPU_MIGRATIONS,
				 1, 1, NULL, 0);

	ctx = perf_pin_task_context(task);
	if (ctx) {
		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
					 PERF_COUNT_CPU_MIGRATIONS,
					 1, 1, NULL, 0);
		perf_unpin_context(ctx);
	}
}

#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->hw_event));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->hw_event);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.sample_period = counter->hw_event.sample_period;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif

static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
	const struct pmu *pmu = NULL;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (perf_event_id(&counter->hw_event)) {
	case PERF_COUNT_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
	case PERF_COUNT_CPU_MIGRATIONS:
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	counter->cpu		= cpu;
	counter->hw_event	= *hw_event;
	counter->group_leader	= group_leader;
	counter->pmu		= NULL;
	counter->ctx		= ctx;
	counter->oncpu		= -1;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	pmu = NULL;

	hwc = &counter->hw;
	if (hw_event->freq && hw_event->sample_freq)
		hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
	else
		hwc->sample_period = hw_event->sample_period;

	/*
	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
	 */
	if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
		goto done;

	if (perf_event_raw(hw_event)) {
		pmu = hw_perf_counter_init(counter);
		goto done;
	}

	switch (perf_event_type(hw_event)) {
	case PERF_TYPE_HARDWARE:
		pmu = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_counter_init(counter);
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->pmu = pmu;

	atomic_inc(&nr_counters);
	if (counter->hw_event.mmap)
		atomic_inc(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_inc(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_inc(&nr_comm_tracking);

	return counter;
}
static atomic64_t perf_counter_id;

/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr: event type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader counter fd
 * @flags: reserved for future use, must be zero
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

	counter->ns = get_pid_ns(current->nsproxy->pid_ns);
	counter->id = atomic64_inc_return(&perf_counter_id);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_ctx(ctx);

	goto out_fput;
}
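
/*
 * Illustrative only (userspace, not part of this file): assuming the
 * architecture exposes the syscall number as __NR_perf_counter_open,
 * a counter for the current task on any CPU, in its own group, could
 * be opened roughly like this:
 *
 *	struct perf_counter_hw_event hw_event;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	... fill in the desired event configuration ...
 *	fd = syscall(__NR_perf_counter_open, &hw_event, getpid(), -1, -1, 0);
 *
 * cpu == -1 means "any CPU the task runs on", and group_fd == -1 makes
 * this counter its own group leader.
 */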
/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en, dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	/*
	 * Link it up in the child's context:
	 */
	add_counter_to_ctx(child_counter, child_ctx);

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	return child_counter;
}
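
/*
 * Note: inherit_group() below mirrors a whole counter group into the
 * child.  The leader is inherited first with a NULL group_leader, so
 * the new counter becomes its own leader in the child context; each
 * sibling is then inherited with that new leader, reproducing the
 * parent's group structure one level deep.
 */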
static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 child_val;

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}
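
/*
 * Note: after sync_child_counter(), a read of the parent counter
 * reflects the child's contribution - the child's count has been
 * folded into parent->count, and its enabled/running times into the
 * parent's child_total_time_* fields, which the read path can report
 * alongside the parent's own times.
 */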
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;

	update_counter_times(child_counter);
	perf_counter_remove_from_context(child_counter);

	parent_counter = child_counter->parent;
	/*
	 * It can happen that the parent exits first, and has counters
	 * that are still around due to the child reference.  These
	 * counters need to be zapped - but otherwise linger.
	 *
	 * Only inherited counters (those with a parent) are freed here;
	 * counters opened directly on this task stay around until their
	 * file descriptor is released.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		free_counter(child_counter);
	}
}
/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_counter_ctxp))
		return;

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
	__perf_counter_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
	spin_unlock(&child_ctx->lock);
	local_irq_restore(flags);

	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child_counter, child_ctx);

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
/*
 * Free an unexposed, unused context as created by inheritance in
 * perf_counter_init_task() below; used by fork() in case of failure.
 */
void perf_counter_free_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter *counter, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
		struct perf_counter *parent = counter->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&counter->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_counter(counter, ctx);
		free_counter(counter);
	}

	if (!list_empty(&ctx->counter_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
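
/*
 * Note: every counter in a context freed here is expected to be an
 * inherited one (hence the WARN_ON_ONCE when ->parent is missing):
 * the context was never exposed via a file descriptor, so nothing
 * else can have installed counters in it.
 */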
/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter_context *cloned_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_counter_ctxp = NULL;

	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

	if (likely(!parent->perf_counter_ctxp))
		return 0;

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
	 */
	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
		return -ENOMEM;

	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
	get_task_struct(child);

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;

		if (!counter->hw_event.inherit) {
			inherited_all = 0;
			continue;
		}

		ret = inherit_group(counter, parent, parent_ctx,
				    child, child_ctx);
		if (ret) {
			inherited_all = 0;
			break;
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
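
/*
 * Note: marking the child context as a clone (the parent_ctx and
 * parent_gen assignments above) is what later lets the scheduler path
 * treat the two contexts as equivalent and swap them wholesale on a
 * context switch, instead of rescheduling every counter individually.
 */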
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};
void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
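
/*
 * Note: the boot CPU is set up by calling the notifier directly above,
 * since it was already online before the notifier could be registered;
 * all other CPUs are initialized through the hotplug notifier as they
 * come up.
 */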
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};
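
/*
 * Illustrative only: once the group is registered below, the two knobs
 * should appear under the cpu sysdev class, typically as
 * /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit},
 * e.g.:
 *
 *	# echo 2 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	# echo 1 > /sys/devices/system/cpu/perf_counters/overcommit
 */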
static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);