perf_counter.c 100 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
23833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256
  1. /*
  2. * Performance counter core code
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7. * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8. *
  9. * For licensing details see kernel-base/COPYING
  10. */
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/cpu.h>
  14. #include <linux/smp.h>
  15. #include <linux/file.h>
  16. #include <linux/poll.h>
  17. #include <linux/sysfs.h>
  18. #include <linux/dcache.h>
  19. #include <linux/percpu.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/hardirq.h>
  23. #include <linux/rculist.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/syscalls.h>
  26. #include <linux/anon_inodes.h>
  27. #include <linux/kernel_stat.h>
  28. #include <linux/perf_counter.h>
  29. #include <asm/irq_regs.h>
  30. /*
  31. * Each CPU has a list of per CPU counters:
  32. */
  33. DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
  34. int perf_max_counters __read_mostly = 1;
  35. static int perf_reserved_percpu __read_mostly;
  36. static int perf_overcommit __read_mostly = 1;
  37. static atomic_t nr_counters __read_mostly;
  38. static atomic_t nr_mmap_counters __read_mostly;
  39. static atomic_t nr_comm_counters __read_mostly;
  40. /*
  41. * perf counter paranoia level:
  42. * 0 - not paranoid
  43. * 1 - disallow cpu counters to unpriv
  44. * 2 - disallow kernel profiling to unpriv
  45. */
  46. int sysctl_perf_counter_paranoid __read_mostly;
  47. static inline bool perf_paranoid_cpu(void)
  48. {
  49. return sysctl_perf_counter_paranoid > 0;
  50. }
  51. static inline bool perf_paranoid_kernel(void)
  52. {
  53. return sysctl_perf_counter_paranoid > 1;
  54. }
  55. int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
  56. /*
  57. * max perf counter sample rate
  58. */
  59. int sysctl_perf_counter_sample_rate __read_mostly = 100000;
  60. static atomic64_t perf_counter_id;
  61. /*
  62. * Lock for (sysadmin-configurable) counter reservations:
  63. */
  64. static DEFINE_SPINLOCK(perf_resource_lock);
  65. /*
  66. * Architecture provided APIs - weak aliases:
  67. */
  68. extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
  69. {
  70. return NULL;
  71. }
  72. void __weak hw_perf_disable(void) { barrier(); }
  73. void __weak hw_perf_enable(void) { barrier(); }
  74. void __weak hw_perf_counter_setup(int cpu) { barrier(); }
  75. int __weak
  76. hw_perf_group_sched_in(struct perf_counter *group_leader,
  77. struct perf_cpu_context *cpuctx,
  78. struct perf_counter_context *ctx, int cpu)
  79. {
  80. return 0;
  81. }
  82. void __weak perf_counter_print_debug(void) { }
  83. static DEFINE_PER_CPU(int, disable_count);
  84. void __perf_disable(void)
  85. {
  86. __get_cpu_var(disable_count)++;
  87. }
  88. bool __perf_enable(void)
  89. {
  90. return !--__get_cpu_var(disable_count);
  91. }
  92. void perf_disable(void)
  93. {
  94. __perf_disable();
  95. hw_perf_disable();
  96. }
  97. void perf_enable(void)
  98. {
  99. if (__perf_enable())
  100. hw_perf_enable();
  101. }
  102. static void get_ctx(struct perf_counter_context *ctx)
  103. {
  104. atomic_inc(&ctx->refcount);
  105. }
  106. static void free_ctx(struct rcu_head *head)
  107. {
  108. struct perf_counter_context *ctx;
  109. ctx = container_of(head, struct perf_counter_context, rcu_head);
  110. kfree(ctx);
  111. }
  112. static void put_ctx(struct perf_counter_context *ctx)
  113. {
  114. if (atomic_dec_and_test(&ctx->refcount)) {
  115. if (ctx->parent_ctx)
  116. put_ctx(ctx->parent_ctx);
  117. if (ctx->task)
  118. put_task_struct(ctx->task);
  119. call_rcu(&ctx->rcu_head, free_ctx);
  120. }
  121. }
  122. /*
  123. * Get the perf_counter_context for a task and lock it.
  124. * This has to cope with with the fact that until it is locked,
  125. * the context could get moved to another task.
  126. */
  127. static struct perf_counter_context *
  128. perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  129. {
  130. struct perf_counter_context *ctx;
  131. rcu_read_lock();
  132. retry:
  133. ctx = rcu_dereference(task->perf_counter_ctxp);
  134. if (ctx) {
  135. /*
  136. * If this context is a clone of another, it might
  137. * get swapped for another underneath us by
  138. * perf_counter_task_sched_out, though the
  139. * rcu_read_lock() protects us from any context
  140. * getting freed. Lock the context and check if it
  141. * got swapped before we could get the lock, and retry
  142. * if so. If we locked the right context, then it
  143. * can't get swapped on us any more.
  144. */
  145. spin_lock_irqsave(&ctx->lock, *flags);
  146. if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
  147. spin_unlock_irqrestore(&ctx->lock, *flags);
  148. goto retry;
  149. }
  150. }
  151. rcu_read_unlock();
  152. return ctx;
  153. }
  154. /*
  155. * Get the context for a task and increment its pin_count so it
  156. * can't get swapped to another task. This also increments its
  157. * reference count so that the context can't get freed.
  158. */
  159. static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
  160. {
  161. struct perf_counter_context *ctx;
  162. unsigned long flags;
  163. ctx = perf_lock_task_context(task, &flags);
  164. if (ctx) {
  165. ++ctx->pin_count;
  166. get_ctx(ctx);
  167. spin_unlock_irqrestore(&ctx->lock, flags);
  168. }
  169. return ctx;
  170. }
  171. static void perf_unpin_context(struct perf_counter_context *ctx)
  172. {
  173. unsigned long flags;
  174. spin_lock_irqsave(&ctx->lock, flags);
  175. --ctx->pin_count;
  176. spin_unlock_irqrestore(&ctx->lock, flags);
  177. put_ctx(ctx);
  178. }
  179. /*
  180. * Add a counter from the lists for its context.
  181. * Must be called with ctx->mutex and ctx->lock held.
  182. */
  183. static void
  184. list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
  185. {
  186. struct perf_counter *group_leader = counter->group_leader;
  187. /*
  188. * Depending on whether it is a standalone or sibling counter,
  189. * add it straight to the context's counter list, or to the group
  190. * leader's sibling list:
  191. */
  192. if (group_leader == counter)
  193. list_add_tail(&counter->list_entry, &ctx->counter_list);
  194. else {
  195. list_add_tail(&counter->list_entry, &group_leader->sibling_list);
  196. group_leader->nr_siblings++;
  197. }
  198. list_add_rcu(&counter->event_entry, &ctx->event_list);
  199. ctx->nr_counters++;
  200. }
  201. /*
  202. * Remove a counter from the lists for its context.
  203. * Must be called with ctx->mutex and ctx->lock held.
  204. */
  205. static void
  206. list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
  207. {
  208. struct perf_counter *sibling, *tmp;
  209. if (list_empty(&counter->list_entry))
  210. return;
  211. ctx->nr_counters--;
  212. list_del_init(&counter->list_entry);
  213. list_del_rcu(&counter->event_entry);
  214. if (counter->group_leader != counter)
  215. counter->group_leader->nr_siblings--;
  216. /*
  217. * If this was a group counter with sibling counters then
  218. * upgrade the siblings to singleton counters by adding them
  219. * to the context list directly:
  220. */
  221. list_for_each_entry_safe(sibling, tmp,
  222. &counter->sibling_list, list_entry) {
  223. list_move_tail(&sibling->list_entry, &ctx->counter_list);
  224. sibling->group_leader = sibling;
  225. }
  226. }
  227. static void
  228. counter_sched_out(struct perf_counter *counter,
  229. struct perf_cpu_context *cpuctx,
  230. struct perf_counter_context *ctx)
  231. {
  232. if (counter->state != PERF_COUNTER_STATE_ACTIVE)
  233. return;
  234. counter->state = PERF_COUNTER_STATE_INACTIVE;
  235. counter->tstamp_stopped = ctx->time;
  236. counter->pmu->disable(counter);
  237. counter->oncpu = -1;
  238. if (!is_software_counter(counter))
  239. cpuctx->active_oncpu--;
  240. ctx->nr_active--;
  241. if (counter->attr.exclusive || !cpuctx->active_oncpu)
  242. cpuctx->exclusive = 0;
  243. }
  244. static void
  245. group_sched_out(struct perf_counter *group_counter,
  246. struct perf_cpu_context *cpuctx,
  247. struct perf_counter_context *ctx)
  248. {
  249. struct perf_counter *counter;
  250. if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
  251. return;
  252. counter_sched_out(group_counter, cpuctx, ctx);
  253. /*
  254. * Schedule out siblings (if any):
  255. */
  256. list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
  257. counter_sched_out(counter, cpuctx, ctx);
  258. if (group_counter->attr.exclusive)
  259. cpuctx->exclusive = 0;
  260. }
  261. /*
  262. * Cross CPU call to remove a performance counter
  263. *
  264. * We disable the counter on the hardware level first. After that we
  265. * remove it from the context list.
  266. */
  267. static void __perf_counter_remove_from_context(void *info)
  268. {
  269. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  270. struct perf_counter *counter = info;
  271. struct perf_counter_context *ctx = counter->ctx;
  272. /*
  273. * If this is a task context, we need to check whether it is
  274. * the current task context of this cpu. If not it has been
  275. * scheduled out before the smp call arrived.
  276. */
  277. if (ctx->task && cpuctx->task_ctx != ctx)
  278. return;
  279. spin_lock(&ctx->lock);
  280. /*
  281. * Protect the list operation against NMI by disabling the
  282. * counters on a global level.
  283. */
  284. perf_disable();
  285. counter_sched_out(counter, cpuctx, ctx);
  286. list_del_counter(counter, ctx);
  287. if (!ctx->task) {
  288. /*
  289. * Allow more per task counters with respect to the
  290. * reservation:
  291. */
  292. cpuctx->max_pertask =
  293. min(perf_max_counters - ctx->nr_counters,
  294. perf_max_counters - perf_reserved_percpu);
  295. }
  296. perf_enable();
  297. spin_unlock(&ctx->lock);
  298. }
  299. /*
  300. * Remove the counter from a task's (or a CPU's) list of counters.
  301. *
  302. * Must be called with ctx->mutex held.
  303. *
  304. * CPU counters are removed with a smp call. For task counters we only
  305. * call when the task is on a CPU.
  306. *
  307. * If counter->ctx is a cloned context, callers must make sure that
  308. * every task struct that counter->ctx->task could possibly point to
  309. * remains valid. This is OK when called from perf_release since
  310. * that only calls us on the top-level context, which can't be a clone.
  311. * When called from perf_counter_exit_task, it's OK because the
  312. * context has been detached from its task.
  313. */
  314. static void perf_counter_remove_from_context(struct perf_counter *counter)
  315. {
  316. struct perf_counter_context *ctx = counter->ctx;
  317. struct task_struct *task = ctx->task;
  318. if (!task) {
  319. /*
  320. * Per cpu counters are removed via an smp call and
  321. * the removal is always sucessful.
  322. */
  323. smp_call_function_single(counter->cpu,
  324. __perf_counter_remove_from_context,
  325. counter, 1);
  326. return;
  327. }
  328. retry:
  329. task_oncpu_function_call(task, __perf_counter_remove_from_context,
  330. counter);
  331. spin_lock_irq(&ctx->lock);
  332. /*
  333. * If the context is active we need to retry the smp call.
  334. */
  335. if (ctx->nr_active && !list_empty(&counter->list_entry)) {
  336. spin_unlock_irq(&ctx->lock);
  337. goto retry;
  338. }
  339. /*
  340. * The lock prevents that this context is scheduled in so we
  341. * can remove the counter safely, if the call above did not
  342. * succeed.
  343. */
  344. if (!list_empty(&counter->list_entry)) {
  345. list_del_counter(counter, ctx);
  346. }
  347. spin_unlock_irq(&ctx->lock);
  348. }
  349. static inline u64 perf_clock(void)
  350. {
  351. return cpu_clock(smp_processor_id());
  352. }
  353. /*
  354. * Update the record of the current time in a context.
  355. */
  356. static void update_context_time(struct perf_counter_context *ctx)
  357. {
  358. u64 now = perf_clock();
  359. ctx->time += now - ctx->timestamp;
  360. ctx->timestamp = now;
  361. }
  362. /*
  363. * Update the total_time_enabled and total_time_running fields for a counter.
  364. */
  365. static void update_counter_times(struct perf_counter *counter)
  366. {
  367. struct perf_counter_context *ctx = counter->ctx;
  368. u64 run_end;
  369. if (counter->state < PERF_COUNTER_STATE_INACTIVE)
  370. return;
  371. counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
  372. if (counter->state == PERF_COUNTER_STATE_INACTIVE)
  373. run_end = counter->tstamp_stopped;
  374. else
  375. run_end = ctx->time;
  376. counter->total_time_running = run_end - counter->tstamp_running;
  377. }
  378. /*
  379. * Update total_time_enabled and total_time_running for all counters in a group.
  380. */
  381. static void update_group_times(struct perf_counter *leader)
  382. {
  383. struct perf_counter *counter;
  384. update_counter_times(leader);
  385. list_for_each_entry(counter, &leader->sibling_list, list_entry)
  386. update_counter_times(counter);
  387. }
  388. /*
  389. * Cross CPU call to disable a performance counter
  390. */
  391. static void __perf_counter_disable(void *info)
  392. {
  393. struct perf_counter *counter = info;
  394. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  395. struct perf_counter_context *ctx = counter->ctx;
  396. /*
  397. * If this is a per-task counter, need to check whether this
  398. * counter's task is the current task on this cpu.
  399. */
  400. if (ctx->task && cpuctx->task_ctx != ctx)
  401. return;
  402. spin_lock(&ctx->lock);
  403. /*
  404. * If the counter is on, turn it off.
  405. * If it is in error state, leave it in error state.
  406. */
  407. if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
  408. update_context_time(ctx);
  409. update_counter_times(counter);
  410. if (counter == counter->group_leader)
  411. group_sched_out(counter, cpuctx, ctx);
  412. else
  413. counter_sched_out(counter, cpuctx, ctx);
  414. counter->state = PERF_COUNTER_STATE_OFF;
  415. }
  416. spin_unlock(&ctx->lock);
  417. }
  418. /*
  419. * Disable a counter.
  420. *
  421. * If counter->ctx is a cloned context, callers must make sure that
  422. * every task struct that counter->ctx->task could possibly point to
  423. * remains valid. This condition is satisifed when called through
  424. * perf_counter_for_each_child or perf_counter_for_each because they
  425. * hold the top-level counter's child_mutex, so any descendant that
  426. * goes to exit will block in sync_child_counter.
  427. * When called from perf_pending_counter it's OK because counter->ctx
  428. * is the current context on this CPU and preemption is disabled,
  429. * hence we can't get into perf_counter_task_sched_out for this context.
  430. */
  431. static void perf_counter_disable(struct perf_counter *counter)
  432. {
  433. struct perf_counter_context *ctx = counter->ctx;
  434. struct task_struct *task = ctx->task;
  435. if (!task) {
  436. /*
  437. * Disable the counter on the cpu that it's on
  438. */
  439. smp_call_function_single(counter->cpu, __perf_counter_disable,
  440. counter, 1);
  441. return;
  442. }
  443. retry:
  444. task_oncpu_function_call(task, __perf_counter_disable, counter);
  445. spin_lock_irq(&ctx->lock);
  446. /*
  447. * If the counter is still active, we need to retry the cross-call.
  448. */
  449. if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
  450. spin_unlock_irq(&ctx->lock);
  451. goto retry;
  452. }
  453. /*
  454. * Since we have the lock this context can't be scheduled
  455. * in, so we can change the state safely.
  456. */
  457. if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  458. update_counter_times(counter);
  459. counter->state = PERF_COUNTER_STATE_OFF;
  460. }
  461. spin_unlock_irq(&ctx->lock);
  462. }
  463. static int
  464. counter_sched_in(struct perf_counter *counter,
  465. struct perf_cpu_context *cpuctx,
  466. struct perf_counter_context *ctx,
  467. int cpu)
  468. {
  469. if (counter->state <= PERF_COUNTER_STATE_OFF)
  470. return 0;
  471. counter->state = PERF_COUNTER_STATE_ACTIVE;
  472. counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
  473. /*
  474. * The new state must be visible before we turn it on in the hardware:
  475. */
  476. smp_wmb();
  477. if (counter->pmu->enable(counter)) {
  478. counter->state = PERF_COUNTER_STATE_INACTIVE;
  479. counter->oncpu = -1;
  480. return -EAGAIN;
  481. }
  482. counter->tstamp_running += ctx->time - counter->tstamp_stopped;
  483. if (!is_software_counter(counter))
  484. cpuctx->active_oncpu++;
  485. ctx->nr_active++;
  486. if (counter->attr.exclusive)
  487. cpuctx->exclusive = 1;
  488. return 0;
  489. }
  490. static int
  491. group_sched_in(struct perf_counter *group_counter,
  492. struct perf_cpu_context *cpuctx,
  493. struct perf_counter_context *ctx,
  494. int cpu)
  495. {
  496. struct perf_counter *counter, *partial_group;
  497. int ret;
  498. if (group_counter->state == PERF_COUNTER_STATE_OFF)
  499. return 0;
  500. ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
  501. if (ret)
  502. return ret < 0 ? ret : 0;
  503. if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
  504. return -EAGAIN;
  505. /*
  506. * Schedule in siblings as one group (if any):
  507. */
  508. list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
  509. if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
  510. partial_group = counter;
  511. goto group_error;
  512. }
  513. }
  514. return 0;
  515. group_error:
  516. /*
  517. * Groups can be scheduled in as one unit only, so undo any
  518. * partial group before returning:
  519. */
  520. list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
  521. if (counter == partial_group)
  522. break;
  523. counter_sched_out(counter, cpuctx, ctx);
  524. }
  525. counter_sched_out(group_counter, cpuctx, ctx);
  526. return -EAGAIN;
  527. }
  528. /*
  529. * Return 1 for a group consisting entirely of software counters,
  530. * 0 if the group contains any hardware counters.
  531. */
  532. static int is_software_only_group(struct perf_counter *leader)
  533. {
  534. struct perf_counter *counter;
  535. if (!is_software_counter(leader))
  536. return 0;
  537. list_for_each_entry(counter, &leader->sibling_list, list_entry)
  538. if (!is_software_counter(counter))
  539. return 0;
  540. return 1;
  541. }
  542. /*
  543. * Work out whether we can put this counter group on the CPU now.
  544. */
  545. static int group_can_go_on(struct perf_counter *counter,
  546. struct perf_cpu_context *cpuctx,
  547. int can_add_hw)
  548. {
  549. /*
  550. * Groups consisting entirely of software counters can always go on.
  551. */
  552. if (is_software_only_group(counter))
  553. return 1;
  554. /*
  555. * If an exclusive group is already on, no other hardware
  556. * counters can go on.
  557. */
  558. if (cpuctx->exclusive)
  559. return 0;
  560. /*
  561. * If this group is exclusive and there are already
  562. * counters on the CPU, it can't go on.
  563. */
  564. if (counter->attr.exclusive && cpuctx->active_oncpu)
  565. return 0;
  566. /*
  567. * Otherwise, try to add it if all previous groups were able
  568. * to go on.
  569. */
  570. return can_add_hw;
  571. }
  572. static void add_counter_to_ctx(struct perf_counter *counter,
  573. struct perf_counter_context *ctx)
  574. {
  575. list_add_counter(counter, ctx);
  576. counter->tstamp_enabled = ctx->time;
  577. counter->tstamp_running = ctx->time;
  578. counter->tstamp_stopped = ctx->time;
  579. }
  580. /*
  581. * Cross CPU call to install and enable a performance counter
  582. *
  583. * Must be called with ctx->mutex held
  584. */
  585. static void __perf_install_in_context(void *info)
  586. {
  587. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  588. struct perf_counter *counter = info;
  589. struct perf_counter_context *ctx = counter->ctx;
  590. struct perf_counter *leader = counter->group_leader;
  591. int cpu = smp_processor_id();
  592. int err;
  593. /*
  594. * If this is a task context, we need to check whether it is
  595. * the current task context of this cpu. If not it has been
  596. * scheduled out before the smp call arrived.
  597. * Or possibly this is the right context but it isn't
  598. * on this cpu because it had no counters.
  599. */
  600. if (ctx->task && cpuctx->task_ctx != ctx) {
  601. if (cpuctx->task_ctx || ctx->task != current)
  602. return;
  603. cpuctx->task_ctx = ctx;
  604. }
  605. spin_lock(&ctx->lock);
  606. ctx->is_active = 1;
  607. update_context_time(ctx);
  608. /*
  609. * Protect the list operation against NMI by disabling the
  610. * counters on a global level. NOP for non NMI based counters.
  611. */
  612. perf_disable();
  613. add_counter_to_ctx(counter, ctx);
  614. /*
  615. * Don't put the counter on if it is disabled or if
  616. * it is in a group and the group isn't on.
  617. */
  618. if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
  619. (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
  620. goto unlock;
  621. /*
  622. * An exclusive counter can't go on if there are already active
  623. * hardware counters, and no hardware counter can go on if there
  624. * is already an exclusive counter on.
  625. */
  626. if (!group_can_go_on(counter, cpuctx, 1))
  627. err = -EEXIST;
  628. else
  629. err = counter_sched_in(counter, cpuctx, ctx, cpu);
  630. if (err) {
  631. /*
  632. * This counter couldn't go on. If it is in a group
  633. * then we have to pull the whole group off.
  634. * If the counter group is pinned then put it in error state.
  635. */
  636. if (leader != counter)
  637. group_sched_out(leader, cpuctx, ctx);
  638. if (leader->attr.pinned) {
  639. update_group_times(leader);
  640. leader->state = PERF_COUNTER_STATE_ERROR;
  641. }
  642. }
  643. if (!err && !ctx->task && cpuctx->max_pertask)
  644. cpuctx->max_pertask--;
  645. unlock:
  646. perf_enable();
  647. spin_unlock(&ctx->lock);
  648. }
  649. /*
  650. * Attach a performance counter to a context
  651. *
  652. * First we add the counter to the list with the hardware enable bit
  653. * in counter->hw_config cleared.
  654. *
  655. * If the counter is attached to a task which is on a CPU we use a smp
  656. * call to enable it in the task context. The task might have been
  657. * scheduled away, but we check this in the smp call again.
  658. *
  659. * Must be called with ctx->mutex held.
  660. */
  661. static void
  662. perf_install_in_context(struct perf_counter_context *ctx,
  663. struct perf_counter *counter,
  664. int cpu)
  665. {
  666. struct task_struct *task = ctx->task;
  667. if (!task) {
  668. /*
  669. * Per cpu counters are installed via an smp call and
  670. * the install is always sucessful.
  671. */
  672. smp_call_function_single(cpu, __perf_install_in_context,
  673. counter, 1);
  674. return;
  675. }
  676. retry:
  677. task_oncpu_function_call(task, __perf_install_in_context,
  678. counter);
  679. spin_lock_irq(&ctx->lock);
  680. /*
  681. * we need to retry the smp call.
  682. */
  683. if (ctx->is_active && list_empty(&counter->list_entry)) {
  684. spin_unlock_irq(&ctx->lock);
  685. goto retry;
  686. }
  687. /*
  688. * The lock prevents that this context is scheduled in so we
  689. * can add the counter safely, if it the call above did not
  690. * succeed.
  691. */
  692. if (list_empty(&counter->list_entry))
  693. add_counter_to_ctx(counter, ctx);
  694. spin_unlock_irq(&ctx->lock);
  695. }
  696. /*
  697. * Cross CPU call to enable a performance counter
  698. */
  699. static void __perf_counter_enable(void *info)
  700. {
  701. struct perf_counter *counter = info;
  702. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  703. struct perf_counter_context *ctx = counter->ctx;
  704. struct perf_counter *leader = counter->group_leader;
  705. int err;
  706. /*
  707. * If this is a per-task counter, need to check whether this
  708. * counter's task is the current task on this cpu.
  709. */
  710. if (ctx->task && cpuctx->task_ctx != ctx) {
  711. if (cpuctx->task_ctx || ctx->task != current)
  712. return;
  713. cpuctx->task_ctx = ctx;
  714. }
  715. spin_lock(&ctx->lock);
  716. ctx->is_active = 1;
  717. update_context_time(ctx);
  718. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  719. goto unlock;
  720. counter->state = PERF_COUNTER_STATE_INACTIVE;
  721. counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
  722. /*
  723. * If the counter is in a group and isn't the group leader,
  724. * then don't put it on unless the group is on.
  725. */
  726. if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
  727. goto unlock;
  728. if (!group_can_go_on(counter, cpuctx, 1)) {
  729. err = -EEXIST;
  730. } else {
  731. perf_disable();
  732. if (counter == leader)
  733. err = group_sched_in(counter, cpuctx, ctx,
  734. smp_processor_id());
  735. else
  736. err = counter_sched_in(counter, cpuctx, ctx,
  737. smp_processor_id());
  738. perf_enable();
  739. }
  740. if (err) {
  741. /*
  742. * If this counter can't go on and it's part of a
  743. * group, then the whole group has to come off.
  744. */
  745. if (leader != counter)
  746. group_sched_out(leader, cpuctx, ctx);
  747. if (leader->attr.pinned) {
  748. update_group_times(leader);
  749. leader->state = PERF_COUNTER_STATE_ERROR;
  750. }
  751. }
  752. unlock:
  753. spin_unlock(&ctx->lock);
  754. }
  755. /*
  756. * Enable a counter.
  757. *
  758. * If counter->ctx is a cloned context, callers must make sure that
  759. * every task struct that counter->ctx->task could possibly point to
  760. * remains valid. This condition is satisfied when called through
  761. * perf_counter_for_each_child or perf_counter_for_each as described
  762. * for perf_counter_disable.
  763. */
  764. static void perf_counter_enable(struct perf_counter *counter)
  765. {
  766. struct perf_counter_context *ctx = counter->ctx;
  767. struct task_struct *task = ctx->task;
  768. if (!task) {
  769. /*
  770. * Enable the counter on the cpu that it's on
  771. */
  772. smp_call_function_single(counter->cpu, __perf_counter_enable,
  773. counter, 1);
  774. return;
  775. }
  776. spin_lock_irq(&ctx->lock);
  777. if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
  778. goto out;
  779. /*
  780. * If the counter is in error state, clear that first.
  781. * That way, if we see the counter in error state below, we
  782. * know that it has gone back into error state, as distinct
  783. * from the task having been scheduled away before the
  784. * cross-call arrived.
  785. */
  786. if (counter->state == PERF_COUNTER_STATE_ERROR)
  787. counter->state = PERF_COUNTER_STATE_OFF;
  788. retry:
  789. spin_unlock_irq(&ctx->lock);
  790. task_oncpu_function_call(task, __perf_counter_enable, counter);
  791. spin_lock_irq(&ctx->lock);
  792. /*
  793. * If the context is active and the counter is still off,
  794. * we need to retry the cross-call.
  795. */
  796. if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
  797. goto retry;
  798. /*
  799. * Since we have the lock this context can't be scheduled
  800. * in, so we can change the state safely.
  801. */
  802. if (counter->state == PERF_COUNTER_STATE_OFF) {
  803. counter->state = PERF_COUNTER_STATE_INACTIVE;
  804. counter->tstamp_enabled =
  805. ctx->time - counter->total_time_enabled;
  806. }
  807. out:
  808. spin_unlock_irq(&ctx->lock);
  809. }
  810. static int perf_counter_refresh(struct perf_counter *counter, int refresh)
  811. {
  812. /*
  813. * not supported on inherited counters
  814. */
  815. if (counter->attr.inherit)
  816. return -EINVAL;
  817. atomic_add(refresh, &counter->event_limit);
  818. perf_counter_enable(counter);
  819. return 0;
  820. }
  821. void __perf_counter_sched_out(struct perf_counter_context *ctx,
  822. struct perf_cpu_context *cpuctx)
  823. {
  824. struct perf_counter *counter;
  825. spin_lock(&ctx->lock);
  826. ctx->is_active = 0;
  827. if (likely(!ctx->nr_counters))
  828. goto out;
  829. update_context_time(ctx);
  830. perf_disable();
  831. if (ctx->nr_active) {
  832. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  833. if (counter != counter->group_leader)
  834. counter_sched_out(counter, cpuctx, ctx);
  835. else
  836. group_sched_out(counter, cpuctx, ctx);
  837. }
  838. }
  839. perf_enable();
  840. out:
  841. spin_unlock(&ctx->lock);
  842. }
  843. /*
  844. * Test whether two contexts are equivalent, i.e. whether they
  845. * have both been cloned from the same version of the same context
  846. * and they both have the same number of enabled counters.
  847. * If the number of enabled counters is the same, then the set
  848. * of enabled counters should be the same, because these are both
  849. * inherited contexts, therefore we can't access individual counters
  850. * in them directly with an fd; we can only enable/disable all
  851. * counters via prctl, or enable/disable all counters in a family
  852. * via ioctl, which will have the same effect on both contexts.
  853. */
  854. static int context_equiv(struct perf_counter_context *ctx1,
  855. struct perf_counter_context *ctx2)
  856. {
  857. return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
  858. && ctx1->parent_gen == ctx2->parent_gen
  859. && !ctx1->pin_count && !ctx2->pin_count;
  860. }
  861. /*
  862. * Called from scheduler to remove the counters of the current task,
  863. * with interrupts disabled.
  864. *
  865. * We stop each counter and update the counter value in counter->count.
  866. *
  867. * This does not protect us against NMI, but disable()
  868. * sets the disabled bit in the control field of counter _before_
  869. * accessing the counter control register. If a NMI hits, then it will
  870. * not restart the counter.
  871. */
  872. void perf_counter_task_sched_out(struct task_struct *task,
  873. struct task_struct *next, int cpu)
  874. {
  875. struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
  876. struct perf_counter_context *ctx = task->perf_counter_ctxp;
  877. struct perf_counter_context *next_ctx;
  878. struct perf_counter_context *parent;
  879. struct pt_regs *regs;
  880. int do_switch = 1;
  881. regs = task_pt_regs(task);
  882. perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
  883. if (likely(!ctx || !cpuctx->task_ctx))
  884. return;
  885. update_context_time(ctx);
  886. rcu_read_lock();
  887. parent = rcu_dereference(ctx->parent_ctx);
  888. next_ctx = next->perf_counter_ctxp;
  889. if (parent && next_ctx &&
  890. rcu_dereference(next_ctx->parent_ctx) == parent) {
  891. /*
  892. * Looks like the two contexts are clones, so we might be
  893. * able to optimize the context switch. We lock both
  894. * contexts and check that they are clones under the
  895. * lock (including re-checking that neither has been
  896. * uncloned in the meantime). It doesn't matter which
  897. * order we take the locks because no other cpu could
  898. * be trying to lock both of these tasks.
  899. */
  900. spin_lock(&ctx->lock);
  901. spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
  902. if (context_equiv(ctx, next_ctx)) {
  903. /*
  904. * XXX do we need a memory barrier of sorts
  905. * wrt to rcu_dereference() of perf_counter_ctxp
  906. */
  907. task->perf_counter_ctxp = next_ctx;
  908. next->perf_counter_ctxp = ctx;
  909. ctx->task = next;
  910. next_ctx->task = task;
  911. do_switch = 0;
  912. }
  913. spin_unlock(&next_ctx->lock);
  914. spin_unlock(&ctx->lock);
  915. }
  916. rcu_read_unlock();
  917. if (do_switch) {
  918. __perf_counter_sched_out(ctx, cpuctx);
  919. cpuctx->task_ctx = NULL;
  920. }
  921. }
  922. /*
  923. * Called with IRQs disabled
  924. */
  925. static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
  926. {
  927. struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
  928. if (!cpuctx->task_ctx)
  929. return;
  930. if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
  931. return;
  932. __perf_counter_sched_out(ctx, cpuctx);
  933. cpuctx->task_ctx = NULL;
  934. }
  935. /*
  936. * Called with IRQs disabled
  937. */
  938. static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
  939. {
  940. __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
  941. }
  942. static void
  943. __perf_counter_sched_in(struct perf_counter_context *ctx,
  944. struct perf_cpu_context *cpuctx, int cpu)
  945. {
  946. struct perf_counter *counter;
  947. int can_add_hw = 1;
  948. spin_lock(&ctx->lock);
  949. ctx->is_active = 1;
  950. if (likely(!ctx->nr_counters))
  951. goto out;
  952. ctx->timestamp = perf_clock();
  953. perf_disable();
  954. /*
  955. * First go through the list and put on any pinned groups
  956. * in order to give them the best chance of going on.
  957. */
  958. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  959. if (counter->state <= PERF_COUNTER_STATE_OFF ||
  960. !counter->attr.pinned)
  961. continue;
  962. if (counter->cpu != -1 && counter->cpu != cpu)
  963. continue;
  964. if (counter != counter->group_leader)
  965. counter_sched_in(counter, cpuctx, ctx, cpu);
  966. else {
  967. if (group_can_go_on(counter, cpuctx, 1))
  968. group_sched_in(counter, cpuctx, ctx, cpu);
  969. }
  970. /*
  971. * If this pinned group hasn't been scheduled,
  972. * put it in error state.
  973. */
  974. if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  975. update_group_times(counter);
  976. counter->state = PERF_COUNTER_STATE_ERROR;
  977. }
  978. }
  979. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  980. /*
  981. * Ignore counters in OFF or ERROR state, and
  982. * ignore pinned counters since we did them already.
  983. */
  984. if (counter->state <= PERF_COUNTER_STATE_OFF ||
  985. counter->attr.pinned)
  986. continue;
  987. /*
  988. * Listen to the 'cpu' scheduling filter constraint
  989. * of counters:
  990. */
  991. if (counter->cpu != -1 && counter->cpu != cpu)
  992. continue;
  993. if (counter != counter->group_leader) {
  994. if (counter_sched_in(counter, cpuctx, ctx, cpu))
  995. can_add_hw = 0;
  996. } else {
  997. if (group_can_go_on(counter, cpuctx, can_add_hw)) {
  998. if (group_sched_in(counter, cpuctx, ctx, cpu))
  999. can_add_hw = 0;
  1000. }
  1001. }
  1002. }
  1003. perf_enable();
  1004. out:
  1005. spin_unlock(&ctx->lock);
  1006. }
  1007. /*
  1008. * Called from scheduler to add the counters of the current task
  1009. * with interrupts disabled.
  1010. *
  1011. * We restore the counter value and then enable it.
  1012. *
  1013. * This does not protect us against NMI, but enable()
  1014. * sets the enabled bit in the control field of counter _before_
  1015. * accessing the counter control register. If a NMI hits, then it will
  1016. * keep the counter running.
  1017. */
  1018. void perf_counter_task_sched_in(struct task_struct *task, int cpu)
  1019. {
  1020. struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
  1021. struct perf_counter_context *ctx = task->perf_counter_ctxp;
  1022. if (likely(!ctx))
  1023. return;
  1024. if (cpuctx->task_ctx == ctx)
  1025. return;
  1026. __perf_counter_sched_in(ctx, cpuctx, cpu);
  1027. cpuctx->task_ctx = ctx;
  1028. }
  1029. static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
  1030. {
  1031. struct perf_counter_context *ctx = &cpuctx->ctx;
  1032. __perf_counter_sched_in(ctx, cpuctx, cpu);
  1033. }
  1034. #define MAX_INTERRUPTS (~0ULL)
  1035. static void perf_log_throttle(struct perf_counter *counter, int enable);
  1036. static void perf_log_period(struct perf_counter *counter, u64 period);
  1037. static void perf_adjust_period(struct perf_counter *counter, u64 events)
  1038. {
  1039. struct hw_perf_counter *hwc = &counter->hw;
  1040. u64 period, sample_period;
  1041. s64 delta;
  1042. events *= hwc->sample_period;
  1043. period = div64_u64(events, counter->attr.sample_freq);
  1044. delta = (s64)(period - hwc->sample_period);
  1045. delta = (delta + 7) / 8; /* low pass filter */
  1046. sample_period = hwc->sample_period + delta;
  1047. if (!sample_period)
  1048. sample_period = 1;
  1049. perf_log_period(counter, sample_period);
  1050. hwc->sample_period = sample_period;
  1051. }
  1052. static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
  1053. {
  1054. struct perf_counter *counter;
  1055. struct hw_perf_counter *hwc;
  1056. u64 interrupts, freq;
  1057. spin_lock(&ctx->lock);
  1058. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1059. if (counter->state != PERF_COUNTER_STATE_ACTIVE)
  1060. continue;
  1061. hwc = &counter->hw;
  1062. interrupts = hwc->interrupts;
  1063. hwc->interrupts = 0;
  1064. /*
  1065. * unthrottle counters on the tick
  1066. */
  1067. if (interrupts == MAX_INTERRUPTS) {
  1068. perf_log_throttle(counter, 1);
  1069. counter->pmu->unthrottle(counter);
  1070. interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
  1071. }
  1072. if (!counter->attr.freq || !counter->attr.sample_freq)
  1073. continue;
  1074. /*
  1075. * if the specified freq < HZ then we need to skip ticks
  1076. */
  1077. if (counter->attr.sample_freq < HZ) {
  1078. freq = counter->attr.sample_freq;
  1079. hwc->freq_count += freq;
  1080. hwc->freq_interrupts += interrupts;
  1081. if (hwc->freq_count < HZ)
  1082. continue;
  1083. interrupts = hwc->freq_interrupts;
  1084. hwc->freq_interrupts = 0;
  1085. hwc->freq_count -= HZ;
  1086. } else
  1087. freq = HZ;
  1088. perf_adjust_period(counter, freq * interrupts);
  1089. /*
  1090. * In order to avoid being stalled by an (accidental) huge
  1091. * sample period, force reset the sample period if we didn't
  1092. * get any events in this freq period.
  1093. */
  1094. if (!interrupts) {
  1095. perf_disable();
  1096. counter->pmu->disable(counter);
  1097. atomic_set(&hwc->period_left, 0);
  1098. counter->pmu->enable(counter);
  1099. perf_enable();
  1100. }
  1101. }
  1102. spin_unlock(&ctx->lock);
  1103. }
  1104. /*
  1105. * Round-robin a context's counters:
  1106. */
  1107. static void rotate_ctx(struct perf_counter_context *ctx)
  1108. {
  1109. struct perf_counter *counter;
  1110. if (!ctx->nr_counters)
  1111. return;
  1112. spin_lock(&ctx->lock);
  1113. /*
  1114. * Rotate the first entry last (works just fine for group counters too):
  1115. */
  1116. perf_disable();
  1117. list_for_each_entry(counter, &ctx->counter_list, list_entry) {
  1118. list_move_tail(&counter->list_entry, &ctx->counter_list);
  1119. break;
  1120. }
  1121. perf_enable();
  1122. spin_unlock(&ctx->lock);
  1123. }
  1124. void perf_counter_task_tick(struct task_struct *curr, int cpu)
  1125. {
  1126. struct perf_cpu_context *cpuctx;
  1127. struct perf_counter_context *ctx;
  1128. if (!atomic_read(&nr_counters))
  1129. return;
  1130. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1131. ctx = curr->perf_counter_ctxp;
  1132. perf_ctx_adjust_freq(&cpuctx->ctx);
  1133. if (ctx)
  1134. perf_ctx_adjust_freq(ctx);
  1135. perf_counter_cpu_sched_out(cpuctx);
  1136. if (ctx)
  1137. __perf_counter_task_sched_out(ctx);
  1138. rotate_ctx(&cpuctx->ctx);
  1139. if (ctx)
  1140. rotate_ctx(ctx);
  1141. perf_counter_cpu_sched_in(cpuctx, cpu);
  1142. if (ctx)
  1143. perf_counter_task_sched_in(curr, cpu);
  1144. }
  1145. /*
  1146. * Cross CPU call to read the hardware counter
  1147. */
  1148. static void __read(void *info)
  1149. {
  1150. struct perf_counter *counter = info;
  1151. struct perf_counter_context *ctx = counter->ctx;
  1152. unsigned long flags;
  1153. local_irq_save(flags);
  1154. if (ctx->is_active)
  1155. update_context_time(ctx);
  1156. counter->pmu->read(counter);
  1157. update_counter_times(counter);
  1158. local_irq_restore(flags);
  1159. }
  1160. static u64 perf_counter_read(struct perf_counter *counter)
  1161. {
  1162. /*
  1163. * If counter is enabled and currently active on a CPU, update the
  1164. * value in the counter structure:
  1165. */
  1166. if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
  1167. smp_call_function_single(counter->oncpu,
  1168. __read, counter, 1);
  1169. } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
  1170. update_counter_times(counter);
  1171. }
  1172. return atomic64_read(&counter->count);
  1173. }
  1174. /*
  1175. * Initialize the perf_counter context in a task_struct:
  1176. */
  1177. static void
  1178. __perf_counter_init_context(struct perf_counter_context *ctx,
  1179. struct task_struct *task)
  1180. {
  1181. memset(ctx, 0, sizeof(*ctx));
  1182. spin_lock_init(&ctx->lock);
  1183. mutex_init(&ctx->mutex);
  1184. INIT_LIST_HEAD(&ctx->counter_list);
  1185. INIT_LIST_HEAD(&ctx->event_list);
  1186. atomic_set(&ctx->refcount, 1);
  1187. ctx->task = task;
  1188. }
  1189. static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  1190. {
  1191. struct perf_counter_context *parent_ctx;
  1192. struct perf_counter_context *ctx;
  1193. struct perf_cpu_context *cpuctx;
  1194. struct task_struct *task;
  1195. unsigned long flags;
  1196. int err;
  1197. /*
  1198. * If cpu is not a wildcard then this is a percpu counter:
  1199. */
  1200. if (cpu != -1) {
  1201. /* Must be root to operate on a CPU counter: */
  1202. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  1203. return ERR_PTR(-EACCES);
  1204. if (cpu < 0 || cpu > num_possible_cpus())
  1205. return ERR_PTR(-EINVAL);
  1206. /*
1207. * We could be clever and allow attaching a counter to an
  1208. * offline CPU and activate it when the CPU comes up, but
  1209. * that's for later.
  1210. */
  1211. if (!cpu_isset(cpu, cpu_online_map))
  1212. return ERR_PTR(-ENODEV);
  1213. cpuctx = &per_cpu(perf_cpu_context, cpu);
  1214. ctx = &cpuctx->ctx;
  1215. get_ctx(ctx);
  1216. return ctx;
  1217. }
  1218. rcu_read_lock();
  1219. if (!pid)
  1220. task = current;
  1221. else
  1222. task = find_task_by_vpid(pid);
  1223. if (task)
  1224. get_task_struct(task);
  1225. rcu_read_unlock();
  1226. if (!task)
  1227. return ERR_PTR(-ESRCH);
  1228. /*
  1229. * Can't attach counters to a dying task.
  1230. */
  1231. err = -ESRCH;
  1232. if (task->flags & PF_EXITING)
  1233. goto errout;
  1234. /* Reuse ptrace permission checks for now. */
  1235. err = -EACCES;
  1236. if (!ptrace_may_access(task, PTRACE_MODE_READ))
  1237. goto errout;
  1238. retry:
  1239. ctx = perf_lock_task_context(task, &flags);
  1240. if (ctx) {
  1241. parent_ctx = ctx->parent_ctx;
  1242. if (parent_ctx) {
  1243. put_ctx(parent_ctx);
  1244. ctx->parent_ctx = NULL; /* no longer a clone */
  1245. }
  1246. /*
  1247. * Get an extra reference before dropping the lock so that
  1248. * this context won't get freed if the task exits.
  1249. */
  1250. get_ctx(ctx);
  1251. spin_unlock_irqrestore(&ctx->lock, flags);
  1252. }
  1253. if (!ctx) {
  1254. ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
  1255. err = -ENOMEM;
  1256. if (!ctx)
  1257. goto errout;
  1258. __perf_counter_init_context(ctx, task);
  1259. get_ctx(ctx);
  1260. if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
  1261. /*
  1262. * We raced with some other task; use
  1263. * the context they set.
  1264. */
  1265. kfree(ctx);
  1266. goto retry;
  1267. }
  1268. get_task_struct(task);
  1269. }
  1270. put_task_struct(task);
  1271. return ctx;
  1272. errout:
  1273. put_task_struct(task);
  1274. return ERR_PTR(err);
  1275. }
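/*
 * Editor's sketch (illustrative, not part of the original file): how the
 * (pid, cpu) pair that reaches find_get_context() is typically chosen by
 * user space. The wrapper name is hypothetical and assumes the syscall is
 * reachable as syscall(__NR_perf_counter_open, ...).
 *
 *	static int counter_open(struct perf_counter_attr *attr, pid_t pid, int cpu)
 *	{
 *		return syscall(__NR_perf_counter_open, attr, pid, cpu, -1, 0);
 *	}
 *
 *	counter_open(&attr,  0, -1);	pid 0,  cpu -1: current task, any CPU
 *	counter_open(&attr, 42, -1);	pid 42, cpu -1: task 42, any CPU
 *	counter_open(&attr, -1,  3);	pid -1, cpu  3: per-CPU counter on CPU 3
 *					(may require CAP_SYS_ADMIN, see above)
 */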
  1276. static void free_counter_rcu(struct rcu_head *head)
  1277. {
  1278. struct perf_counter *counter;
  1279. counter = container_of(head, struct perf_counter, rcu_head);
  1280. if (counter->ns)
  1281. put_pid_ns(counter->ns);
  1282. kfree(counter);
  1283. }
  1284. static void perf_pending_sync(struct perf_counter *counter);
  1285. static void free_counter(struct perf_counter *counter)
  1286. {
  1287. perf_pending_sync(counter);
  1288. atomic_dec(&nr_counters);
  1289. if (counter->attr.mmap)
  1290. atomic_dec(&nr_mmap_counters);
  1291. if (counter->attr.comm)
  1292. atomic_dec(&nr_comm_counters);
  1293. if (counter->destroy)
  1294. counter->destroy(counter);
  1295. put_ctx(counter->ctx);
  1296. call_rcu(&counter->rcu_head, free_counter_rcu);
  1297. }
  1298. /*
  1299. * Called when the last reference to the file is gone.
  1300. */
  1301. static int perf_release(struct inode *inode, struct file *file)
  1302. {
  1303. struct perf_counter *counter = file->private_data;
  1304. struct perf_counter_context *ctx = counter->ctx;
  1305. file->private_data = NULL;
  1306. WARN_ON_ONCE(ctx->parent_ctx);
  1307. mutex_lock(&ctx->mutex);
  1308. perf_counter_remove_from_context(counter);
  1309. mutex_unlock(&ctx->mutex);
  1310. mutex_lock(&counter->owner->perf_counter_mutex);
  1311. list_del_init(&counter->owner_entry);
  1312. mutex_unlock(&counter->owner->perf_counter_mutex);
  1313. put_task_struct(counter->owner);
  1314. free_counter(counter);
  1315. return 0;
  1316. }
  1317. /*
  1318. * Read the performance counter - simple non blocking version for now
  1319. */
  1320. static ssize_t
  1321. perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
  1322. {
  1323. u64 values[3];
  1324. int n;
  1325. /*
  1326. * Return end-of-file for a read on a counter that is in
  1327. * error state (i.e. because it was pinned but it couldn't be
1328. * scheduled onto the CPU at some point).
  1329. */
  1330. if (counter->state == PERF_COUNTER_STATE_ERROR)
  1331. return 0;
  1332. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1333. mutex_lock(&counter->child_mutex);
  1334. values[0] = perf_counter_read(counter);
  1335. n = 1;
  1336. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
  1337. values[n++] = counter->total_time_enabled +
  1338. atomic64_read(&counter->child_total_time_enabled);
  1339. if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
  1340. values[n++] = counter->total_time_running +
  1341. atomic64_read(&counter->child_total_time_running);
  1342. if (counter->attr.read_format & PERF_FORMAT_ID)
  1343. values[n++] = counter->id;
  1344. mutex_unlock(&counter->child_mutex);
  1345. if (count < n * sizeof(u64))
  1346. return -EINVAL;
  1347. count = n * sizeof(u64);
  1348. if (copy_to_user(buf, values, count))
  1349. return -EFAULT;
  1350. return count;
  1351. }
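/*
 * Editor's sketch (not part of the original file): the user-space view of the
 * values[] layout built above, assuming the counter was opened with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.
 *
 *	u64 values[3];
 *
 *	if (read(fd, values, sizeof(values)) == sizeof(values)) {
 *		u64 count   = values[0];
 *		u64 enabled = values[1];
 *		u64 running = values[2];
 *
 *		if (running)
 *			count = count * enabled / running;	(scale for multiplexing)
 *	}
 */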
  1352. static ssize_t
  1353. perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  1354. {
  1355. struct perf_counter *counter = file->private_data;
  1356. return perf_read_hw(counter, buf, count);
  1357. }
  1358. static unsigned int perf_poll(struct file *file, poll_table *wait)
  1359. {
  1360. struct perf_counter *counter = file->private_data;
  1361. struct perf_mmap_data *data;
  1362. unsigned int events = POLL_HUP;
  1363. rcu_read_lock();
  1364. data = rcu_dereference(counter->data);
  1365. if (data)
  1366. events = atomic_xchg(&data->poll, 0);
  1367. rcu_read_unlock();
  1368. poll_wait(file, &counter->waitq, wait);
  1369. return events;
  1370. }
  1371. static void perf_counter_reset(struct perf_counter *counter)
  1372. {
  1373. (void)perf_counter_read(counter);
  1374. atomic64_set(&counter->count, 0);
  1375. perf_counter_update_userpage(counter);
  1376. }
  1377. static void perf_counter_for_each_sibling(struct perf_counter *counter,
  1378. void (*func)(struct perf_counter *))
  1379. {
  1380. struct perf_counter_context *ctx = counter->ctx;
  1381. struct perf_counter *sibling;
  1382. WARN_ON_ONCE(ctx->parent_ctx);
  1383. mutex_lock(&ctx->mutex);
  1384. counter = counter->group_leader;
  1385. func(counter);
  1386. list_for_each_entry(sibling, &counter->sibling_list, list_entry)
  1387. func(sibling);
  1388. mutex_unlock(&ctx->mutex);
  1389. }
  1390. /*
  1391. * Holding the top-level counter's child_mutex means that any
  1392. * descendant process that has inherited this counter will block
1393. * in sync_child_counter when it exits, thus satisfying the
  1394. * task existence requirements of perf_counter_enable/disable.
  1395. */
  1396. static void perf_counter_for_each_child(struct perf_counter *counter,
  1397. void (*func)(struct perf_counter *))
  1398. {
  1399. struct perf_counter *child;
  1400. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1401. mutex_lock(&counter->child_mutex);
  1402. func(counter);
  1403. list_for_each_entry(child, &counter->child_list, child_list)
  1404. func(child);
  1405. mutex_unlock(&counter->child_mutex);
  1406. }
  1407. static void perf_counter_for_each(struct perf_counter *counter,
  1408. void (*func)(struct perf_counter *))
  1409. {
  1410. struct perf_counter *child;
  1411. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1412. mutex_lock(&counter->child_mutex);
  1413. perf_counter_for_each_sibling(counter, func);
  1414. list_for_each_entry(child, &counter->child_list, child_list)
  1415. perf_counter_for_each_sibling(child, func);
  1416. mutex_unlock(&counter->child_mutex);
  1417. }
  1418. static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
  1419. {
  1420. struct perf_counter_context *ctx = counter->ctx;
  1421. unsigned long size;
  1422. int ret = 0;
  1423. u64 value;
  1424. if (!counter->attr.sample_period)
  1425. return -EINVAL;
  1426. size = copy_from_user(&value, arg, sizeof(value));
  1427. if (size != sizeof(value))
  1428. return -EFAULT;
  1429. if (!value)
  1430. return -EINVAL;
  1431. spin_lock_irq(&ctx->lock);
  1432. if (counter->attr.freq) {
  1433. if (value > sysctl_perf_counter_sample_rate) {
  1434. ret = -EINVAL;
  1435. goto unlock;
  1436. }
  1437. counter->attr.sample_freq = value;
  1438. } else {
  1439. perf_log_period(counter, value);
  1440. counter->attr.sample_period = value;
  1441. counter->hw.sample_period = value;
  1442. }
  1443. unlock:
  1444. spin_unlock_irq(&ctx->lock);
  1445. return ret;
  1446. }
  1447. static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1448. {
  1449. struct perf_counter *counter = file->private_data;
  1450. void (*func)(struct perf_counter *);
  1451. u32 flags = arg;
  1452. switch (cmd) {
  1453. case PERF_COUNTER_IOC_ENABLE:
  1454. func = perf_counter_enable;
  1455. break;
  1456. case PERF_COUNTER_IOC_DISABLE:
  1457. func = perf_counter_disable;
  1458. break;
  1459. case PERF_COUNTER_IOC_RESET:
  1460. func = perf_counter_reset;
  1461. break;
  1462. case PERF_COUNTER_IOC_REFRESH:
  1463. return perf_counter_refresh(counter, arg);
  1464. case PERF_COUNTER_IOC_PERIOD:
  1465. return perf_counter_period(counter, (u64 __user *)arg);
  1466. default:
  1467. return -ENOTTY;
  1468. }
  1469. if (flags & PERF_IOC_FLAG_GROUP)
  1470. perf_counter_for_each(counter, func);
  1471. else
  1472. perf_counter_for_each_child(counter, func);
  1473. return 0;
  1474. }
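/*
 * Editor's sketch (illustrative only): driving the ioctl interface above from
 * user space. Passing PERF_IOC_FLAG_GROUP in arg makes ENABLE/DISABLE/RESET
 * act on the whole sibling group rather than a single counter.
 *
 *	u64 period = 100000;
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *	... run the workload ...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period);	(takes a u64 pointer)
 */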
  1475. int perf_counter_task_enable(void)
  1476. {
  1477. struct perf_counter *counter;
  1478. mutex_lock(&current->perf_counter_mutex);
  1479. list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
  1480. perf_counter_for_each_child(counter, perf_counter_enable);
  1481. mutex_unlock(&current->perf_counter_mutex);
  1482. return 0;
  1483. }
  1484. int perf_counter_task_disable(void)
  1485. {
  1486. struct perf_counter *counter;
  1487. mutex_lock(&current->perf_counter_mutex);
  1488. list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
  1489. perf_counter_for_each_child(counter, perf_counter_disable);
  1490. mutex_unlock(&current->perf_counter_mutex);
  1491. return 0;
  1492. }
  1493. /*
  1494. * Callers need to ensure there can be no nesting of this function, otherwise
1495. * the seqlock logic goes bad. We cannot serialize this because the arch
  1496. * code calls this from NMI context.
  1497. */
  1498. void perf_counter_update_userpage(struct perf_counter *counter)
  1499. {
  1500. struct perf_counter_mmap_page *userpg;
  1501. struct perf_mmap_data *data;
  1502. rcu_read_lock();
  1503. data = rcu_dereference(counter->data);
  1504. if (!data)
  1505. goto unlock;
  1506. userpg = data->user_page;
  1507. /*
  1508. * Disable preemption so as to not let the corresponding user-space
  1509. * spin too long if we get preempted.
  1510. */
  1511. preempt_disable();
  1512. ++userpg->lock;
  1513. barrier();
  1514. userpg->index = counter->hw.idx;
  1515. userpg->offset = atomic64_read(&counter->count);
  1516. if (counter->state == PERF_COUNTER_STATE_ACTIVE)
  1517. userpg->offset -= atomic64_read(&counter->hw.prev_count);
  1518. barrier();
  1519. ++userpg->lock;
  1520. preempt_enable();
  1521. unlock:
  1522. rcu_read_unlock();
  1523. }
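/*
 * Editor's sketch (not part of the original file): the matching user-space
 * read side of the ->lock sequence count published above. "pc" is assumed to
 * point at the first mmap()ed page, i.e. struct perf_counter_mmap_page.
 *
 *	u32 seq, idx;
 *	s64 offset;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx    = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq || (seq & 1));
 *
 * The count is then "offset", optionally plus a raw hardware read of counter
 * "idx" (e.g. RDPMC on x86) while the counter is active and idx is nonzero.
 */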
  1524. static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1525. {
  1526. struct perf_counter *counter = vma->vm_file->private_data;
  1527. struct perf_mmap_data *data;
  1528. int ret = VM_FAULT_SIGBUS;
  1529. rcu_read_lock();
  1530. data = rcu_dereference(counter->data);
  1531. if (!data)
  1532. goto unlock;
  1533. if (vmf->pgoff == 0) {
  1534. vmf->page = virt_to_page(data->user_page);
  1535. } else {
  1536. int nr = vmf->pgoff - 1;
  1537. if ((unsigned)nr > data->nr_pages)
  1538. goto unlock;
  1539. vmf->page = virt_to_page(data->data_pages[nr]);
  1540. }
  1541. get_page(vmf->page);
  1542. ret = 0;
  1543. unlock:
  1544. rcu_read_unlock();
  1545. return ret;
  1546. }
  1547. static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
  1548. {
  1549. struct perf_mmap_data *data;
  1550. unsigned long size;
  1551. int i;
  1552. WARN_ON(atomic_read(&counter->mmap_count));
  1553. size = sizeof(struct perf_mmap_data);
  1554. size += nr_pages * sizeof(void *);
  1555. data = kzalloc(size, GFP_KERNEL);
  1556. if (!data)
  1557. goto fail;
  1558. data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
  1559. if (!data->user_page)
  1560. goto fail_user_page;
  1561. for (i = 0; i < nr_pages; i++) {
  1562. data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
  1563. if (!data->data_pages[i])
  1564. goto fail_data_pages;
  1565. }
  1566. data->nr_pages = nr_pages;
  1567. atomic_set(&data->lock, -1);
  1568. rcu_assign_pointer(counter->data, data);
  1569. return 0;
  1570. fail_data_pages:
  1571. for (i--; i >= 0; i--)
  1572. free_page((unsigned long)data->data_pages[i]);
  1573. free_page((unsigned long)data->user_page);
  1574. fail_user_page:
  1575. kfree(data);
  1576. fail:
  1577. return -ENOMEM;
  1578. }
  1579. static void __perf_mmap_data_free(struct rcu_head *rcu_head)
  1580. {
  1581. struct perf_mmap_data *data;
  1582. int i;
  1583. data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
  1584. free_page((unsigned long)data->user_page);
  1585. for (i = 0; i < data->nr_pages; i++)
  1586. free_page((unsigned long)data->data_pages[i]);
  1587. kfree(data);
  1588. }
  1589. static void perf_mmap_data_free(struct perf_counter *counter)
  1590. {
  1591. struct perf_mmap_data *data = counter->data;
  1592. WARN_ON(atomic_read(&counter->mmap_count));
  1593. rcu_assign_pointer(counter->data, NULL);
  1594. call_rcu(&data->rcu_head, __perf_mmap_data_free);
  1595. }
  1596. static void perf_mmap_open(struct vm_area_struct *vma)
  1597. {
  1598. struct perf_counter *counter = vma->vm_file->private_data;
  1599. atomic_inc(&counter->mmap_count);
  1600. }
  1601. static void perf_mmap_close(struct vm_area_struct *vma)
  1602. {
  1603. struct perf_counter *counter = vma->vm_file->private_data;
  1604. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1605. if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
  1606. struct user_struct *user = current_user();
  1607. atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
  1608. vma->vm_mm->locked_vm -= counter->data->nr_locked;
  1609. perf_mmap_data_free(counter);
  1610. mutex_unlock(&counter->mmap_mutex);
  1611. }
  1612. }
  1613. static struct vm_operations_struct perf_mmap_vmops = {
  1614. .open = perf_mmap_open,
  1615. .close = perf_mmap_close,
  1616. .fault = perf_mmap_fault,
  1617. };
  1618. static int perf_mmap(struct file *file, struct vm_area_struct *vma)
  1619. {
  1620. struct perf_counter *counter = file->private_data;
  1621. unsigned long user_locked, user_lock_limit;
  1622. struct user_struct *user = current_user();
  1623. unsigned long locked, lock_limit;
  1624. unsigned long vma_size;
  1625. unsigned long nr_pages;
  1626. long user_extra, extra;
  1627. int ret = 0;
  1628. if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
  1629. return -EINVAL;
  1630. vma_size = vma->vm_end - vma->vm_start;
  1631. nr_pages = (vma_size / PAGE_SIZE) - 1;
  1632. /*
  1633. * If we have data pages ensure they're a power-of-two number, so we
  1634. * can do bitmasks instead of modulo.
  1635. */
  1636. if (nr_pages != 0 && !is_power_of_2(nr_pages))
  1637. return -EINVAL;
  1638. if (vma_size != PAGE_SIZE * (1 + nr_pages))
  1639. return -EINVAL;
  1640. if (vma->vm_pgoff != 0)
  1641. return -EINVAL;
  1642. WARN_ON_ONCE(counter->ctx->parent_ctx);
  1643. mutex_lock(&counter->mmap_mutex);
  1644. if (atomic_inc_not_zero(&counter->mmap_count)) {
  1645. if (nr_pages != counter->data->nr_pages)
  1646. ret = -EINVAL;
  1647. goto unlock;
  1648. }
  1649. user_extra = nr_pages + 1;
  1650. user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
  1651. /*
  1652. * Increase the limit linearly with more CPUs:
  1653. */
  1654. user_lock_limit *= num_online_cpus();
  1655. user_locked = atomic_long_read(&user->locked_vm) + user_extra;
  1656. extra = 0;
  1657. if (user_locked > user_lock_limit)
  1658. extra = user_locked - user_lock_limit;
  1659. lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
  1660. lock_limit >>= PAGE_SHIFT;
  1661. locked = vma->vm_mm->locked_vm + extra;
  1662. if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
  1663. ret = -EPERM;
  1664. goto unlock;
  1665. }
  1666. WARN_ON(counter->data);
  1667. ret = perf_mmap_data_alloc(counter, nr_pages);
  1668. if (ret)
  1669. goto unlock;
  1670. atomic_set(&counter->mmap_count, 1);
  1671. atomic_long_add(user_extra, &user->locked_vm);
  1672. vma->vm_mm->locked_vm += extra;
  1673. counter->data->nr_locked = extra;
  1674. unlock:
  1675. mutex_unlock(&counter->mmap_mutex);
  1676. vma->vm_flags &= ~VM_MAYWRITE;
  1677. vma->vm_flags |= VM_RESERVED;
  1678. vma->vm_ops = &perf_mmap_vmops;
  1679. return ret;
  1680. }
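/*
 * Editor's sketch (illustrative only): the size constraints enforced above,
 * seen from user space -- one read-only control page followed by 2^n data
 * pages. page_size would come from sysconf(_SC_PAGESIZE).
 *
 *	int n = 3;					(8 data pages)
 *	size_t len = (1 + (1 << n)) * page_size;
 *
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	struct perf_counter_mmap_page *pc = base;	(control/user page)
 *	void *ring = base + page_size;			(2^n page ring buffer)
 */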
  1681. static int perf_fasync(int fd, struct file *filp, int on)
  1682. {
  1683. struct inode *inode = filp->f_path.dentry->d_inode;
  1684. struct perf_counter *counter = filp->private_data;
  1685. int retval;
  1686. mutex_lock(&inode->i_mutex);
  1687. retval = fasync_helper(fd, filp, on, &counter->fasync);
  1688. mutex_unlock(&inode->i_mutex);
  1689. if (retval < 0)
  1690. return retval;
  1691. return 0;
  1692. }
  1693. static const struct file_operations perf_fops = {
  1694. .release = perf_release,
  1695. .read = perf_read,
  1696. .poll = perf_poll,
  1697. .unlocked_ioctl = perf_ioctl,
  1698. .compat_ioctl = perf_ioctl,
  1699. .mmap = perf_mmap,
  1700. .fasync = perf_fasync,
  1701. };
  1702. /*
  1703. * Perf counter wakeup
  1704. *
  1705. * If there's data, ensure we set the poll() state and publish everything
  1706. * to user-space before waking everybody up.
  1707. */
  1708. void perf_counter_wakeup(struct perf_counter *counter)
  1709. {
  1710. wake_up_all(&counter->waitq);
  1711. if (counter->pending_kill) {
  1712. kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
  1713. counter->pending_kill = 0;
  1714. }
  1715. }
  1716. /*
  1717. * Pending wakeups
  1718. *
1719. * Handle the case where we need to wake up from NMI (or rq->lock) context.
  1720. *
  1721. * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1722. * singly-linked list and use cmpxchg() to add entries locklessly.
  1723. */
  1724. static void perf_pending_counter(struct perf_pending_entry *entry)
  1725. {
  1726. struct perf_counter *counter = container_of(entry,
  1727. struct perf_counter, pending);
  1728. if (counter->pending_disable) {
  1729. counter->pending_disable = 0;
  1730. perf_counter_disable(counter);
  1731. }
  1732. if (counter->pending_wakeup) {
  1733. counter->pending_wakeup = 0;
  1734. perf_counter_wakeup(counter);
  1735. }
  1736. }
  1737. #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
  1738. static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
  1739. PENDING_TAIL,
  1740. };
  1741. static void perf_pending_queue(struct perf_pending_entry *entry,
  1742. void (*func)(struct perf_pending_entry *))
  1743. {
  1744. struct perf_pending_entry **head;
  1745. if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
  1746. return;
  1747. entry->func = func;
  1748. head = &get_cpu_var(perf_pending_head);
  1749. do {
  1750. entry->next = *head;
  1751. } while (cmpxchg(head, entry->next, entry) != entry->next);
  1752. set_perf_counter_pending();
  1753. put_cpu_var(perf_pending_head);
  1754. }
  1755. static int __perf_pending_run(void)
  1756. {
  1757. struct perf_pending_entry *list;
  1758. int nr = 0;
  1759. list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
  1760. while (list != PENDING_TAIL) {
  1761. void (*func)(struct perf_pending_entry *);
  1762. struct perf_pending_entry *entry = list;
  1763. list = list->next;
  1764. func = entry->func;
  1765. entry->next = NULL;
  1766. /*
  1767. * Ensure we observe the unqueue before we issue the wakeup,
  1768. * so that we won't be waiting forever.
  1769. * -- see perf_not_pending().
  1770. */
  1771. smp_wmb();
  1772. func(entry);
  1773. nr++;
  1774. }
  1775. return nr;
  1776. }
  1777. static inline int perf_not_pending(struct perf_counter *counter)
  1778. {
  1779. /*
  1780. * If we flush on whatever cpu we run, there is a chance we don't
  1781. * need to wait.
  1782. */
  1783. get_cpu();
  1784. __perf_pending_run();
  1785. put_cpu();
  1786. /*
  1787. * Ensure we see the proper queue state before going to sleep
1788. * so that we do not miss the wakeup. -- see __perf_pending_run()
  1789. */
  1790. smp_rmb();
  1791. return counter->pending.next == NULL;
  1792. }
  1793. static void perf_pending_sync(struct perf_counter *counter)
  1794. {
  1795. wait_event(counter->waitq, perf_not_pending(counter));
  1796. }
  1797. void perf_counter_do_pending(void)
  1798. {
  1799. __perf_pending_run();
  1800. }
  1801. /*
  1802. * Callchain support -- arch specific
  1803. */
  1804. __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
  1805. {
  1806. return NULL;
  1807. }
  1808. /*
  1809. * Output
  1810. */
  1811. struct perf_output_handle {
  1812. struct perf_counter *counter;
  1813. struct perf_mmap_data *data;
  1814. unsigned long head;
  1815. unsigned long offset;
  1816. int nmi;
  1817. int overflow;
  1818. int locked;
  1819. unsigned long flags;
  1820. };
  1821. static void perf_output_wakeup(struct perf_output_handle *handle)
  1822. {
  1823. atomic_set(&handle->data->poll, POLL_IN);
  1824. if (handle->nmi) {
  1825. handle->counter->pending_wakeup = 1;
  1826. perf_pending_queue(&handle->counter->pending,
  1827. perf_pending_counter);
  1828. } else
  1829. perf_counter_wakeup(handle->counter);
  1830. }
  1831. /*
  1832. * Curious locking construct.
  1833. *
  1834. * We need to ensure a later event doesn't publish a head when a former
1835. * event isn't done writing. However, since we need to deal with NMIs we
  1836. * cannot fully serialize things.
  1837. *
  1838. * What we do is serialize between CPUs so we only have to deal with NMI
  1839. * nesting on a single CPU.
  1840. *
  1841. * We only publish the head (and generate a wakeup) when the outer-most
  1842. * event completes.
  1843. */
  1844. static void perf_output_lock(struct perf_output_handle *handle)
  1845. {
  1846. struct perf_mmap_data *data = handle->data;
  1847. int cpu;
  1848. handle->locked = 0;
  1849. local_irq_save(handle->flags);
  1850. cpu = smp_processor_id();
  1851. if (in_nmi() && atomic_read(&data->lock) == cpu)
  1852. return;
  1853. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  1854. cpu_relax();
  1855. handle->locked = 1;
  1856. }
  1857. static void perf_output_unlock(struct perf_output_handle *handle)
  1858. {
  1859. struct perf_mmap_data *data = handle->data;
  1860. unsigned long head;
  1861. int cpu;
  1862. data->done_head = data->head;
  1863. if (!handle->locked)
  1864. goto out;
  1865. again:
  1866. /*
  1867. * The xchg implies a full barrier that ensures all writes are done
  1868. * before we publish the new head, matched by a rmb() in userspace when
  1869. * reading this position.
  1870. */
  1871. while ((head = atomic_long_xchg(&data->done_head, 0)))
  1872. data->user_page->data_head = head;
  1873. /*
  1874. * NMI can happen here, which means we can miss a done_head update.
  1875. */
  1876. cpu = atomic_xchg(&data->lock, -1);
  1877. WARN_ON_ONCE(cpu != smp_processor_id());
  1878. /*
1879. * Therefore we have to check whether that happened and, if so, redo the publish.
  1880. */
  1881. if (unlikely(atomic_long_read(&data->done_head))) {
  1882. /*
  1883. * Since we had it locked, we can lock it again.
  1884. */
  1885. while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
  1886. cpu_relax();
  1887. goto again;
  1888. }
  1889. if (atomic_xchg(&data->wakeup, 0))
  1890. perf_output_wakeup(handle);
  1891. out:
  1892. local_irq_restore(handle->flags);
  1893. }
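/*
 * Editor's sketch (not part of the original file): the user-space read side
 * that the "matched by a rmb()" note above refers to. "pc" is the mapped
 * control page, "ring" the buffer behind it, "mask" its size minus one, and
 * "tail" the reader's own record of how far it has consumed.
 *
 *	u64 head = pc->data_head;
 *	rmb();				(order the head load before the data loads)
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr = ring + (tail & mask);
 *		... consume hdr->size bytes ...
 *		tail += hdr->size;
 *	}
 */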
  1894. static int perf_output_begin(struct perf_output_handle *handle,
  1895. struct perf_counter *counter, unsigned int size,
  1896. int nmi, int overflow)
  1897. {
  1898. struct perf_mmap_data *data;
  1899. unsigned int offset, head;
  1900. /*
  1901. * For inherited counters we send all the output towards the parent.
  1902. */
  1903. if (counter->parent)
  1904. counter = counter->parent;
  1905. rcu_read_lock();
  1906. data = rcu_dereference(counter->data);
  1907. if (!data)
  1908. goto out;
  1909. handle->data = data;
  1910. handle->counter = counter;
  1911. handle->nmi = nmi;
  1912. handle->overflow = overflow;
  1913. if (!data->nr_pages)
  1914. goto fail;
  1915. perf_output_lock(handle);
  1916. do {
  1917. offset = head = atomic_long_read(&data->head);
  1918. head += size;
  1919. } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
  1920. handle->offset = offset;
  1921. handle->head = head;
  1922. if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
  1923. atomic_set(&data->wakeup, 1);
  1924. return 0;
  1925. fail:
  1926. perf_output_wakeup(handle);
  1927. out:
  1928. rcu_read_unlock();
  1929. return -ENOSPC;
  1930. }
  1931. static void perf_output_copy(struct perf_output_handle *handle,
  1932. const void *buf, unsigned int len)
  1933. {
  1934. unsigned int pages_mask;
  1935. unsigned int offset;
  1936. unsigned int size;
  1937. void **pages;
  1938. offset = handle->offset;
  1939. pages_mask = handle->data->nr_pages - 1;
  1940. pages = handle->data->data_pages;
  1941. do {
  1942. unsigned int page_offset;
  1943. int nr;
  1944. nr = (offset >> PAGE_SHIFT) & pages_mask;
  1945. page_offset = offset & (PAGE_SIZE - 1);
  1946. size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
  1947. memcpy(pages[nr] + page_offset, buf, size);
  1948. len -= size;
  1949. buf += size;
  1950. offset += size;
  1951. } while (len);
  1952. handle->offset = offset;
  1953. /*
  1954. * Check we didn't copy past our reservation window, taking the
  1955. * possible unsigned int wrap into account.
  1956. */
  1957. WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
  1958. }
  1959. #define perf_output_put(handle, x) \
  1960. perf_output_copy((handle), &(x), sizeof(x))
  1961. static void perf_output_end(struct perf_output_handle *handle)
  1962. {
  1963. struct perf_counter *counter = handle->counter;
  1964. struct perf_mmap_data *data = handle->data;
  1965. int wakeup_events = counter->attr.wakeup_events;
  1966. if (handle->overflow && wakeup_events) {
  1967. int events = atomic_inc_return(&data->events);
  1968. if (events >= wakeup_events) {
  1969. atomic_sub(wakeup_events, &data->events);
  1970. atomic_set(&data->wakeup, 1);
  1971. }
  1972. }
  1973. perf_output_unlock(handle);
  1974. rcu_read_unlock();
  1975. }
  1976. static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
  1977. {
  1978. /*
  1979. * only top level counters have the pid namespace they were created in
  1980. */
  1981. if (counter->parent)
  1982. counter = counter->parent;
  1983. return task_tgid_nr_ns(p, counter->ns);
  1984. }
  1985. static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
  1986. {
  1987. /*
  1988. * only top level counters have the pid namespace they were created in
  1989. */
  1990. if (counter->parent)
  1991. counter = counter->parent;
  1992. return task_pid_nr_ns(p, counter->ns);
  1993. }
  1994. static void perf_counter_output(struct perf_counter *counter, int nmi,
  1995. struct perf_sample_data *data)
  1996. {
  1997. int ret;
  1998. u64 sample_type = counter->attr.sample_type;
  1999. struct perf_output_handle handle;
  2000. struct perf_event_header header;
  2001. u64 ip;
  2002. struct {
  2003. u32 pid, tid;
  2004. } tid_entry;
  2005. struct {
  2006. u64 id;
  2007. u64 counter;
  2008. } group_entry;
  2009. struct perf_callchain_entry *callchain = NULL;
  2010. int callchain_size = 0;
  2011. u64 time;
  2012. struct {
  2013. u32 cpu, reserved;
  2014. } cpu_entry;
  2015. header.type = 0;
  2016. header.size = sizeof(header);
  2017. header.misc = PERF_EVENT_MISC_OVERFLOW;
  2018. header.misc |= perf_misc_flags(data->regs);
  2019. if (sample_type & PERF_SAMPLE_IP) {
  2020. ip = perf_instruction_pointer(data->regs);
  2021. header.type |= PERF_SAMPLE_IP;
  2022. header.size += sizeof(ip);
  2023. }
  2024. if (sample_type & PERF_SAMPLE_TID) {
  2025. /* namespace issues */
  2026. tid_entry.pid = perf_counter_pid(counter, current);
  2027. tid_entry.tid = perf_counter_tid(counter, current);
  2028. header.type |= PERF_SAMPLE_TID;
  2029. header.size += sizeof(tid_entry);
  2030. }
  2031. if (sample_type & PERF_SAMPLE_TIME) {
  2032. /*
  2033. * Maybe do better on x86 and provide cpu_clock_nmi()
  2034. */
  2035. time = sched_clock();
  2036. header.type |= PERF_SAMPLE_TIME;
  2037. header.size += sizeof(u64);
  2038. }
  2039. if (sample_type & PERF_SAMPLE_ADDR) {
  2040. header.type |= PERF_SAMPLE_ADDR;
  2041. header.size += sizeof(u64);
  2042. }
  2043. if (sample_type & PERF_SAMPLE_ID) {
  2044. header.type |= PERF_SAMPLE_ID;
  2045. header.size += sizeof(u64);
  2046. }
  2047. if (sample_type & PERF_SAMPLE_CPU) {
  2048. header.type |= PERF_SAMPLE_CPU;
  2049. header.size += sizeof(cpu_entry);
  2050. cpu_entry.cpu = raw_smp_processor_id();
  2051. }
  2052. if (sample_type & PERF_SAMPLE_PERIOD) {
  2053. header.type |= PERF_SAMPLE_PERIOD;
  2054. header.size += sizeof(u64);
  2055. }
  2056. if (sample_type & PERF_SAMPLE_GROUP) {
  2057. header.type |= PERF_SAMPLE_GROUP;
  2058. header.size += sizeof(u64) +
  2059. counter->nr_siblings * sizeof(group_entry);
  2060. }
  2061. if (sample_type & PERF_SAMPLE_CALLCHAIN) {
  2062. callchain = perf_callchain(data->regs);
  2063. if (callchain) {
  2064. callchain_size = (1 + callchain->nr) * sizeof(u64);
  2065. header.type |= PERF_SAMPLE_CALLCHAIN;
  2066. header.size += callchain_size;
  2067. }
  2068. }
  2069. ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
  2070. if (ret)
  2071. return;
  2072. perf_output_put(&handle, header);
  2073. if (sample_type & PERF_SAMPLE_IP)
  2074. perf_output_put(&handle, ip);
  2075. if (sample_type & PERF_SAMPLE_TID)
  2076. perf_output_put(&handle, tid_entry);
  2077. if (sample_type & PERF_SAMPLE_TIME)
  2078. perf_output_put(&handle, time);
  2079. if (sample_type & PERF_SAMPLE_ADDR)
  2080. perf_output_put(&handle, data->addr);
  2081. if (sample_type & PERF_SAMPLE_ID)
  2082. perf_output_put(&handle, counter->id);
  2083. if (sample_type & PERF_SAMPLE_CPU)
  2084. perf_output_put(&handle, cpu_entry);
  2085. if (sample_type & PERF_SAMPLE_PERIOD)
  2086. perf_output_put(&handle, data->period);
  2087. /*
  2088. * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
  2089. */
  2090. if (sample_type & PERF_SAMPLE_GROUP) {
  2091. struct perf_counter *leader, *sub;
  2092. u64 nr = counter->nr_siblings;
  2093. perf_output_put(&handle, nr);
  2094. leader = counter->group_leader;
  2095. list_for_each_entry(sub, &leader->sibling_list, list_entry) {
  2096. if (sub != counter)
  2097. sub->pmu->read(sub);
  2098. group_entry.id = sub->id;
  2099. group_entry.counter = atomic64_read(&sub->count);
  2100. perf_output_put(&handle, group_entry);
  2101. }
  2102. }
  2103. if (callchain)
  2104. perf_output_copy(&handle, callchain, callchain_size);
  2105. perf_output_end(&handle);
  2106. }
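/*
 * Editor's note -- the record layout that results from the size accounting
 * above for one common configuration, assuming
 * sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD:
 *
 *	struct {
 *		struct perf_event_header header;   (header.misc has
 *						    PERF_EVENT_MISC_OVERFLOW set)
 *		u64 ip;
 *		u32 pid, tid;
 *		u64 period;
 *	};
 *
 * Fields are emitted in the same order their sizes were added to header.size,
 * so a reader can walk the record using the sample_type bits it requested.
 */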
  2107. /*
  2108. * fork tracking
  2109. */
  2110. struct perf_fork_event {
  2111. struct task_struct *task;
  2112. struct {
  2113. struct perf_event_header header;
  2114. u32 pid;
  2115. u32 ppid;
  2116. } event;
  2117. };
  2118. static void perf_counter_fork_output(struct perf_counter *counter,
  2119. struct perf_fork_event *fork_event)
  2120. {
  2121. struct perf_output_handle handle;
  2122. int size = fork_event->event.header.size;
  2123. struct task_struct *task = fork_event->task;
  2124. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2125. if (ret)
  2126. return;
  2127. fork_event->event.pid = perf_counter_pid(counter, task);
  2128. fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
  2129. perf_output_put(&handle, fork_event->event);
  2130. perf_output_end(&handle);
  2131. }
  2132. static int perf_counter_fork_match(struct perf_counter *counter)
  2133. {
  2134. if (counter->attr.comm || counter->attr.mmap)
  2135. return 1;
  2136. return 0;
  2137. }
  2138. static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
  2139. struct perf_fork_event *fork_event)
  2140. {
  2141. struct perf_counter *counter;
  2142. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2143. return;
  2144. rcu_read_lock();
  2145. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2146. if (perf_counter_fork_match(counter))
  2147. perf_counter_fork_output(counter, fork_event);
  2148. }
  2149. rcu_read_unlock();
  2150. }
  2151. static void perf_counter_fork_event(struct perf_fork_event *fork_event)
  2152. {
  2153. struct perf_cpu_context *cpuctx;
  2154. struct perf_counter_context *ctx;
  2155. cpuctx = &get_cpu_var(perf_cpu_context);
  2156. perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
  2157. put_cpu_var(perf_cpu_context);
  2158. rcu_read_lock();
  2159. /*
2160. * It doesn't really matter which of the child contexts the
2161. * event ends up in.
  2162. */
  2163. ctx = rcu_dereference(current->perf_counter_ctxp);
  2164. if (ctx)
  2165. perf_counter_fork_ctx(ctx, fork_event);
  2166. rcu_read_unlock();
  2167. }
  2168. void perf_counter_fork(struct task_struct *task)
  2169. {
  2170. struct perf_fork_event fork_event;
  2171. if (!atomic_read(&nr_comm_counters) &&
  2172. !atomic_read(&nr_mmap_counters))
  2173. return;
  2174. fork_event = (struct perf_fork_event){
  2175. .task = task,
  2176. .event = {
  2177. .header = {
  2178. .type = PERF_EVENT_FORK,
  2179. .size = sizeof(fork_event.event),
  2180. },
  2181. },
  2182. };
  2183. perf_counter_fork_event(&fork_event);
  2184. }
  2185. /*
  2186. * comm tracking
  2187. */
  2188. struct perf_comm_event {
  2189. struct task_struct *task;
  2190. char *comm;
  2191. int comm_size;
  2192. struct {
  2193. struct perf_event_header header;
  2194. u32 pid;
  2195. u32 tid;
  2196. } event;
  2197. };
  2198. static void perf_counter_comm_output(struct perf_counter *counter,
  2199. struct perf_comm_event *comm_event)
  2200. {
  2201. struct perf_output_handle handle;
  2202. int size = comm_event->event.header.size;
  2203. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2204. if (ret)
  2205. return;
  2206. comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
  2207. comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
  2208. perf_output_put(&handle, comm_event->event);
  2209. perf_output_copy(&handle, comm_event->comm,
  2210. comm_event->comm_size);
  2211. perf_output_end(&handle);
  2212. }
  2213. static int perf_counter_comm_match(struct perf_counter *counter)
  2214. {
  2215. if (counter->attr.comm)
  2216. return 1;
  2217. return 0;
  2218. }
  2219. static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
  2220. struct perf_comm_event *comm_event)
  2221. {
  2222. struct perf_counter *counter;
  2223. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2224. return;
  2225. rcu_read_lock();
  2226. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2227. if (perf_counter_comm_match(counter))
  2228. perf_counter_comm_output(counter, comm_event);
  2229. }
  2230. rcu_read_unlock();
  2231. }
  2232. static void perf_counter_comm_event(struct perf_comm_event *comm_event)
  2233. {
  2234. struct perf_cpu_context *cpuctx;
  2235. struct perf_counter_context *ctx;
  2236. unsigned int size;
  2237. char *comm = comm_event->task->comm;
  2238. size = ALIGN(strlen(comm)+1, sizeof(u64));
  2239. comm_event->comm = comm;
  2240. comm_event->comm_size = size;
  2241. comm_event->event.header.size = sizeof(comm_event->event) + size;
  2242. cpuctx = &get_cpu_var(perf_cpu_context);
  2243. perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
  2244. put_cpu_var(perf_cpu_context);
  2245. rcu_read_lock();
  2246. /*
2247. * It doesn't really matter which of the child contexts the
2248. * event ends up in.
  2249. */
  2250. ctx = rcu_dereference(current->perf_counter_ctxp);
  2251. if (ctx)
  2252. perf_counter_comm_ctx(ctx, comm_event);
  2253. rcu_read_unlock();
  2254. }
  2255. void perf_counter_comm(struct task_struct *task)
  2256. {
  2257. struct perf_comm_event comm_event;
  2258. if (!atomic_read(&nr_comm_counters))
  2259. return;
  2260. comm_event = (struct perf_comm_event){
  2261. .task = task,
  2262. .event = {
  2263. .header = { .type = PERF_EVENT_COMM, },
  2264. },
  2265. };
  2266. perf_counter_comm_event(&comm_event);
  2267. }
  2268. /*
  2269. * mmap tracking
  2270. */
  2271. struct perf_mmap_event {
  2272. struct vm_area_struct *vma;
  2273. const char *file_name;
  2274. int file_size;
  2275. struct {
  2276. struct perf_event_header header;
  2277. u32 pid;
  2278. u32 tid;
  2279. u64 start;
  2280. u64 len;
  2281. u64 pgoff;
  2282. } event;
  2283. };
  2284. static void perf_counter_mmap_output(struct perf_counter *counter,
  2285. struct perf_mmap_event *mmap_event)
  2286. {
  2287. struct perf_output_handle handle;
  2288. int size = mmap_event->event.header.size;
  2289. int ret = perf_output_begin(&handle, counter, size, 0, 0);
  2290. if (ret)
  2291. return;
  2292. mmap_event->event.pid = perf_counter_pid(counter, current);
  2293. mmap_event->event.tid = perf_counter_tid(counter, current);
  2294. perf_output_put(&handle, mmap_event->event);
  2295. perf_output_copy(&handle, mmap_event->file_name,
  2296. mmap_event->file_size);
  2297. perf_output_end(&handle);
  2298. }
  2299. static int perf_counter_mmap_match(struct perf_counter *counter,
  2300. struct perf_mmap_event *mmap_event)
  2301. {
  2302. if (counter->attr.mmap)
  2303. return 1;
  2304. return 0;
  2305. }
  2306. static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
  2307. struct perf_mmap_event *mmap_event)
  2308. {
  2309. struct perf_counter *counter;
  2310. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2311. return;
  2312. rcu_read_lock();
  2313. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2314. if (perf_counter_mmap_match(counter, mmap_event))
  2315. perf_counter_mmap_output(counter, mmap_event);
  2316. }
  2317. rcu_read_unlock();
  2318. }
  2319. static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
  2320. {
  2321. struct perf_cpu_context *cpuctx;
  2322. struct perf_counter_context *ctx;
  2323. struct vm_area_struct *vma = mmap_event->vma;
  2324. struct file *file = vma->vm_file;
  2325. unsigned int size;
  2326. char tmp[16];
  2327. char *buf = NULL;
  2328. const char *name;
  2329. if (file) {
  2330. buf = kzalloc(PATH_MAX, GFP_KERNEL);
  2331. if (!buf) {
  2332. name = strncpy(tmp, "//enomem", sizeof(tmp));
  2333. goto got_name;
  2334. }
  2335. name = d_path(&file->f_path, buf, PATH_MAX);
  2336. if (IS_ERR(name)) {
  2337. name = strncpy(tmp, "//toolong", sizeof(tmp));
  2338. goto got_name;
  2339. }
  2340. } else {
  2341. name = arch_vma_name(mmap_event->vma);
  2342. if (name)
  2343. goto got_name;
  2344. if (!vma->vm_mm) {
  2345. name = strncpy(tmp, "[vdso]", sizeof(tmp));
  2346. goto got_name;
  2347. }
  2348. name = strncpy(tmp, "//anon", sizeof(tmp));
  2349. goto got_name;
  2350. }
  2351. got_name:
  2352. size = ALIGN(strlen(name)+1, sizeof(u64));
  2353. mmap_event->file_name = name;
  2354. mmap_event->file_size = size;
  2355. mmap_event->event.header.size = sizeof(mmap_event->event) + size;
  2356. cpuctx = &get_cpu_var(perf_cpu_context);
  2357. perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
  2358. put_cpu_var(perf_cpu_context);
  2359. rcu_read_lock();
  2360. /*
2361. * It doesn't really matter which of the child contexts the
2362. * event ends up in.
  2363. */
  2364. ctx = rcu_dereference(current->perf_counter_ctxp);
  2365. if (ctx)
  2366. perf_counter_mmap_ctx(ctx, mmap_event);
  2367. rcu_read_unlock();
  2368. kfree(buf);
  2369. }
  2370. void __perf_counter_mmap(struct vm_area_struct *vma)
  2371. {
  2372. struct perf_mmap_event mmap_event;
  2373. if (!atomic_read(&nr_mmap_counters))
  2374. return;
  2375. mmap_event = (struct perf_mmap_event){
  2376. .vma = vma,
  2377. .event = {
  2378. .header = { .type = PERF_EVENT_MMAP, },
  2379. .start = vma->vm_start,
  2380. .len = vma->vm_end - vma->vm_start,
  2381. .pgoff = vma->vm_pgoff,
  2382. },
  2383. };
  2384. perf_counter_mmap_event(&mmap_event);
  2385. }
  2386. /*
  2387. * Log sample_period changes so that analyzing tools can re-normalize the
  2388. * event flow.
  2389. */
  2390. struct freq_event {
  2391. struct perf_event_header header;
  2392. u64 time;
  2393. u64 id;
  2394. u64 period;
  2395. };
  2396. static void perf_log_period(struct perf_counter *counter, u64 period)
  2397. {
  2398. struct perf_output_handle handle;
  2399. struct freq_event event;
  2400. int ret;
  2401. if (counter->hw.sample_period == period)
  2402. return;
  2403. if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
  2404. return;
  2405. event = (struct freq_event) {
  2406. .header = {
  2407. .type = PERF_EVENT_PERIOD,
  2408. .misc = 0,
  2409. .size = sizeof(event),
  2410. },
  2411. .time = sched_clock(),
  2412. .id = counter->id,
  2413. .period = period,
  2414. };
  2415. ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
  2416. if (ret)
  2417. return;
  2418. perf_output_put(&handle, event);
  2419. perf_output_end(&handle);
  2420. }
  2421. /*
  2422. * IRQ throttle logging
  2423. */
  2424. static void perf_log_throttle(struct perf_counter *counter, int enable)
  2425. {
  2426. struct perf_output_handle handle;
  2427. int ret;
  2428. struct {
  2429. struct perf_event_header header;
  2430. u64 time;
  2431. u64 id;
  2432. } throttle_event = {
  2433. .header = {
  2434. .type = PERF_EVENT_THROTTLE + 1,
  2435. .misc = 0,
  2436. .size = sizeof(throttle_event),
  2437. },
  2438. .time = sched_clock(),
  2439. .id = counter->id,
  2440. };
  2441. ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
  2442. if (ret)
  2443. return;
  2444. perf_output_put(&handle, throttle_event);
  2445. perf_output_end(&handle);
  2446. }
  2447. /*
  2448. * Generic counter overflow handling.
  2449. */
  2450. int perf_counter_overflow(struct perf_counter *counter, int nmi,
  2451. struct perf_sample_data *data)
  2452. {
  2453. int events = atomic_read(&counter->event_limit);
  2454. int throttle = counter->pmu->unthrottle != NULL;
  2455. struct hw_perf_counter *hwc = &counter->hw;
  2456. int ret = 0;
  2457. if (!throttle) {
  2458. hwc->interrupts++;
  2459. } else {
  2460. if (hwc->interrupts != MAX_INTERRUPTS) {
  2461. hwc->interrupts++;
  2462. if (HZ * hwc->interrupts >
  2463. (u64)sysctl_perf_counter_sample_rate) {
  2464. hwc->interrupts = MAX_INTERRUPTS;
  2465. perf_log_throttle(counter, 0);
  2466. ret = 1;
  2467. }
  2468. } else {
  2469. /*
2470. * Keep re-disabling the counter even though we disabled it on
2471. * the previous pass - just in case we raced with a
  2472. * sched-in and the counter got enabled again:
  2473. */
  2474. ret = 1;
  2475. }
  2476. }
  2477. if (counter->attr.freq) {
  2478. u64 now = sched_clock();
  2479. s64 delta = now - hwc->freq_stamp;
  2480. hwc->freq_stamp = now;
  2481. if (delta > 0 && delta < TICK_NSEC)
  2482. perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
  2483. }
  2484. /*
  2485. * XXX event_limit might not quite work as expected on inherited
  2486. * counters
  2487. */
  2488. counter->pending_kill = POLL_IN;
  2489. if (events && atomic_dec_and_test(&counter->event_limit)) {
  2490. ret = 1;
  2491. counter->pending_kill = POLL_HUP;
  2492. if (nmi) {
  2493. counter->pending_disable = 1;
  2494. perf_pending_queue(&counter->pending,
  2495. perf_pending_counter);
  2496. } else
  2497. perf_counter_disable(counter);
  2498. }
  2499. perf_counter_output(counter, nmi, data);
  2500. return ret;
  2501. }
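/*
 * Editor's note -- a worked example of the throttle check above, assuming
 * HZ == 1000 and sysctl_perf_counter_sample_rate == 100000 (the usual
 * default):
 *
 *	hwc->interrupts is cleared on every tick by perf_ctx_adjust_freq(), so
 *	the counter is throttled once it takes more than 100000 / 1000 = 100
 *	interrupts within a single tick. perf_log_throttle(counter, 0) logs the
 *	throttle, and the next tick unthrottles the counter again, logging
 *	perf_log_throttle(counter, 1).
 */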
  2502. /*
  2503. * Generic software counter infrastructure
  2504. */
  2505. static void perf_swcounter_update(struct perf_counter *counter)
  2506. {
  2507. struct hw_perf_counter *hwc = &counter->hw;
  2508. u64 prev, now;
  2509. s64 delta;
  2510. again:
  2511. prev = atomic64_read(&hwc->prev_count);
  2512. now = atomic64_read(&hwc->count);
  2513. if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
  2514. goto again;
  2515. delta = now - prev;
  2516. atomic64_add(delta, &counter->count);
  2517. atomic64_sub(delta, &hwc->period_left);
  2518. }
  2519. static void perf_swcounter_set_period(struct perf_counter *counter)
  2520. {
  2521. struct hw_perf_counter *hwc = &counter->hw;
  2522. s64 left = atomic64_read(&hwc->period_left);
  2523. s64 period = hwc->sample_period;
  2524. if (unlikely(left <= -period)) {
  2525. left = period;
  2526. atomic64_set(&hwc->period_left, left);
  2527. hwc->last_period = period;
  2528. }
  2529. if (unlikely(left <= 0)) {
  2530. left += period;
  2531. atomic64_add(period, &hwc->period_left);
  2532. hwc->last_period = period;
  2533. }
  2534. atomic64_set(&hwc->prev_count, -left);
  2535. atomic64_set(&hwc->count, -left);
  2536. }
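/*
 * Editor's note -- worked examples of the period arithmetic above, assuming
 * hwc->sample_period == 1000:
 *
 *	period_left ==  1000: count is armed at -1000, so the next overflow
 *	                      fires after 1000 more events.
 *	period_left ==  -200: the last period overshot by 200 events; left
 *	                      becomes 800 and the next period is shortened.
 *	period_left == -1500: more than a full period behind; left is clamped
 *	                      back to a full period of 1000.
 */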
  2537. static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
  2538. {
  2539. enum hrtimer_restart ret = HRTIMER_RESTART;
  2540. struct perf_sample_data data;
  2541. struct perf_counter *counter;
  2542. u64 period;
  2543. counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
  2544. counter->pmu->read(counter);
  2545. data.addr = 0;
  2546. data.regs = get_irq_regs();
  2547. /*
  2548. * In case we exclude kernel IPs or are somehow not in interrupt
  2549. * context, provide the next best thing, the user IP.
  2550. */
  2551. if ((counter->attr.exclude_kernel || !data.regs) &&
  2552. !counter->attr.exclude_user)
  2553. data.regs = task_pt_regs(current);
  2554. if (data.regs) {
  2555. if (perf_counter_overflow(counter, 0, &data))
  2556. ret = HRTIMER_NORESTART;
  2557. }
  2558. period = max_t(u64, 10000, counter->hw.sample_period);
  2559. hrtimer_forward_now(hrtimer, ns_to_ktime(period));
  2560. return ret;
  2561. }
  2562. static void perf_swcounter_overflow(struct perf_counter *counter,
  2563. int nmi, struct pt_regs *regs, u64 addr)
  2564. {
  2565. struct perf_sample_data data = {
  2566. .regs = regs,
  2567. .addr = addr,
  2568. .period = counter->hw.last_period,
  2569. };
  2570. perf_swcounter_update(counter);
  2571. perf_swcounter_set_period(counter);
  2572. if (perf_counter_overflow(counter, nmi, &data))
  2573. /* soft-disable the counter */
  2574. ;
  2575. }
  2576. static int perf_swcounter_is_counting(struct perf_counter *counter)
  2577. {
  2578. struct perf_counter_context *ctx;
  2579. unsigned long flags;
  2580. int count;
  2581. if (counter->state == PERF_COUNTER_STATE_ACTIVE)
  2582. return 1;
  2583. if (counter->state != PERF_COUNTER_STATE_INACTIVE)
  2584. return 0;
  2585. /*
  2586. * If the counter is inactive, it could be just because
  2587. * its task is scheduled out, or because it's in a group
  2588. * which could not go on the PMU. We want to count in
  2589. * the first case but not the second. If the context is
  2590. * currently active then an inactive software counter must
  2591. * be the second case. If it's not currently active then
  2592. * we need to know whether the counter was active when the
  2593. * context was last active, which we can determine by
  2594. * comparing counter->tstamp_stopped with ctx->time.
  2595. *
  2596. * We are within an RCU read-side critical section,
  2597. * which protects the existence of *ctx.
  2598. */
  2599. ctx = counter->ctx;
  2600. spin_lock_irqsave(&ctx->lock, flags);
  2601. count = 1;
  2602. /* Re-check state now we have the lock */
  2603. if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
  2604. counter->ctx->is_active ||
  2605. counter->tstamp_stopped < ctx->time)
  2606. count = 0;
  2607. spin_unlock_irqrestore(&ctx->lock, flags);
  2608. return count;
  2609. }
  2610. static int perf_swcounter_match(struct perf_counter *counter,
  2611. enum perf_type_id type,
  2612. u32 event, struct pt_regs *regs)
  2613. {
  2614. if (!perf_swcounter_is_counting(counter))
  2615. return 0;
  2616. if (counter->attr.type != type)
  2617. return 0;
  2618. if (counter->attr.config != event)
  2619. return 0;
  2620. if (regs) {
  2621. if (counter->attr.exclude_user && user_mode(regs))
  2622. return 0;
  2623. if (counter->attr.exclude_kernel && !user_mode(regs))
  2624. return 0;
  2625. }
  2626. return 1;
  2627. }
  2628. static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
  2629. int nmi, struct pt_regs *regs, u64 addr)
  2630. {
  2631. int neg = atomic64_add_negative(nr, &counter->hw.count);
  2632. if (counter->hw.sample_period && !neg && regs)
  2633. perf_swcounter_overflow(counter, nmi, regs, addr);
  2634. }
  2635. static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
  2636. enum perf_type_id type, u32 event,
  2637. u64 nr, int nmi, struct pt_regs *regs,
  2638. u64 addr)
  2639. {
  2640. struct perf_counter *counter;
  2641. if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
  2642. return;
  2643. rcu_read_lock();
  2644. list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
  2645. if (perf_swcounter_match(counter, type, event, regs))
  2646. perf_swcounter_add(counter, nr, nmi, regs, addr);
  2647. }
  2648. rcu_read_unlock();
  2649. }
  2650. static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
  2651. {
  2652. if (in_nmi())
  2653. return &cpuctx->recursion[3];
  2654. if (in_irq())
  2655. return &cpuctx->recursion[2];
  2656. if (in_softirq())
  2657. return &cpuctx->recursion[1];
  2658. return &cpuctx->recursion[0];
  2659. }
  2660. static void __perf_swcounter_event(enum perf_type_id type, u32 event,
  2661. u64 nr, int nmi, struct pt_regs *regs,
  2662. u64 addr)
  2663. {
  2664. struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
  2665. int *recursion = perf_swcounter_recursion_context(cpuctx);
  2666. struct perf_counter_context *ctx;
  2667. if (*recursion)
  2668. goto out;
  2669. (*recursion)++;
  2670. barrier();
  2671. perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
  2672. nr, nmi, regs, addr);
  2673. rcu_read_lock();
  2674. /*
2675. * It doesn't really matter which of the child contexts the
2676. * event ends up in.
  2677. */
  2678. ctx = rcu_dereference(current->perf_counter_ctxp);
  2679. if (ctx)
  2680. perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
  2681. rcu_read_unlock();
  2682. barrier();
  2683. (*recursion)--;
  2684. out:
  2685. put_cpu_var(perf_cpu_context);
  2686. }
  2687. void
  2688. perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
  2689. {
  2690. __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
  2691. }
  2692. static void perf_swcounter_read(struct perf_counter *counter)
  2693. {
  2694. perf_swcounter_update(counter);
  2695. }
  2696. static int perf_swcounter_enable(struct perf_counter *counter)
  2697. {
  2698. perf_swcounter_set_period(counter);
  2699. return 0;
  2700. }
  2701. static void perf_swcounter_disable(struct perf_counter *counter)
  2702. {
  2703. perf_swcounter_update(counter);
  2704. }
  2705. static const struct pmu perf_ops_generic = {
  2706. .enable = perf_swcounter_enable,
  2707. .disable = perf_swcounter_disable,
  2708. .read = perf_swcounter_read,
  2709. };
  2710. /*
  2711. * Software counter: cpu wall time clock
  2712. */
  2713. static void cpu_clock_perf_counter_update(struct perf_counter *counter)
  2714. {
  2715. int cpu = raw_smp_processor_id();
  2716. s64 prev;
  2717. u64 now;
  2718. now = cpu_clock(cpu);
  2719. prev = atomic64_read(&counter->hw.prev_count);
  2720. atomic64_set(&counter->hw.prev_count, now);
  2721. atomic64_add(now - prev, &counter->count);
  2722. }
  2723. static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
  2724. {
  2725. struct hw_perf_counter *hwc = &counter->hw;
  2726. int cpu = raw_smp_processor_id();
  2727. atomic64_set(&hwc->prev_count, cpu_clock(cpu));
  2728. hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  2729. hwc->hrtimer.function = perf_swcounter_hrtimer;
  2730. if (hwc->sample_period) {
  2731. u64 period = max_t(u64, 10000, hwc->sample_period);
  2732. __hrtimer_start_range_ns(&hwc->hrtimer,
  2733. ns_to_ktime(period), 0,
  2734. HRTIMER_MODE_REL, 0);
  2735. }
  2736. return 0;
  2737. }
  2738. static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
  2739. {
  2740. if (counter->hw.sample_period)
  2741. hrtimer_cancel(&counter->hw.hrtimer);
  2742. cpu_clock_perf_counter_update(counter);
  2743. }
  2744. static void cpu_clock_perf_counter_read(struct perf_counter *counter)
  2745. {
  2746. cpu_clock_perf_counter_update(counter);
  2747. }
  2748. static const struct pmu perf_ops_cpu_clock = {
  2749. .enable = cpu_clock_perf_counter_enable,
  2750. .disable = cpu_clock_perf_counter_disable,
  2751. .read = cpu_clock_perf_counter_read,
  2752. };
  2753. /*
  2754. * Software counter: task time clock
  2755. */
  2756. static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
  2757. {
  2758. u64 prev;
  2759. s64 delta;
  2760. prev = atomic64_xchg(&counter->hw.prev_count, now);
  2761. delta = now - prev;
  2762. atomic64_add(delta, &counter->count);
  2763. }
  2764. static int task_clock_perf_counter_enable(struct perf_counter *counter)
  2765. {
  2766. struct hw_perf_counter *hwc = &counter->hw;
  2767. u64 now;
  2768. now = counter->ctx->time;
  2769. atomic64_set(&hwc->prev_count, now);
  2770. hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  2771. hwc->hrtimer.function = perf_swcounter_hrtimer;
  2772. if (hwc->sample_period) {
  2773. u64 period = max_t(u64, 10000, hwc->sample_period);
  2774. __hrtimer_start_range_ns(&hwc->hrtimer,
  2775. ns_to_ktime(period), 0,
  2776. HRTIMER_MODE_REL, 0);
  2777. }
  2778. return 0;
  2779. }
  2780. static void task_clock_perf_counter_disable(struct perf_counter *counter)
  2781. {
  2782. if (counter->hw.sample_period)
  2783. hrtimer_cancel(&counter->hw.hrtimer);
  2784. task_clock_perf_counter_update(counter, counter->ctx->time);
  2785. }
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
/*
 * Software counter: cpu migrations
 */
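
/*
 * Count one cpu-migration event for @task: once in @cpu's per-cpu
 * context and, if the task has a counter context of its own, once in
 * that context as well (pinned so it cannot be swapped away under us).
 */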
void perf_counter_task_migration(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx;

	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
				 PERF_COUNT_SW_CPU_MIGRATIONS,
				 1, 1, NULL, 0);

	ctx = perf_pin_task_context(task);
	if (ctx) {
		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
					 PERF_COUNT_SW_CPU_MIGRATIONS,
					 1, 1, NULL, 0);
		perf_unpin_context(ctx);
	}
}

#ifdef CONFIG_EVENT_PROFILE
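/*
 * Tracepoint hook: called when an enabled trace event fires; counts
 * one event for the given event id against the registers of the
 * interrupted context (or the task's registers if there are none).
 */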
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->attr));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->attr);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif
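
/*
 * Map a software counter config to the pmu that implements it.
 */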
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
	const struct pmu *pmu = NULL;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (counter->attr.config) {
	case PERF_COUNT_SW_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;
		break;
	case PERF_COUNT_SW_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;
		break;
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
	case PERF_COUNT_SW_CONTEXT_SWITCHES:
	case PERF_COUNT_SW_CPU_MIGRATIONS:
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_attr *attr,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	counter->cpu		= cpu;
	counter->attr		= *attr;
	counter->group_leader	= group_leader;
	counter->pmu		= NULL;
	counter->ctx		= ctx;
	counter->oncpu		= -1;

	counter->ns		= get_pid_ns(current->nsproxy->pid_ns);
	counter->id		= atomic64_inc_return(&perf_counter_id);

	counter->state		= PERF_COUNTER_STATE_INACTIVE;

	if (attr->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	pmu = NULL;

	hwc = &counter->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;

	atomic64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
	 */
	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
		goto done;

	switch (attr->type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		pmu = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_counter_init(counter);
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (counter->ns)
			put_pid_ns(counter->ns);
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->pmu = pmu;

	atomic_inc(&nr_counters);
	if (counter->attr.mmap)
		atomic_inc(&nr_mmap_counters);
	if (counter->attr.comm)
		atomic_inc(&nr_comm_counters);

	return counter;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @attr_uptr:	event type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_attr attr;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
		return -EFAULT;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_counter_sample_rate)
			return -EINVAL;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
				     GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_ctx(ctx);

	goto out_fput;
}
/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->attr,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en, dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	if (parent_counter->attr.freq)
		child_counter->hw.sample_period = parent_counter->hw.sample_period;

	/*
	 * Link it up in the child's context:
	 */
	add_counter_to_ctx(child_counter, child_ctx);

	child_counter->parent = parent_counter;

	/*
	 * inherit into child's child as well:
	 */
	child_counter->attr.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	return child_counter;
}
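
/*
 * Inherit a whole counter group: clone the group leader first, then
 * each sibling, attaching the new siblings to the new leader.
 */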
static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
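
/*
 * Fold a dying child counter's count and time-enabled/running totals
 * back into its parent, unlink it from the parent's child list and
 * drop the reference on the parent's filp that inherit_counter() took.
 */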
static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 child_val;

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}
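
/*
 * Detach a counter from the exiting child's context; if it was
 * inherited, sync its value back into the parent and free it.
 */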
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;

	update_counter_times(child_counter);
	perf_counter_remove_from_context(child_counter);

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		free_counter(child_counter);
	}
}
/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_counter_ctxp))
		return;

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
	__perf_counter_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
	spin_unlock(&child_ctx->lock);
	local_irq_restore(flags);
	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_counter_exit_task()
	 *     sync_child_counter()
	 *       fput(parent_counter->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child_counter, child_ctx);

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
/*
 * Free an unexposed, unused context as created by inheritance in
 * perf_counter_init_task() below; used by fork() if inheritance fails.
 */
void perf_counter_free_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter *counter, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
		struct perf_counter *parent = counter->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&counter->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_counter(counter, ctx);
		free_counter(counter);
	}

	if (!list_empty(&ctx->counter_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter_context *cloned_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_counter_ctxp = NULL;

	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

	if (likely(!parent->perf_counter_ctxp))
		return 0;

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
	 */
	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
		return -ENOMEM;

	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
	get_task_struct(child);

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);
	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {

		if (counter != counter->group_leader)
			continue;

		if (!counter->attr.inherit) {
			inherited_all = 0;
			continue;
		}

		ret = inherit_group(counter, parent, parent_ctx,
				    child, child_ctx);
		if (ret) {
			inherited_all = 0;
			break;
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
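
/*
 * Initialize the per-cpu counter context for a CPU that is coming
 * online, then let the hardware PMU code do its per-cpu setup.
 */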
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
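/*
 * When a CPU goes offline, remove every counter from its per-cpu
 * context.  The removal must run on that CPU, so it is done via
 * smp_call_function_single() with the context mutex held.
 */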
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif
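
/*
 * CPU hotplug callback: set up or tear down the per-cpu counter
 * context as CPUs come and go.
 */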
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};
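
/*
 * Boot-time initialization: the boot CPU is already online, so
 * initialize its context by calling the notifier directly, then
 * register the notifier to handle the remaining CPUs.
 */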
void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
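
/*
 * Two tunables exposed under the cpu sysdev class in a "perf_counters"
 * attribute group: reserve_percpu sets how many hardware counters are
 * set aside for per-cpu use (which lowers each CPU's per-task limit),
 * and overcommit is a 0/1 flag.
 */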
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);