/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/dcache.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        return NULL;
}

u64 __weak hw_perf_save_disable(void)      { return 0; }
void __weak hw_perf_restore(u64 ctrl)      { barrier(); }
void __weak hw_perf_counter_setup(int cpu) { barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu)
{
        return 0;
}

void __weak perf_counter_print_debug(void) { }

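/*
 * Note: these __weak stubs are the fallback for architectures that have
 * not (yet) wired up hardware PMU support.  With only the stubs present,
 * hw_perf_counter_init() returns NULL, so presumably only software
 * counters are available and the global save/disable/restore hooks
 * degrade to no-ops.
 */
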
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
        struct perf_counter *group_leader = counter->group_leader;

        /*
         * Depending on whether it is a standalone or sibling counter,
         * add it straight to the context's counter list, or to the group
         * leader's sibling list:
         */
        if (counter->group_leader == counter)
                list_add_tail(&counter->list_entry, &ctx->counter_list);
        else {
                list_add_tail(&counter->list_entry,
                              &group_leader->sibling_list);
                group_leader->nr_siblings++;
        }

        list_add_rcu(&counter->event_entry, &ctx->event_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
        struct perf_counter *sibling, *tmp;

        list_del_init(&counter->list_entry);
        list_del_rcu(&counter->event_entry);

        if (counter->group_leader != counter)
                counter->group_leader->nr_siblings--;

        /*
         * If this was a group counter with sibling counters then
         * upgrade the siblings to singleton counters by adding them
         * to the context list directly:
         */
        list_for_each_entry_safe(sibling, tmp,
                                 &counter->sibling_list, list_entry) {
                list_move_tail(&sibling->list_entry, &ctx->counter_list);
                sibling->group_leader = sibling;
        }
}

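/*
 * Note on the two lists manipulated above: ctx->counter_list links group
 * leaders only (siblings hang off their leader's sibling_list) and is
 * protected by ctx->lock, while ctx->event_list links every counter in
 * the context and uses the _rcu list primitives so that readers can
 * traverse it under rcu_read_lock() without taking ctx->lock.
 */
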
static void
counter_sched_out(struct perf_counter *counter,
                  struct perf_cpu_context *cpuctx,
                  struct perf_counter_context *ctx)
{
        if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                return;

        counter->state = PERF_COUNTER_STATE_INACTIVE;
        counter->tstamp_stopped = ctx->time;
        counter->hw_ops->disable(counter);
        counter->oncpu = -1;

        if (!is_software_counter(counter))
                cpuctx->active_oncpu--;
        ctx->nr_active--;
        if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
                struct perf_cpu_context *cpuctx,
                struct perf_counter_context *ctx)
{
        struct perf_counter *counter;

        if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
                return;

        counter_sched_out(group_counter, cpuctx, ctx);

        /*
         * Schedule out siblings (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                counter_sched_out(counter, cpuctx, ctx);

        if (group_counter->hw_event.exclusive)
                cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        unsigned long flags;
        u64 perf_flags;

        /*
         * If this is a task context, we need to check whether it is
         * the current task context of this cpu. If not it has been
         * scheduled out before the smp call arrived.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock_irqsave(&ctx->lock, flags);

        counter_sched_out(counter, cpuctx, ctx);

        counter->task = NULL;
        ctx->nr_counters--;

        /*
         * Protect the list operation against NMI by disabling the
         * counters on a global level. NOP for non NMI based counters.
         */
        perf_flags = hw_perf_save_disable();
        list_del_counter(counter, ctx);
        hw_perf_restore(perf_flags);

        if (!ctx->task) {
                /*
                 * Allow more per task counters with respect to the
                 * reservation:
                 */
                cpuctx->max_pertask =
                        min(perf_max_counters - ctx->nr_counters,
                            perf_max_counters - perf_reserved_percpu);
        }

        spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Per cpu counters are removed via an smp call and
                 * the removal is always successful.
                 */
                smp_call_function_single(counter->cpu,
                                         __perf_counter_remove_from_context,
                                         counter, 1);
                return;
        }

retry:
        task_oncpu_function_call(task, __perf_counter_remove_from_context,
                                 counter);

        spin_lock_irq(&ctx->lock);
        /*
         * If the context is active we need to retry the smp call.
         */
        if (ctx->nr_active && !list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * The lock prevents this context from being scheduled in, so we
         * can remove the counter safely if the cross-call above did not
         * succeed.
         */
        if (!list_empty(&counter->list_entry)) {
                ctx->nr_counters--;
                list_del_counter(counter, ctx);
                counter->task = NULL;
        }
        spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
        return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
        u64 now = perf_clock();

        ctx->time += now - ctx->timestamp;
        ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        u64 run_end;

        if (counter->state < PERF_COUNTER_STATE_INACTIVE)
                return;

        counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

        if (counter->state == PERF_COUNTER_STATE_INACTIVE)
                run_end = counter->tstamp_stopped;
        else
                run_end = ctx->time;

        counter->total_time_running = run_end - counter->tstamp_running;
}

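/*
 * Worked example for the accounting above: a counter added at
 * ctx->time == 100 gets tstamp_enabled = tstamp_running =
 * tstamp_stopped = 100.  If it is scheduled in at ctx->time == 120,
 * counter_sched_in() bumps tstamp_running by 20; read while still
 * ACTIVE at ctx->time == 150 it reports total_time_enabled == 50 and
 * total_time_running == 30.  The enabled/running ratio lets user-space
 * scale up counts from counters that were multiplexed off the PMU part
 * of the time.
 */
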
/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
        struct perf_counter *counter;

        update_counter_times(leader);
        list_for_each_entry(counter, &leader->sibling_list, list_entry)
                update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
        struct perf_counter *counter = info;
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = counter->ctx;
        unsigned long flags;

        /*
         * If this is a per-task counter, need to check whether this
         * counter's task is the current task on this cpu.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock_irqsave(&ctx->lock, flags);

        /*
         * If the counter is on, turn it off.
         * If it is in error state, leave it in error state.
         */
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
                update_context_time(ctx);
                update_counter_times(counter);
                if (counter == counter->group_leader)
                        group_sched_out(counter, cpuctx, ctx);
                else
                        counter_sched_out(counter, cpuctx, ctx);
                counter->state = PERF_COUNTER_STATE_OFF;
        }

        spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Disable a counter.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Disable the counter on the cpu that it's on
                 */
                smp_call_function_single(counter->cpu, __perf_counter_disable,
                                         counter, 1);
                return;
        }

retry:
        task_oncpu_function_call(task, __perf_counter_disable, counter);

        spin_lock_irq(&ctx->lock);
        /*
         * If the counter is still active, we need to retry the cross-call.
         */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * Since we have the lock this context can't be scheduled
         * in, so we can change the state safely.
         */
        if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
                update_counter_times(counter);
                counter->state = PERF_COUNTER_STATE_OFF;
        }

        spin_unlock_irq(&ctx->lock);
}

/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
        struct perf_counter *child;

        perf_counter_disable(counter);

        /*
         * Lock the mutex to protect the list of children
         */
        mutex_lock(&counter->mutex);
        list_for_each_entry(child, &counter->child_list, child_list)
                perf_counter_disable(child);
        mutex_unlock(&counter->mutex);
}

static int
counter_sched_in(struct perf_counter *counter,
                 struct perf_cpu_context *cpuctx,
                 struct perf_counter_context *ctx,
                 int cpu)
{
        if (counter->state <= PERF_COUNTER_STATE_OFF)
                return 0;

        counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
        smp_wmb();

        if (counter->hw_ops->enable(counter)) {
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                counter->oncpu = -1;
                return -EAGAIN;
        }

        counter->tstamp_running += ctx->time - counter->tstamp_stopped;

        if (!is_software_counter(counter))
                cpuctx->active_oncpu++;
        ctx->nr_active++;

        if (counter->hw_event.exclusive)
                cpuctx->exclusive = 1;

        return 0;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
        struct perf_counter *counter;

        if (!is_software_counter(leader))
                return 0;

        list_for_each_entry(counter, &leader->sibling_list, list_entry)
                if (!is_software_counter(counter))
                        return 0;

        return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
                           struct perf_cpu_context *cpuctx,
                           int can_add_hw)
{
        /*
         * Groups consisting entirely of software counters can always go on.
         */
        if (is_software_only_group(counter))
                return 1;
        /*
         * If an exclusive group is already on, no other hardware
         * counters can go on.
         */
        if (cpuctx->exclusive)
                return 0;
        /*
         * If this group is exclusive and there are already
         * counters on the CPU, it can't go on.
         */
        if (counter->hw_event.exclusive && cpuctx->active_oncpu)
                return 0;
        /*
         * Otherwise, try to add it if all previous groups were able
         * to go on.
         */
        return can_add_hw;
}

static void add_counter_to_ctx(struct perf_counter *counter,
                               struct perf_counter_context *ctx)
{
        list_add_counter(counter, ctx);
        ctx->nr_counters++;
        counter->prev_state = PERF_COUNTER_STATE_OFF;
        counter->tstamp_enabled = ctx->time;
        counter->tstamp_running = ctx->time;
        counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_counter *leader = counter->group_leader;
        int cpu = smp_processor_id();
        unsigned long flags;
        u64 perf_flags;
        int err;

        /*
         * If this is a task context, we need to check whether it is
         * the current task context of this cpu. If not it has been
         * scheduled out before the smp call arrived.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock_irqsave(&ctx->lock, flags);
        update_context_time(ctx);

        /*
         * Protect the list operation against NMI by disabling the
         * counters on a global level. NOP for non NMI based counters.
         */
        perf_flags = hw_perf_save_disable();

        add_counter_to_ctx(counter, ctx);

        /*
         * Don't put the counter on if it is disabled or if
         * it is in a group and the group isn't on.
         */
        if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
            (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
                goto unlock;

        /*
         * An exclusive counter can't go on if there are already active
         * hardware counters, and no hardware counter can go on if there
         * is already an exclusive counter on.
         */
        if (!group_can_go_on(counter, cpuctx, 1))
                err = -EEXIST;
        else
                err = counter_sched_in(counter, cpuctx, ctx, cpu);

        if (err) {
                /*
                 * This counter couldn't go on. If it is in a group
                 * then we have to pull the whole group off.
                 * If the counter group is pinned then put it in error state.
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
                if (leader->hw_event.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
        }

        if (!err && !ctx->task && cpuctx->max_pertask)
                cpuctx->max_pertask--;

unlock:
        hw_perf_restore(perf_flags);

        spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
                        struct perf_counter *counter,
                        int cpu)
{
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Per cpu counters are installed via an smp call and
                 * the install is always successful.
                 */
                smp_call_function_single(cpu, __perf_install_in_context,
                                         counter, 1);
                return;
        }

        counter->task = task;
retry:
        task_oncpu_function_call(task, __perf_install_in_context,
                                 counter);

        spin_lock_irq(&ctx->lock);
        /*
         * If the context is active and the counter has not been added,
         * we need to retry the smp call.
         */
        if (ctx->is_active && list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * The lock prevents this context from being scheduled in, so we
         * can add the counter safely if the call above did not succeed.
         */
        if (list_empty(&counter->list_entry))
                add_counter_to_ctx(counter, ctx);
        spin_unlock_irq(&ctx->lock);
}

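/*
 * Note: install, remove, disable and enable all share the same shape
 * for task contexts: fire a cross-call at the CPU the task is running
 * on, then re-check under ctx->lock whether the cross-call actually hit
 * this context.  If the task migrated away in the meantime we either
 * retry, or perform the list/state update directly - which is safe
 * because holding ctx->lock keeps the context from being scheduled
 * back in.
 */
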
/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
        struct perf_counter *counter = info;
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_counter *leader = counter->group_leader;
        unsigned long flags;
        int err;

        /*
         * If this is a per-task counter, need to check whether this
         * counter's task is the current task on this cpu.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock_irqsave(&ctx->lock, flags);
        update_context_time(ctx);

        counter->prev_state = counter->state;
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
                goto unlock;
        counter->state = PERF_COUNTER_STATE_INACTIVE;
        counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

        /*
         * If the counter is in a group and isn't the group leader,
         * then don't put it on unless the group is on.
         */
        if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
                goto unlock;

        if (!group_can_go_on(counter, cpuctx, 1))
                err = -EEXIST;
        else
                err = counter_sched_in(counter, cpuctx, ctx,
                                       smp_processor_id());

        if (err) {
                /*
                 * If this counter can't go on and it's part of a
                 * group, then the whole group has to come off.
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
                if (leader->hw_event.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
        }

unlock:
        spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Enable a counter.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Enable the counter on the cpu that it's on
                 */
                smp_call_function_single(counter->cpu, __perf_counter_enable,
                                         counter, 1);
                return;
        }

        spin_lock_irq(&ctx->lock);
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
                goto out;

        /*
         * If the counter is in error state, clear that first.
         * That way, if we see the counter in error state below, we
         * know that it has gone back into error state, as distinct
         * from the task having been scheduled away before the
         * cross-call arrived.
         */
        if (counter->state == PERF_COUNTER_STATE_ERROR)
                counter->state = PERF_COUNTER_STATE_OFF;

retry:
        spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_counter_enable, counter);

        spin_lock_irq(&ctx->lock);

        /*
         * If the context is active and the counter is still off,
         * we need to retry the cross-call.
         */
        if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
                goto retry;

        /*
         * Since we have the lock this context can't be scheduled
         * in, so we can change the state safely.
         */
        if (counter->state == PERF_COUNTER_STATE_OFF) {
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                counter->tstamp_enabled =
                        ctx->time - counter->total_time_enabled;
        }
out:
        spin_unlock_irq(&ctx->lock);
}

static void perf_counter_refresh(struct perf_counter *counter, int refresh)
{
        atomic_add(refresh, &counter->event_limit);
        perf_counter_enable(counter);
}

/*
 * Enable a counter and all its children.
 */
static void perf_counter_enable_family(struct perf_counter *counter)
{
        struct perf_counter *child;

        perf_counter_enable(counter);

        /*
         * Lock the mutex to protect the list of children
         */
        mutex_lock(&counter->mutex);
        list_for_each_entry(child, &counter->child_list, child_list)
                perf_counter_enable(child);
        mutex_unlock(&counter->mutex);
}

void __perf_counter_sched_out(struct perf_counter_context *ctx,
                              struct perf_cpu_context *cpuctx)
{
        struct perf_counter *counter;
        u64 flags;

        spin_lock(&ctx->lock);
        ctx->is_active = 0;
        if (likely(!ctx->nr_counters))
                goto out;
        update_context_time(ctx);

        flags = hw_perf_save_disable();
        if (ctx->nr_active) {
                list_for_each_entry(counter, &ctx->counter_list, list_entry)
                        group_sched_out(counter, cpuctx, ctx);
        }
        hw_perf_restore(flags);
out:
        spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &task->perf_counter_ctx;
        struct pt_regs *regs;

        if (likely(!cpuctx->task_ctx))
                return;

        update_context_time(ctx);

        regs = task_pt_regs(task);
        perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
        __perf_counter_sched_out(ctx, cpuctx);

        cpuctx->task_ctx = NULL;
}

static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
        __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

static int
group_sched_in(struct perf_counter *group_counter,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx,
               int cpu)
{
        struct perf_counter *counter, *partial_group;
        int ret;

        if (group_counter->state == PERF_COUNTER_STATE_OFF)
                return 0;

        ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
        if (ret)
                return ret < 0 ? ret : 0;

        group_counter->prev_state = group_counter->state;
        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
                return -EAGAIN;

        /*
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
                counter->prev_state = counter->state;
                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
                        partial_group = counter;
                        goto group_error;
                }
        }

        return 0;

group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
                if (counter == partial_group)
                        break;
                counter_sched_out(counter, cpuctx, ctx);
        }
        counter_sched_out(group_counter, cpuctx, ctx);

        return -EAGAIN;
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
                        struct perf_cpu_context *cpuctx, int cpu)
{
        struct perf_counter *counter;
        u64 flags;
        int can_add_hw = 1;

        spin_lock(&ctx->lock);
        ctx->is_active = 1;
        if (likely(!ctx->nr_counters))
                goto out;

        ctx->timestamp = perf_clock();

        flags = hw_perf_save_disable();

        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
         */
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
                    !counter->hw_event.pinned)
                        continue;
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;

                if (group_can_go_on(counter, cpuctx, 1))
                        group_sched_in(counter, cpuctx, ctx, cpu);

                /*
                 * If this pinned group hasn't been scheduled,
                 * put it in error state.
                 */
                if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
                        update_group_times(counter);
                        counter->state = PERF_COUNTER_STATE_ERROR;
                }
        }

        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                /*
                 * Ignore counters in OFF or ERROR state, and
                 * ignore pinned counters since we did them already.
                 */
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
                    counter->hw_event.pinned)
                        continue;

                /*
                 * Listen to the 'cpu' scheduling filter constraint
                 * of counters:
                 */
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;

                if (group_can_go_on(counter, cpuctx, can_add_hw)) {
                        if (group_sched_in(counter, cpuctx, ctx, cpu))
                                can_add_hw = 0;
                }
        }
        hw_perf_restore(flags);
out:
        spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &task->perf_counter_ctx;

        __perf_counter_sched_in(ctx, cpuctx, cpu);
        cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
        struct perf_counter_context *ctx = &cpuctx->ctx;

        __perf_counter_sched_in(ctx, cpuctx, cpu);
}

int perf_counter_task_disable(void)
{
        struct task_struct *curr = current;
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        struct perf_counter *counter;
        unsigned long flags;
        u64 perf_flags;
        int cpu;

        if (likely(!ctx->nr_counters))
                return 0;

        local_irq_save(flags);
        cpu = smp_processor_id();

        perf_counter_task_sched_out(curr, cpu);

        spin_lock(&ctx->lock);

        /*
         * Disable all the counters:
         */
        perf_flags = hw_perf_save_disable();

        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state != PERF_COUNTER_STATE_ERROR) {
                        update_group_times(counter);
                        counter->state = PERF_COUNTER_STATE_OFF;
                }
        }

        hw_perf_restore(perf_flags);

        spin_unlock_irqrestore(&ctx->lock, flags);

        return 0;
}

int perf_counter_task_enable(void)
{
        struct task_struct *curr = current;
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        struct perf_counter *counter;
        unsigned long flags;
        u64 perf_flags;
        int cpu;

        if (likely(!ctx->nr_counters))
                return 0;

        local_irq_save(flags);
        cpu = smp_processor_id();

        perf_counter_task_sched_out(curr, cpu);

        spin_lock(&ctx->lock);

        /*
         * Disable all counters at the hw level while we re-enable them:
         */
        perf_flags = hw_perf_save_disable();

        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state > PERF_COUNTER_STATE_OFF)
                        continue;
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                counter->tstamp_enabled =
                        ctx->time - counter->total_time_enabled;
                counter->hw_event.disabled = 0;
        }
        hw_perf_restore(perf_flags);

        spin_unlock(&ctx->lock);

        perf_counter_task_sched_in(curr, cpu);

        local_irq_restore(flags);

        return 0;
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
        struct perf_counter *counter;
        u64 perf_flags;

        if (!ctx->nr_counters)
                return;

        spin_lock(&ctx->lock);
        /*
         * Rotate the first entry last (works just fine for group counters too):
         */
        perf_flags = hw_perf_save_disable();
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                list_move_tail(&counter->list_entry, &ctx->counter_list);
                break;
        }
        hw_perf_restore(perf_flags);

        spin_unlock(&ctx->lock);
}

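/*
 * Note the loop-with-immediate-break idiom above: exactly one entry
 * (the current head of counter_list) is moved to the tail per
 * invocation, so successive ticks cycle through the groups and counters
 * that could not fit on the PMU eventually get their turn.
 */
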
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        const int rotate_percpu = 0;

        if (rotate_percpu)
                perf_counter_cpu_sched_out(cpuctx);
        perf_counter_task_sched_out(curr, cpu);

        if (rotate_percpu)
                rotate_ctx(&cpuctx->ctx);
        rotate_ctx(ctx);

        if (rotate_percpu)
                perf_counter_cpu_sched_in(cpuctx, cpu);
        perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        unsigned long flags;

        local_irq_save(flags);
        if (ctx->is_active)
                update_context_time(ctx);
        counter->hw_ops->read(counter);
        update_counter_times(counter);
        local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
        /*
         * If counter is enabled and currently active on a CPU, update the
         * value in the counter structure:
         */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                smp_call_function_single(counter->oncpu,
                                         __read, counter, 1);
        } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
                update_counter_times(counter);
        }

        return atomic64_read(&counter->count);
}

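/*
 * Note: an ACTIVE counter has to be read via an IPI to the CPU it is
 * running on (__read above), since only that CPU can drain the hardware
 * counter into counter->count; an INACTIVE counter merely needs its
 * time fields brought up to date before counter->count is returned.
 */
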
static void put_context(struct perf_counter_context *ctx)
{
        if (ctx->task)
                put_task_struct(ctx->task);
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
        struct perf_cpu_context *cpuctx;
        struct perf_counter_context *ctx;
        struct task_struct *task;

        /*
         * If cpu is not a wildcard then this is a percpu counter:
         */
        if (cpu != -1) {
                /* Must be root to operate on a CPU counter: */
                if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);

                if (cpu < 0 || cpu > num_possible_cpus())
                        return ERR_PTR(-EINVAL);

                /*
                 * We could be clever and allow to attach a counter to an
                 * offline CPU and activate it when the CPU comes up, but
                 * that's for later.
                 */
                if (!cpu_isset(cpu, cpu_online_map))
                        return ERR_PTR(-ENODEV);

                cpuctx = &per_cpu(perf_cpu_context, cpu);
                ctx = &cpuctx->ctx;

                return ctx;
        }

        rcu_read_lock();
        if (!pid)
                task = current;
        else
                task = find_task_by_vpid(pid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();

        if (!task)
                return ERR_PTR(-ESRCH);

        ctx = &task->perf_counter_ctx;
        ctx->task = task;

        /* Reuse ptrace permission checks for now. */
        if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
                put_context(ctx);
                return ERR_PTR(-EACCES);
        }

        return ctx;
}

static void free_counter_rcu(struct rcu_head *head)
{
        struct perf_counter *counter;

        counter = container_of(head, struct perf_counter, rcu_head);
        kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

static void free_counter(struct perf_counter *counter)
{
        perf_pending_sync(counter);

        if (counter->hw_event.mmap)
                atomic_dec(&nr_mmap_tracking);
        if (counter->hw_event.munmap)
                atomic_dec(&nr_munmap_tracking);
        if (counter->hw_event.comm)
                atomic_dec(&nr_comm_tracking);

        if (counter->destroy)
                counter->destroy(counter);

        call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
        struct perf_counter *counter = file->private_data;
        struct perf_counter_context *ctx = counter->ctx;

        file->private_data = NULL;

        mutex_lock(&ctx->mutex);
        mutex_lock(&counter->mutex);

        perf_counter_remove_from_context(counter);

        mutex_unlock(&counter->mutex);
        mutex_unlock(&ctx->mutex);

        free_counter(counter);
        put_context(ctx);

        return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
        u64 values[3];
        int n;

        /*
         * Return end-of-file for a read on a counter that is in
         * error state (i.e. because it was pinned but it couldn't be
         * scheduled on to the CPU at some point).
         */
        if (counter->state == PERF_COUNTER_STATE_ERROR)
                return 0;

        mutex_lock(&counter->mutex);
        values[0] = perf_counter_read(counter);
        n = 1;
        if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                values[n++] = counter->total_time_enabled +
                        atomic64_read(&counter->child_total_time_enabled);
        if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                values[n++] = counter->total_time_running +
                        atomic64_read(&counter->child_total_time_running);
        mutex_unlock(&counter->mutex);

        if (count < n * sizeof(u64))
                return -EINVAL;
        count = n * sizeof(u64);

        if (copy_to_user(buf, values, count))
                return -EFAULT;

        return count;
}

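/*
 * Illustrative user-space view of the layout produced above, with
 * read_format == PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING:
 *
 *	u64 values[3];
 *
 *	read(fd, values, sizeof(values));
 *	// values[0]: counter value
 *	// values[1]: total time enabled (incl. children)
 *	// values[2]: total time running (incl. children)
 */
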
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct perf_counter *counter = file->private_data;

        return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
        struct perf_counter *counter = file->private_data;
        struct perf_mmap_data *data;
        unsigned int events;

        rcu_read_lock();
        data = rcu_dereference(counter->data);
        if (data)
                events = atomic_xchg(&data->wakeup, 0);
        else
                events = POLL_HUP;
        rcu_read_unlock();

        poll_wait(file, &counter->waitq, wait);

        return events;
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct perf_counter *counter = file->private_data;
        int err = 0;

        switch (cmd) {
        case PERF_COUNTER_IOC_ENABLE:
                perf_counter_enable_family(counter);
                break;
        case PERF_COUNTER_IOC_DISABLE:
                perf_counter_disable_family(counter);
                break;
        case PERF_COUNTER_IOC_REFRESH:
                perf_counter_refresh(counter, arg);
                break;
        default:
                err = -ENOTTY;
        }
        return err;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
        struct perf_mmap_data *data;
        struct perf_counter_mmap_page *userpg;

        rcu_read_lock();
        data = rcu_dereference(counter->data);
        if (!data)
                goto unlock;

        userpg = data->user_page;

        /*
         * Disable preemption so as to not let the corresponding user-space
         * spin too long if we get preempted.
         */
        preempt_disable();
        ++userpg->lock;
        barrier();
        userpg->index = counter->hw.idx;
        userpg->offset = atomic64_read(&counter->count);
        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                userpg->offset -= atomic64_read(&counter->hw.prev_count);

        barrier();
        ++userpg->lock;
        preempt_enable();
unlock:
        rcu_read_unlock();
}

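/*
 * The userpg->lock increments above form a seqcount: the value is odd
 * while an update is in flight and has changed once one completes.
 * A user-space reader is expected to pair with it roughly like this
 * (illustrative sketch, not a kernel interface):
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		idx = pg->index;
 *		off = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 */
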
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct perf_counter *counter = vma->vm_file->private_data;
        struct perf_mmap_data *data;
        int ret = VM_FAULT_SIGBUS;

        rcu_read_lock();
        data = rcu_dereference(counter->data);
        if (!data)
                goto unlock;

        if (vmf->pgoff == 0) {
                vmf->page = virt_to_page(data->user_page);
        } else {
                int nr = vmf->pgoff - 1;

                if ((unsigned)nr > data->nr_pages)
                        goto unlock;

                vmf->page = virt_to_page(data->data_pages[nr]);
        }
        get_page(vmf->page);
        ret = 0;
unlock:
        rcu_read_unlock();

        return ret;
}

static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
        struct perf_mmap_data *data;
        unsigned long size;
        int i;

        WARN_ON(atomic_read(&counter->mmap_count));

        size = sizeof(struct perf_mmap_data);
        size += nr_pages * sizeof(void *);

        data = kzalloc(size, GFP_KERNEL);
        if (!data)
                goto fail;

        data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
        if (!data->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!data->data_pages[i])
                        goto fail_data_pages;
        }

        data->nr_pages = nr_pages;

        rcu_assign_pointer(counter->data, data);

        return 0;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)data->data_pages[i]);

        free_page((unsigned long)data->user_page);

fail_user_page:
        kfree(data);

fail:
        return -ENOMEM;
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
        struct perf_mmap_data *data = container_of(rcu_head,
                        struct perf_mmap_data, rcu_head);
        int i;

        free_page((unsigned long)data->user_page);
        for (i = 0; i < data->nr_pages; i++)
                free_page((unsigned long)data->data_pages[i]);
        kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
        struct perf_mmap_data *data = counter->data;

        WARN_ON(atomic_read(&counter->mmap_count));

        rcu_assign_pointer(counter->data, NULL);
        call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
        struct perf_counter *counter = vma->vm_file->private_data;

        atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
        struct perf_counter *counter = vma->vm_file->private_data;

        if (atomic_dec_and_mutex_lock(&counter->mmap_count,
                                      &counter->mmap_mutex)) {
                vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
                perf_mmap_data_free(counter);
                mutex_unlock(&counter->mmap_mutex);
        }
}

static struct vm_operations_struct perf_mmap_vmops = {
        .open  = perf_mmap_open,
        .close = perf_mmap_close,
        .fault = perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct perf_counter *counter = file->private_data;
        unsigned long vma_size;
        unsigned long nr_pages;
        unsigned long locked, lock_limit;
        int ret = 0;

        if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
                return -EINVAL;

        vma_size = vma->vm_end - vma->vm_start;
        nr_pages = (vma_size / PAGE_SIZE) - 1;

        /*
         * If we have data pages ensure they're a power-of-two number, so we
         * can do bitmasks instead of modulo.
         */
        if (nr_pages != 0 && !is_power_of_2(nr_pages))
                return -EINVAL;

        if (vma_size != PAGE_SIZE * (1 + nr_pages))
                return -EINVAL;

        if (vma->vm_pgoff != 0)
                return -EINVAL;

        mutex_lock(&counter->mmap_mutex);
        if (atomic_inc_not_zero(&counter->mmap_count)) {
                if (nr_pages != counter->data->nr_pages)
                        ret = -EINVAL;
                goto unlock;
        }

        locked = vma->vm_mm->locked_vm;
        locked += nr_pages + 1;

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                ret = -EPERM;
                goto unlock;
        }

        WARN_ON(counter->data);
        ret = perf_mmap_data_alloc(counter, nr_pages);
        if (ret)
                goto unlock;

        atomic_set(&counter->mmap_count, 1);
        vma->vm_mm->locked_vm += nr_pages + 1;
unlock:
        mutex_unlock(&counter->mmap_mutex);

        vma->vm_flags &= ~VM_MAYWRITE;
        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &perf_mmap_vmops;

        return ret;
}

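/*
 * Worked example of the sizing rules above: to get 8 data pages, map
 * 1 + 8 pages in total (hypothetical user-space snippet, 4k pages):
 *
 *	base = mmap(NULL, 9 * 4096, PROT_READ, MAP_SHARED, fd, 0);
 *
 * nr_pages = 8 is a power of two, so a ring-buffer offset can be taken
 * as "head & (8 * 4096 - 1)" instead of a modulo, which is why
 * non-power-of-two data sizes are rejected.
 */
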
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct perf_counter *counter = filp->private_data;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_counter_wakeup(struct perf_counter *counter)
{
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data) {
		atomic_set(&data->wakeup, POLL_IN);
		/*
		 * Ensure all data writes are issued before updating the
		 * user-space data head information. The matching rmb()
		 * will be in userspace after reading this value.
		 */
		smp_wmb();
		data->user_page->data_head = atomic_read(&data->head);
	}
	rcu_read_unlock();

	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}
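
/*
 * For reference, a minimal sketch of the matching user-space read side
 * (illustrative only; the buffer must be mapped read-only, as
 * perf_mmap() above requires):
 *
 *	struct perf_counter_mmap_page *pc = base;
 *	unsigned int head = pc->data_head;
 *
 *	rmb();
 *	consume records between the reader's saved tail and head;
 *
 * The rmb() pairs with the smp_wmb() above and ensures no record data
 * is read before the data_head value that covers it.
 */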
/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}
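
/*
 * Note the role of the PENDING_TAIL sentinel above: an entry whose
 * ->next is NULL is not queued, so the initial cmpxchg() both claims
 * the entry and makes re-queueing idempotent.  The per-cpu list is
 * terminated by PENDING_TAIL rather than NULL so that an entry that is
 * on the list can never look unqueued.
 */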
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see __perf_pending_run()
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}

/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}
/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned int		offset;
	unsigned int		head;
	int			wakeup;
	int			nmi;
	int			overflow;
};

static inline void __perf_output_wakeup(struct perf_output_handle *handle)
{
	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}

static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size,
			     int nmi, int overflow)
{
	struct perf_mmap_data *data;
	unsigned int offset, head;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	handle->counter	 = counter;
	handle->nmi	 = nmi;
	handle->overflow = overflow;

	if (!data->nr_pages)
		goto fail;

	do {
		offset = head = atomic_read(&data->head);
		head += size;
	} while (atomic_cmpxchg(&data->head, offset, head) != offset);

	handle->data	= data;
	handle->offset	= offset;
	handle->head	= head;
	handle->wakeup	= (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);

	return 0;

fail:
	__perf_output_wakeup(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
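
/*
 * The cmpxchg() loop above reserves [offset, offset + size) in the
 * buffer: concurrent writers (including NMIs) each advance data->head
 * atomically and then copy into their private range, so records never
 * interleave.  handle->wakeup is set when the reservation crosses a
 * page boundary, which throttles wakeups to roughly one per page of
 * output.
 */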
static void perf_output_copy(struct perf_output_handle *handle,
			     void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	WARN_ON_ONCE(handle->offset > handle->head);
}
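
/*
 * Because nr_pages is a power of two, (offset >> PAGE_SHIFT) & pages_mask
 * wraps the linear offset onto the ring of data pages.  E.g. with
 * nr_pages = 4 and 4KiB pages, offset 0x5008 lands in page (5 & 3) = 1
 * at page offset 8.  Old data is simply overwritten; there is no flow
 * control toward the reader.
 */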
#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

static void perf_output_end(struct perf_output_handle *handle)
{
	int wakeup_events = handle->counter->hw_event.wakeup_events;

	if (handle->overflow && wakeup_events) {
		int events = atomic_inc_return(&handle->data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &handle->data->events);
			__perf_output_wakeup(handle);
		}
	} else if (handle->wakeup)
		__perf_output_wakeup(handle);

	rcu_read_unlock();
}
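
/*
 * perf_counter_output() below builds one PERF_EVENT_MISC_OVERFLOW
 * record in two passes: the first sizes the record and sets a
 * header.type bit per requested PERF_RECORD_* field, the second emits
 * the fields in the same fixed order (ip, tid, time, addr, group,
 * callchain), so readers can parse the record from header.type alone.
 */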
static void perf_counter_output(struct perf_counter *counter,
				int nmi, struct pt_regs *regs, u64 addr)
{
	int ret;
	u64 record_type = counter->hw_event.record_type;
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
		u32 pid, tid;
	} tid_entry;
	struct {
		u64 event;
		u64 counter;
	} group_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;

	header.type = 0;
	header.size = sizeof(header);

	header.misc = PERF_EVENT_MISC_OVERFLOW;
	header.misc |= user_mode(regs) ?
		PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;

	if (record_type & PERF_RECORD_IP) {
		ip = instruction_pointer(regs);
		header.type |= PERF_RECORD_IP;
		header.size += sizeof(ip);
	}

	if (record_type & PERF_RECORD_TID) {
		/* namespace issues */
		tid_entry.pid = current->group_leader->pid;
		tid_entry.tid = current->pid;

		header.type |= PERF_RECORD_TID;
		header.size += sizeof(tid_entry);
	}

	if (record_type & PERF_RECORD_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.type |= PERF_RECORD_TIME;
		header.size += sizeof(u64);
	}

	if (record_type & PERF_RECORD_ADDR) {
		header.type |= PERF_RECORD_ADDR;
		header.size += sizeof(u64);
	}

	if (record_type & PERF_RECORD_GROUP) {
		header.type |= PERF_RECORD_GROUP;
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

	if (record_type & PERF_RECORD_CALLCHAIN) {
		callchain = perf_callchain(regs);

		if (callchain) {
			callchain_size = (1 + callchain->nr) * sizeof(u64);

			header.type |= PERF_RECORD_CALLCHAIN;
			header.size += callchain_size;
		}
	}

	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;

	perf_output_put(&handle, header);

	if (record_type & PERF_RECORD_IP)
		perf_output_put(&handle, ip);

	if (record_type & PERF_RECORD_TID)
		perf_output_put(&handle, tid_entry);

	if (record_type & PERF_RECORD_TIME)
		perf_output_put(&handle, time);

	if (record_type & PERF_RECORD_ADDR)
		perf_output_put(&handle, addr);

	if (record_type & PERF_RECORD_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;

		perf_output_put(&handle, nr);

		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
				sub->hw_ops->read(sub);

			group_entry.event = sub->hw_event.config;
			group_entry.counter = atomic64_read(&sub->count);

			perf_output_put(&handle, group_entry);
		}
	}

	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);

	perf_output_end(&handle);
}
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};
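
/*
 * On the wire a comm event is the fixed part above followed by the
 * task's comm string, NUL-terminated and padded to a u64 boundary
 * (see the ALIGN() in perf_counter_comm_event() below).
 */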
static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter,
				   struct perf_comm_event *comm_event)
{
	if (counter->hw_event.comm &&
	    comm_event->event.header.type == PERF_EVENT_COMM)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter, comm_event))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	unsigned int size;
	char *comm = comm_event->task->comm;

	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_tracking))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		.event  = {
			.header = { .type = PERF_EVENT_COMM, },
			.pid	= task->group_leader->pid,
			.tid	= task->pid,
		},
	};

	perf_counter_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct file	*file;
	char		*file_name;
	int		file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->hw_event.mmap &&
	    mmap_event->event.header.type == PERF_EVENT_MMAP)
		return 1;

	if (counter->hw_event.munmap &&
	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct file *file = mmap_event->file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = dentry_path(file->f_dentry, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);

	kfree(buf);
}

void perf_counter_mmap(unsigned long addr, unsigned long len,
		       unsigned long pgoff, struct file *file)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MMAP, },
			.pid	= current->group_leader->pid,
			.tid	= current->pid,
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

void perf_counter_munmap(unsigned long addr, unsigned long len,
			 unsigned long pgoff, struct file *file)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_munmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MUNMAP, },
			.pid	= current->group_leader->pid,
			.tid	= current->pid,
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}
/*
 * Generic counter overflow handling.
 */

int perf_counter_overflow(struct perf_counter *counter,
			  int nmi, struct pt_regs *regs, u64 addr)
{
	int events = atomic_read(&counter->event_limit);
	int ret = 0;

	counter->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
		counter->pending_kill = POLL_HUP;
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

	perf_counter_output(counter, nmi, regs, addr);
	return ret;
}
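
/*
 * A non-zero return from perf_counter_overflow() tells the caller that
 * the event_limit is exhausted: the counter has been (or is being)
 * disabled and POLL_HUP rather than POLL_IN will be delivered through
 * the fasync path, so callers may stop their sampling tick (see the
 * hrtimer handler below).
 */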
/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}
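
/*
 * The period machinery starts hw.count at -left so that
 * atomic64_add_negative() in perf_swcounter_add() below goes
 * non-negative exactly when 'left' more events have been counted.
 * E.g. with irq_period = 100 and period_left = 100, count starts at
 * -100 and the 100th increment flips it to 0, triggering
 * perf_swcounter_overflow().
 */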
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_counter *counter;
	struct pt_regs *regs;

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->hw_ops->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (perf_counter_overflow(counter, 0, regs, 0))
			ret = HRTIMER_NORESTART;
	}

	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));

	return ret;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs, u64 addr)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	if (perf_counter_overflow(counter, nmi, regs, addr))
		/* soft-disable the counter */
		;
}

static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	if (perf_event_raw(&counter->hw_event))
		return 0;

	if (perf_event_type(&counter->hw_event) != type)
		return 0;

	if (perf_event_id(&counter->hw_event) != event)
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs, addr);
}

static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs,
				     u64 addr)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs, addr);
	}
	rcu_read_unlock();
}

static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}
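
/*
 * One recursion counter per execution context (task, softirq, hardirq,
 * NMI): a software event raised while handling another software event
 * in the *same* context is dropped, while e.g. an NMI interrupting
 * softirq-level event processing can still record events.
 */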
static void __perf_swcounter_event(enum perf_event_types type, u32 event,
				   u64 nr, int nmi, struct pt_regs *regs,
				   u64 addr)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, regs, addr);
	if (cpuctx->task_ctx) {
		perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
					 nr, nmi, regs, addr);
	}

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}

void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
}

static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};
/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->irq_period) {
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(hwc->irq_period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};

/*
 * Software counter: task time clock
 */

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&counter->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 now;

	now = counter->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->irq_period) {
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(hwc->irq_period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
}

static const struct hw_perf_counter_ops perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};

/*
 * Software counter: cpu migrations
 */

static inline u64 get_cpu_migrations(struct perf_counter *counter)
{
	struct task_struct *curr = counter->ctx->task;

	if (curr)
		return curr->se.nr_migrations;
	return cpu_nr_migrations(smp_processor_id());
}

static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_cpu_migrations(counter);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
{
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_cpu_migrations(counter));
	return 0;
}

static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
	.enable		= cpu_migrations_perf_counter_enable,
	.disable	= cpu_migrations_perf_counter_disable,
	.read		= cpu_migrations_perf_counter_read,
};
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->hw_event));
}

static const struct hw_perf_counter_ops *
tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->hw_event);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.irq_period = counter->hw_event.irq_period;

	return &perf_ops_generic;
}
#else
static const struct hw_perf_counter_ops *
tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif

static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	const struct hw_perf_counter_ops *hw_ops = NULL;
	struct hw_perf_counter *hwc = &counter->hw;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (perf_event_id(&counter->hw_event)) {
	case PERF_COUNT_CPU_CLOCK:
		hw_ops = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			hw_ops = &perf_ops_task_clock;
		else
			hw_ops = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
		hw_ops = &perf_ops_generic;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		if (!counter->hw_event.exclude_kernel)
			hw_ops = &perf_ops_cpu_migrations;
		break;
	}

	if (hw_ops)
		hwc->irq_period = hw_event->irq_period;

	return hw_ops;
}
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct hw_perf_counter_ops *hw_ops;
	struct perf_counter *counter;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->mutex);
	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	INIT_LIST_HEAD(&counter->child_list);

	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
	counter->group_leader		= group_leader;
	counter->hw_ops			= NULL;
	counter->ctx			= ctx;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	hw_ops = NULL;

	if (perf_event_raw(hw_event)) {
		hw_ops = hw_perf_counter_init(counter);
		goto done;
	}

	switch (perf_event_type(hw_event)) {
	case PERF_TYPE_HARDWARE:
		hw_ops = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		hw_ops = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		hw_ops = tp_perf_counter_init(counter);
		break;
	}
done:
	err = 0;
	if (!hw_ops)
		err = -EINVAL;
	else if (IS_ERR(hw_ops))
		err = PTR_ERR(hw_ops);

	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->hw_ops = hw_ops;

	if (counter->hw_event.mmap)
		atomic_inc(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_inc(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_inc(&nr_comm_tracking);

	return counter;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	mutex_unlock(&ctx->mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_context(ctx);

	goto out_fput;
}
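
/*
 * A minimal user-space sketch of the intended call sequence
 * (illustrative only; the hw_event encoding is defined by the
 * perf_counter header in this tree and __NR_perf_counter_open by the
 * target architecture):
 *
 *	struct perf_counter_hw_event hw_event;
 *	u64 count;
 *	int fd;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	select an event via the config bits, then:
 *
 *	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 *
 * pid 0 targets the current task, cpu -1 means any cpu, group_fd -1
 * creates a new group leader, and flags must currently be zero.
 */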
/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	ctx->task = task;
}

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
		struct task_struct *parent,
		struct perf_counter_context *parent_ctx,
		struct task_struct *child,
		struct perf_counter *group_leader,
		struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;

	/*
	 * Link it up in the child's context:
	 */
	child_counter->task = child;
	add_counter_to_ctx(child_counter, child_ctx);

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	mutex_lock(&parent_counter->mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en,dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	mutex_unlock(&parent_counter->mutex);

	return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
			 struct task_struct *parent,
			 struct perf_counter_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
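
/*
 * Inheritance thus keeps the counter tree at most two levels deep:
 * every inherited counter points at the original user-created parent,
 * never at another inherited counter, and a group is cloned by
 * inheriting its leader first and then each sibling into that new
 * leader.
 */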
static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 parent_val, child_val;

	parent_val = atomic64_read(&parent_counter->count);
	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	mutex_lock(&parent_counter->mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}
static void
__perf_counter_exit_task(struct task_struct *child,
			 struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;
	struct perf_counter *sub, *tmp;

	/*
	 * If we do not self-reap then we have to wait for the
	 * child task to unschedule (it will happen for sure),
	 * so that its counter is at its final count. (This
	 * condition triggers rarely - child tasks usually get
	 * off their CPU before the parent has a chance to
	 * get this far into the reaping action)
	 */
	if (child != current) {
		wait_task_inactive(child, 0);
		list_del_init(&child_counter->list_entry);
		update_counter_times(child_counter);
	} else {
		struct perf_cpu_context *cpuctx;
		unsigned long flags;
		u64 perf_flags;

		/*
		 * Disable and unlink this counter.
		 *
		 * Be careful about zapping the list - IRQ/NMI context
		 * could still be processing it:
		 */
		local_irq_save(flags);
		perf_flags = hw_perf_save_disable();

		cpuctx = &__get_cpu_var(perf_cpu_context);

		group_sched_out(child_counter, cpuctx, child_ctx);
		update_counter_times(child_counter);

		list_del_init(&child_counter->list_entry);

		child_ctx->nr_counters--;

		hw_perf_restore(perf_flags);
		local_irq_restore(flags);
	}

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
					 list_entry) {
			if (sub->parent) {
				sync_child_counter(sub, sub->parent);
				free_counter(sub);
			}
		}
		free_counter(child_counter);
	}
}

/*
 * When a child task exits, feed back counter values to parent counters.
 *
 * Note: we may be running in child context, but the PID is not hashed
 * anymore so new counters will not be added.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;

	child_ctx = &child->perf_counter_ctx;

	if (likely(!child_ctx->nr_counters))
		return;

	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child, child_counter, child_ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;

	child_ctx  = &child->perf_counter_ctx;
	parent_ctx = &parent->perf_counter_ctx;

	__perf_counter_init_context(child_ctx, child);

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning:
	 */
	if (likely(!parent_ctx->nr_counters))
		return;

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
		if (!counter->hw_event.inherit)
			continue;

		if (inherit_group(counter, parent,
				  parent_ctx, child, child_ctx))
			break;
	}

	mutex_unlock(&parent_ctx->mutex);
}
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	mutex_lock(&perf_resource_mutex);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	mutex_unlock(&perf_resource_mutex);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};

static int __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);

	return 0;
}
early_initcall(perf_counter_init);
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	mutex_unlock(&perf_resource_mutex);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_overcommit = val;
	mutex_unlock(&perf_resource_mutex);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);
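
/*
 * The attribute group above appears under the cpu sysdev class, i.e.
 * (on a typical sysfs layout) as
 * /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit}.
 * Writing e.g. "2" to reserve_percpu keeps two counters per cpu out of
 * reach of per-task counters via max_pertask, and overcommit accepts
 * only 0 or 1.
 */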