perf_counter.c

/*
 * Performance counter core code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/perf_counter.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        return ERR_PTR(-EINVAL);
}

u64 __weak hw_perf_save_disable(void) { return 0; }
void __weak hw_perf_restore(u64 ctrl) { }
void __weak hw_perf_counter_setup(void) { }

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
        struct perf_counter *group_leader = counter->group_leader;

        /*
         * Depending on whether it is a standalone or sibling counter,
         * add it straight to the context's counter list, or to the group
         * leader's sibling list:
         */
        if (counter->group_leader == counter)
                list_add_tail(&counter->list_entry, &ctx->counter_list);
        else
                list_add_tail(&counter->list_entry, &group_leader->sibling_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
        struct perf_counter *sibling, *tmp;

        list_del_init(&counter->list_entry);

        /*
         * If this was a group counter with sibling counters then
         * upgrade the siblings to singleton counters by adding them
         * to the context list directly:
         */
        list_for_each_entry_safe(sibling, tmp,
                                 &counter->sibling_list, list_entry) {
                list_del_init(&sibling->list_entry);
                list_add_tail(&sibling->list_entry, &ctx->counter_list);
                WARN_ON_ONCE(!sibling->group_leader);
                WARN_ON_ONCE(sibling->group_leader == sibling);
                sibling->group_leader = sibling;
        }
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        u64 perf_flags;

        /*
         * If this is a task context, we need to check whether it is
         * the current task context of this cpu. If not it has been
         * scheduled out before the smp call arrived.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock(&ctx->lock);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                counter->hw_ops->hw_perf_counter_disable(counter);
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                ctx->nr_active--;
                cpuctx->active_oncpu--;
                counter->task = NULL;
        }
        ctx->nr_counters--;

        /*
         * Protect the list operation against NMIs by disabling the
         * counters on a global level. This is a NOP for non-NMI based
         * counters.
         */
        perf_flags = hw_perf_save_disable();
        list_del_counter(counter, ctx);
        hw_perf_restore(perf_flags);

        if (!ctx->task) {
                /*
                 * Allow more per task counters with respect to the
                 * reservation:
                 */
                cpuctx->max_pertask =
                        min(perf_max_counters - ctx->nr_counters,
                            perf_max_counters - perf_reserved_percpu);
        }

        spin_unlock(&ctx->lock);
}

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Per cpu counters are removed via an smp call and
                 * the removal is always successful.
                 */
                smp_call_function_single(counter->cpu,
                                         __perf_counter_remove_from_context,
                                         counter, 1);
                return;
        }

retry:
        task_oncpu_function_call(task, __perf_counter_remove_from_context,
                                 counter);

        spin_lock_irq(&ctx->lock);
        /*
         * If the context is active we need to retry the smp call.
         */
        if (ctx->nr_active && !list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * The lock prevents this context from being scheduled in, so we
         * can remove the counter safely if the call above did not
         * succeed.
         */
        if (!list_empty(&counter->list_entry)) {
                ctx->nr_counters--;
                list_del_counter(counter, ctx);
                counter->task = NULL;
        }
        spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        int cpu = smp_processor_id();
        u64 perf_flags;

        /*
         * If this is a task context, we need to check whether it is
         * the current task context of this cpu. If not it has been
         * scheduled out before the smp call arrived.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock(&ctx->lock);

        /*
         * Protect the list operation against NMIs by disabling the
         * counters on a global level. This is a NOP for non-NMI based
         * counters.
         */
        perf_flags = hw_perf_save_disable();
        list_add_counter(counter, ctx);
        hw_perf_restore(perf_flags);

        ctx->nr_counters++;

        if (cpuctx->active_oncpu < perf_max_counters) {
                counter->state = PERF_COUNTER_STATE_ACTIVE;
                counter->oncpu = cpu;
                ctx->nr_active++;
                cpuctx->active_oncpu++;
                counter->hw_ops->hw_perf_counter_enable(counter);
        }

        if (!ctx->task && cpuctx->max_pertask)
                cpuctx->max_pertask--;

        spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
                        struct perf_counter *counter,
                        int cpu)
{
        struct task_struct *task = ctx->task;

        counter->ctx = ctx;
        if (!task) {
                /*
                 * Per cpu counters are installed via an smp call and
                 * the install is always successful.
                 */
                smp_call_function_single(cpu, __perf_install_in_context,
                                         counter, 1);
                return;
        }
        counter->task = task;

retry:
        task_oncpu_function_call(task, __perf_install_in_context,
                                 counter);

        spin_lock_irq(&ctx->lock);
        /*
         * If the context is active and the counter has not been added
         * yet, we need to retry the smp call.
         */
        if (ctx->nr_active && list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * The lock prevents this context from being scheduled in, so we
         * can add the counter safely if the call above did not succeed.
         */
        if (list_empty(&counter->list_entry)) {
                list_add_counter(counter, ctx);
                ctx->nr_counters++;
        }
        spin_unlock_irq(&ctx->lock);
}
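
/*
 * Stop an active counter on the hardware level and update the
 * active-counter bookkeeping of its context:
 */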
static void
counter_sched_out(struct perf_counter *counter,
                  struct perf_cpu_context *cpuctx,
                  struct perf_counter_context *ctx)
{
        if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                return;

        counter->hw_ops->hw_perf_counter_disable(counter);
        counter->state = PERF_COUNTER_STATE_INACTIVE;
        counter->oncpu = -1;

        cpuctx->active_oncpu--;
        ctx->nr_active--;
}

static void
group_sched_out(struct perf_counter *group_counter,
                struct perf_cpu_context *cpuctx,
                struct perf_counter_context *ctx)
{
        struct perf_counter *counter;

        counter_sched_out(group_counter, cpuctx, ctx);

        /*
         * Schedule out siblings (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                counter_sched_out(counter, cpuctx, ctx);
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but hw_perf_counter_disable()
 * sets the disabled bit in the control field of the counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &task->perf_counter_ctx;
        struct perf_counter *counter;

        if (likely(!cpuctx->task_ctx))
                return;

        spin_lock(&ctx->lock);
        if (ctx->nr_active) {
                list_for_each_entry(counter, &ctx->counter_list, list_entry)
                        group_sched_out(counter, cpuctx, ctx);
        }
        spin_unlock(&ctx->lock);
        cpuctx->task_ctx = NULL;
}
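
/*
 * Start a counter (unless it has been turned OFF) on the given CPU and
 * update the active-counter bookkeeping of its context:
 */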
static void
counter_sched_in(struct perf_counter *counter,
                 struct perf_cpu_context *cpuctx,
                 struct perf_counter_context *ctx,
                 int cpu)
{
        if (counter->state == PERF_COUNTER_STATE_OFF)
                return;

        counter->hw_ops->hw_perf_counter_enable(counter);
        counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */

        cpuctx->active_oncpu++;
        ctx->nr_active++;
}

static void
group_sched_in(struct perf_counter *group_counter,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx,
               int cpu)
{
        struct perf_counter *counter;

        counter_sched_in(group_counter, cpuctx, ctx, cpu);

        /*
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                counter_sched_in(counter, cpuctx, ctx, cpu);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but hw_perf_counter_enable()
 * sets the enabled bit in the control field of the counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &task->perf_counter_ctx;
        struct perf_counter *counter;

        if (likely(!ctx->nr_counters))
                return;

        spin_lock(&ctx->lock);
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (ctx->nr_active == cpuctx->max_pertask)
                        break;

                /*
                 * Listen to the 'cpu' scheduling filter constraint
                 * of counters:
                 */
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;

                group_sched_in(counter, cpuctx, ctx, cpu);
        }
        spin_unlock(&ctx->lock);

        cpuctx->task_ctx = ctx;
}
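
/*
 * Switch out the current task's counters and mark them all OFF, so that
 * they are not scheduled back in until perf_counter_task_enable():
 */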
int perf_counter_task_disable(void)
{
        struct task_struct *curr = current;
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        struct perf_counter *counter;
        u64 perf_flags;
        int cpu;

        if (likely(!ctx->nr_counters))
                return 0;

        local_irq_disable();
        cpu = smp_processor_id();

        perf_counter_task_sched_out(curr, cpu);

        spin_lock(&ctx->lock);

        /*
         * Disable all the counters:
         */
        perf_flags = hw_perf_save_disable();

        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
                counter->state = PERF_COUNTER_STATE_OFF;
        }

        hw_perf_restore(perf_flags);

        spin_unlock(&ctx->lock);

        local_irq_enable();

        return 0;
}
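
/*
 * Put all OFF counters back into the INACTIVE state and schedule the
 * current task's counters back in:
 */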
int perf_counter_task_enable(void)
{
        struct task_struct *curr = current;
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        struct perf_counter *counter;
        u64 perf_flags;
        int cpu;

        if (likely(!ctx->nr_counters))
                return 0;

        local_irq_disable();
        cpu = smp_processor_id();

        spin_lock(&ctx->lock);

        /*
         * Enable all the counters:
         */
        perf_flags = hw_perf_save_disable();

        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state != PERF_COUNTER_STATE_OFF)
                        continue;
                counter->state = PERF_COUNTER_STATE_INACTIVE;
        }
        hw_perf_restore(perf_flags);

        spin_unlock(&ctx->lock);

        perf_counter_task_sched_in(curr, cpu);

        local_irq_enable();

        return 0;
}
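
/*
 * Round-robin the task's counters: rotate the first entry (or group) to
 * the end of the list and reschedule, so that all counters get their turn
 * when more are pending than can be active at once:
 */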
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        struct perf_counter *counter;
        u64 perf_flags;

        if (likely(!ctx->nr_counters))
                return;

        perf_counter_task_sched_out(curr, cpu);

        spin_lock(&ctx->lock);

        /*
         * Rotate the first entry last (works just fine for group counters too):
         */
        perf_flags = hw_perf_save_disable();
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                list_del(&counter->list_entry);
                list_add_tail(&counter->list_entry, &ctx->counter_list);
                break;
        }
        hw_perf_restore(perf_flags);

        spin_unlock(&ctx->lock);

        perf_counter_task_sched_in(curr, cpu);
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
                            struct task_struct *task)
{
        spin_lock_init(&ctx->lock);
        INIT_LIST_HEAD(&ctx->counter_list);
        ctx->nr_counters = 0;
        ctx->task = task;
}

/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *task)
{
        __perf_counter_init_context(&task->perf_counter_ctx, task);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __hw_perf_counter_read(void *info)
{
        struct perf_counter *counter = info;

        counter->hw_ops->hw_perf_counter_read(counter);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
        /*
         * If counter is enabled and currently active on a CPU, update the
         * value in the counter structure:
         */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                smp_call_function_single(counter->oncpu,
                                         __hw_perf_counter_read, counter, 1);
        }

        return atomic64_read(&counter->count);
}

/*
 * Cross CPU call to switch performance data pointers
 */
static void __perf_switch_irq_data(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_data *oldirqdata = counter->irqdata;

        /*
         * If this is a task context, we need to check whether it is
         * the current task context of this cpu. If not it has been
         * scheduled out before the smp call arrived.
         */
        if (ctx->task) {
                if (cpuctx->task_ctx != ctx)
                        return;
                spin_lock(&ctx->lock);
        }

        /* Change the pointer in an NMI-safe way */
        atomic_long_set((atomic_long_t *)&counter->irqdata,
                        (unsigned long) counter->usrdata);
        counter->usrdata = oldirqdata;

        if (ctx->task)
                spin_unlock(&ctx->lock);
}
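
/*
 * Swap the counter's irqdata and usrdata buffers so that pending IRQ
 * data can be drained. Uses a cross-CPU call when the counter may be
 * active on another CPU:
 */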
static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_data *oldirqdata = counter->irqdata;
        struct task_struct *task = ctx->task;

        if (!task) {
                smp_call_function_single(counter->cpu,
                                         __perf_switch_irq_data,
                                         counter, 1);
                return counter->usrdata;
        }

retry:
        spin_lock_irq(&ctx->lock);
        if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
                counter->irqdata = counter->usrdata;
                counter->usrdata = oldirqdata;
                spin_unlock_irq(&ctx->lock);
                return oldirqdata;
        }
        spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_switch_irq_data, counter);
        /* Might have failed, because task was scheduled out */
        if (counter->irqdata == oldirqdata)
                goto retry;

        return counter->usrdata;
}

static void put_context(struct perf_counter_context *ctx)
{
        if (ctx->task)
                put_task_struct(ctx->task);
}
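
/*
 * Find the counter context for a pid/cpu pair: the per-CPU context when
 * cpu != -1 (CAP_SYS_ADMIN required), otherwise the context of the task
 * identified by pid (or of the current task when pid is 0). Takes a
 * reference on the task, which put_context() drops again:
 */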
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
        struct perf_cpu_context *cpuctx;
        struct perf_counter_context *ctx;
        struct task_struct *task;

        /*
         * If cpu is not a wildcard then this is a percpu counter:
         */
        if (cpu != -1) {
                /* Must be root to operate on a CPU counter: */
                if (!capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);

                if (cpu < 0 || cpu > num_possible_cpus())
                        return ERR_PTR(-EINVAL);

                /*
                 * We could be clever and allow attaching a counter to an
                 * offline CPU and activate it when the CPU comes up, but
                 * that's for later.
                 */
                if (!cpu_isset(cpu, cpu_online_map))
                        return ERR_PTR(-ENODEV);

                cpuctx = &per_cpu(perf_cpu_context, cpu);
                ctx = &cpuctx->ctx;

                WARN_ON_ONCE(ctx->task);
                return ctx;
        }

        rcu_read_lock();
        if (!pid)
                task = current;
        else
                task = find_task_by_vpid(pid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();

        if (!task)
                return ERR_PTR(-ESRCH);

        ctx = &task->perf_counter_ctx;
        ctx->task = task;

        /* Reuse ptrace permission checks for now. */
        if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
                put_context(ctx);
                return ERR_PTR(-EACCES);
        }

        return ctx;
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
        struct perf_counter *counter = file->private_data;
        struct perf_counter_context *ctx = counter->ctx;

        file->private_data = NULL;

        mutex_lock(&counter->mutex);

        perf_counter_remove_from_context(counter);
        put_context(ctx);

        mutex_unlock(&counter->mutex);

        kfree(counter);

        return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
        u64 cntval;

        if (count != sizeof(cntval))
                return -EINVAL;

        mutex_lock(&counter->mutex);
        cntval = perf_counter_read(counter);
        mutex_unlock(&counter->mutex);

        return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}
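
/*
 * Copy pending data from a perf_data buffer to user space and advance
 * the buffer's read index:
 */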
static ssize_t
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
{
        if (!usrdata->len)
                return 0;

        count = min(count, (size_t)usrdata->len);
        if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
                return -EFAULT;

        /* Adjust the counters */
        usrdata->len -= count;
        if (!usrdata->len)
                usrdata->rd_idx = 0;
        else
                usrdata->rd_idx += count;

        return count;
}
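
/*
 * Blocking read of IRQ-generated data: unless O_NONBLOCK was requested,
 * wait (interruptibly) until at least 'count' bytes are pending, then
 * drain the usrdata buffer and, if necessary, switch and drain the
 * irqdata buffer too:
 */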
static ssize_t
perf_read_irq_data(struct perf_counter *counter,
                   char __user *buf,
                   size_t count,
                   int nonblocking)
{
        struct perf_data *irqdata, *usrdata;
        DECLARE_WAITQUEUE(wait, current);
        ssize_t res;

        irqdata = counter->irqdata;
        usrdata = counter->usrdata;

        if (usrdata->len + irqdata->len >= count)
                goto read_pending;

        if (nonblocking)
                return -EAGAIN;

        spin_lock_irq(&counter->waitq.lock);
        __add_wait_queue(&counter->waitq, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (usrdata->len + irqdata->len >= count)
                        break;

                if (signal_pending(current))
                        break;

                spin_unlock_irq(&counter->waitq.lock);
                schedule();
                spin_lock_irq(&counter->waitq.lock);
        }
        __remove_wait_queue(&counter->waitq, &wait);
        __set_current_state(TASK_RUNNING);
        spin_unlock_irq(&counter->waitq.lock);

        if (usrdata->len + irqdata->len < count)
                return -ERESTARTSYS;

read_pending:
        mutex_lock(&counter->mutex);

        /* Drain pending data first: */
        res = perf_copy_usrdata(usrdata, buf, count);
        if (res < 0 || res == count)
                goto out;

        /* Switch irq buffer: */
        usrdata = perf_switch_irq_data(counter);
        if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) {
                if (!res)
                        res = -EFAULT;
        } else {
                res = count;
        }
out:
        mutex_unlock(&counter->mutex);

        return res;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct perf_counter *counter = file->private_data;

        switch (counter->hw_event.record_type) {
        case PERF_RECORD_SIMPLE:
                return perf_read_hw(counter, buf, count);

        case PERF_RECORD_IRQ:
        case PERF_RECORD_GROUP:
                return perf_read_irq_data(counter, buf, count,
                                          file->f_flags & O_NONBLOCK);
        }
        return -EINVAL;
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
        struct perf_counter *counter = file->private_data;
        unsigned int events = 0;
        unsigned long flags;

        poll_wait(file, &counter->waitq, wait);

        spin_lock_irqsave(&counter->waitq.lock, flags);
        if (counter->usrdata->len || counter->irqdata->len)
                events |= POLLIN;
        spin_unlock_irqrestore(&counter->waitq.lock, flags);

        return events;
}

static const struct file_operations perf_fops = {
        .release                = perf_release,
        .read                   = perf_read,
        .poll                   = perf_poll,
};
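
/*
 * Software counters, implemented without hardware support: the CPU clock
 * counter reports cpu_clock() of the current CPU, the task clock counter
 * reports the task's sum_exec_runtime. Enable/disable are NOPs:
 */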
static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
        int cpu = raw_smp_processor_id();

        atomic64_set(&counter->count, cpu_clock(cpu));
}

static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
        .hw_perf_counter_enable         = cpu_clock_perf_counter_enable,
        .hw_perf_counter_disable        = cpu_clock_perf_counter_disable,
        .hw_perf_counter_read           = cpu_clock_perf_counter_read,
};

static void task_clock_perf_counter_enable(struct perf_counter *counter)
{
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
        atomic64_set(&counter->count, current->se.sum_exec_runtime);
}

static const struct hw_perf_counter_ops perf_ops_task_clock = {
        .hw_perf_counter_enable         = task_clock_perf_counter_enable,
        .hw_perf_counter_disable        = task_clock_perf_counter_disable,
        .hw_perf_counter_read           = task_clock_perf_counter_read,
};
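
/*
 * Pick the hw_perf_counter_ops for the software counter types; return
 * NULL for everything that needs the architecture code:
 */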
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
        const struct hw_perf_counter_ops *hw_ops = NULL;

        switch (counter->hw_event.type) {
        case PERF_COUNT_CPU_CLOCK:
                hw_ops = &perf_ops_cpu_clock;
                break;
        case PERF_COUNT_TASK_CLOCK:
                hw_ops = &perf_ops_task_clock;
                break;
        default:
                break;
        }
        return hw_ops;
}

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
                   int cpu,
                   struct perf_counter *group_leader)
{
        const struct hw_perf_counter_ops *hw_ops;
        struct perf_counter *counter;

        counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return NULL;

        /*
         * Single counters are their own group leaders, with an
         * empty sibling list:
         */
        if (!group_leader)
                group_leader = counter;

        mutex_init(&counter->mutex);
        INIT_LIST_HEAD(&counter->list_entry);
        INIT_LIST_HEAD(&counter->sibling_list);
        init_waitqueue_head(&counter->waitq);

        counter->irqdata                = &counter->data[0];
        counter->usrdata                = &counter->data[1];
        counter->cpu                    = cpu;
        counter->hw_event               = *hw_event;
        counter->wakeup_pending         = 0;
        counter->group_leader           = group_leader;
        counter->hw_ops                 = NULL;

        hw_ops = NULL;
        if (!hw_event->raw && hw_event->type < 0)
                hw_ops = sw_perf_counter_init(counter);
        if (!hw_ops)
                hw_ops = hw_perf_counter_init(counter);

        if (!hw_ops) {
                kfree(counter);
                return NULL;
        }
        counter->hw_ops = hw_ops;

        return counter;
}

/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr: event type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader counter fd
 */
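
/*
 * Example invocation from user space (a sketch only, not part of this
 * file; it assumes the syscall is wired up as __NR_perf_counter_open and
 * that the perf_counter_hw_event layout comes from the exported header):
 *
 *      struct perf_counter_hw_event hw_event = {
 *              .type           = PERF_COUNT_CPU_CLOCK,
 *              .record_type    = PERF_RECORD_SIMPLE,
 *      };
 *      // pid 0: current task, cpu -1: any cpu, group_fd -1: no group
 *      int fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
 *      u64 count;
 *      read(fd, &count, sizeof(count));        // PERF_RECORD_SIMPLE read
 */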
asmlinkage int
sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
                      pid_t pid, int cpu, int group_fd)
{
        struct perf_counter *counter, *group_leader;
        struct perf_counter_hw_event hw_event;
        struct perf_counter_context *ctx;
        struct file *group_file = NULL;
        int fput_needed = 0;
        int ret;

        if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
                return -EFAULT;

        /*
         * Get the target context (task or percpu):
         */
        ctx = find_get_context(pid, cpu);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /*
         * Look up the group leader (we will attach this counter to it):
         */
        group_leader = NULL;
        if (group_fd != -1) {
                ret = -EINVAL;
                group_file = fget_light(group_fd, &fput_needed);
                if (!group_file)
                        goto err_put_context;
                if (group_file->f_op != &perf_fops)
                        goto err_put_context;

                group_leader = group_file->private_data;
                /*
                 * Do not allow a recursive hierarchy (this new sibling
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
                        goto err_put_context;
                /*
                 * Do not allow attaching to a group in a different
                 * task or CPU context:
                 */
                if (group_leader->ctx != ctx)
                        goto err_put_context;
        }

        ret = -EINVAL;
        counter = perf_counter_alloc(&hw_event, cpu, group_leader);
        if (!counter)
                goto err_put_context;

        perf_install_in_context(ctx, counter, cpu);

        ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
        if (ret < 0)
                goto err_remove_free_put_context;

out_fput:
        fput_light(group_file, fput_needed);

        return ret;

err_remove_free_put_context:
        mutex_lock(&counter->mutex);
        perf_counter_remove_from_context(counter);
        mutex_unlock(&counter->mutex);
        kfree(counter);

err_put_context:
        put_context(ctx);

        goto out_fput;
}

static void __cpuinit perf_counter_init_cpu(int cpu)
{
        struct perf_cpu_context *cpuctx;

        cpuctx = &per_cpu(perf_cpu_context, cpu);
        __perf_counter_init_context(&cpuctx->ctx, NULL);

        mutex_lock(&perf_resource_mutex);
        cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
        mutex_unlock(&perf_resource_mutex);

        hw_perf_counter_setup();
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = &cpuctx->ctx;
        struct perf_counter *counter, *tmp;

        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
                __perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
        smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                perf_counter_init_cpu(cpu);
                break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                perf_counter_exit_cpu(cpu);
                break;

        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
        .notifier_call          = perf_cpu_notify,
};

static int __init perf_counter_init(void)
{
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        register_cpu_notifier(&perf_cpu_nb);

        return 0;
}
early_initcall(perf_counter_init);
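
/*
 * sysfs interface: expose the reserve_percpu and overcommit tunables via
 * the CPU sysdev class (the 'perf_counters' attribute group):
 */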
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
        return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
                        const char *buf,
                        size_t count)
{
        struct perf_cpu_context *cpuctx;
        unsigned long val;
        int err, cpu, mpt;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > perf_max_counters)
                return -EINVAL;

        mutex_lock(&perf_resource_mutex);
        perf_reserved_percpu = val;
        for_each_online_cpu(cpu) {
                cpuctx = &per_cpu(perf_cpu_context, cpu);
                spin_lock_irq(&cpuctx->ctx.lock);
                mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
                          perf_max_counters - perf_reserved_percpu);
                cpuctx->max_pertask = mpt;
                spin_unlock_irq(&cpuctx->ctx.lock);
        }
        mutex_unlock(&perf_resource_mutex);

        return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
        return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
        unsigned long val;
        int err;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;
        if (val > 1)
                return -EINVAL;

        mutex_lock(&perf_resource_mutex);
        perf_overcommit = val;
        mutex_unlock(&perf_resource_mutex);

        return count;
}

static SYSDEV_CLASS_ATTR(
                                reserve_percpu,
                                0644,
                                perf_show_reserve_percpu,
                                perf_set_reserve_percpu
                        );

static SYSDEV_CLASS_ATTR(
                                overcommit,
                                0644,
                                perf_show_overcommit,
                                perf_set_overcommit
                        );

static struct attribute *perfclass_attrs[] = {
        &attr_reserve_percpu.attr,
        &attr_overcommit.attr,
        NULL
};

static struct attribute_group perfclass_attr_group = {
        .attrs                  = perfclass_attrs,
        .name                   = "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
        return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
                                  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);